FreeBSD/Linux Kernel Cross Reference
sys/dev/bxe/bxe.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #define BXE_DRIVER_VERSION "1.78.91"
33
34 #include "bxe.h"
35 #include "ecore_sp.h"
36 #include "ecore_init.h"
37 #include "ecore_init_ops.h"
38
39 #include "57710_int_offsets.h"
40 #include "57711_int_offsets.h"
41 #include "57712_int_offsets.h"
42
43 /*
44 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
45 * explicitly here for older kernels that don't include this changeset.
46 */
47 #ifndef CTLTYPE_U64
48 #define CTLTYPE_U64 CTLTYPE_QUAD
49 #define sysctl_handle_64 sysctl_handle_quad
50 #endif
51
52 /*
53 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
54 * here as zero (0) for older kernels that don't include this changeset,
55 * thereby masking the functionality.
56 */
57 #ifndef CSUM_TCP_IPV6
58 #define CSUM_TCP_IPV6 0
59 #define CSUM_UDP_IPV6 0
60 #endif
61
62 #define BXE_DEF_SB_ATT_IDX 0x0001
63 #define BXE_DEF_SB_IDX 0x0002
64
65 /*
66 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
67 * function HW initialization.
68 */
69 #define FLR_WAIT_USEC 10000 /* 10 msecs */
70 #define FLR_WAIT_INTERVAL 50 /* usecs */
71 #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
72
73 struct pbf_pN_buf_regs {
74 int pN;
75 uint32_t init_crd;
76 uint32_t crd;
77 uint32_t crd_freed;
78 };
79
80 struct pbf_pN_cmd_regs {
81 int pN;
82 uint32_t lines_occup;
83 uint32_t lines_freed;
84 };
85
86 /*
87 * PCI Device ID Table used by bxe_probe().
88 */
89 #define BXE_DEVDESC_MAX 64
90 static struct bxe_device_type bxe_devs[] = {
91 {
92 BRCM_VENDORID,
93 CHIP_NUM_57710,
94 PCI_ANY_ID, PCI_ANY_ID,
95 "QLogic NetXtreme II BCM57710 10GbE"
96 },
97 {
98 BRCM_VENDORID,
99 CHIP_NUM_57711,
100 PCI_ANY_ID, PCI_ANY_ID,
101 "QLogic NetXtreme II BCM57711 10GbE"
102 },
103 {
104 BRCM_VENDORID,
105 CHIP_NUM_57711E,
106 PCI_ANY_ID, PCI_ANY_ID,
107 "QLogic NetXtreme II BCM57711E 10GbE"
108 },
109 {
110 BRCM_VENDORID,
111 CHIP_NUM_57712,
112 PCI_ANY_ID, PCI_ANY_ID,
113 "QLogic NetXtreme II BCM57712 10GbE"
114 },
115 {
116 BRCM_VENDORID,
117 CHIP_NUM_57712_MF,
118 PCI_ANY_ID, PCI_ANY_ID,
119 "QLogic NetXtreme II BCM57712 MF 10GbE"
120 },
121 {
122 BRCM_VENDORID,
123 CHIP_NUM_57800,
124 PCI_ANY_ID, PCI_ANY_ID,
125 "QLogic NetXtreme II BCM57800 10GbE"
126 },
127 {
128 BRCM_VENDORID,
129 CHIP_NUM_57800_MF,
130 PCI_ANY_ID, PCI_ANY_ID,
131 "QLogic NetXtreme II BCM57800 MF 10GbE"
132 },
133 {
134 BRCM_VENDORID,
135 CHIP_NUM_57810,
136 PCI_ANY_ID, PCI_ANY_ID,
137 "QLogic NetXtreme II BCM57810 10GbE"
138 },
139 {
140 BRCM_VENDORID,
141 CHIP_NUM_57810_MF,
142 PCI_ANY_ID, PCI_ANY_ID,
143 "QLogic NetXtreme II BCM57810 MF 10GbE"
144 },
145 {
146 BRCM_VENDORID,
147 CHIP_NUM_57811,
148 PCI_ANY_ID, PCI_ANY_ID,
149 "QLogic NetXtreme II BCM57811 10GbE"
150 },
151 {
152 BRCM_VENDORID,
153 CHIP_NUM_57811_MF,
154 PCI_ANY_ID, PCI_ANY_ID,
155 "QLogic NetXtreme II BCM57811 MF 10GbE"
156 },
157 {
158 BRCM_VENDORID,
159 CHIP_NUM_57840_4_10,
160 PCI_ANY_ID, PCI_ANY_ID,
161 "QLogic NetXtreme II BCM57840 4x10GbE"
162 },
163 {
164 QLOGIC_VENDORID,
165 CHIP_NUM_57840_4_10,
166 PCI_ANY_ID, PCI_ANY_ID,
167 "QLogic NetXtreme II BCM57840 4x10GbE"
168 },
169 {
170 BRCM_VENDORID,
171 CHIP_NUM_57840_2_20,
172 PCI_ANY_ID, PCI_ANY_ID,
173 "QLogic NetXtreme II BCM57840 2x20GbE"
174 },
175 {
176 BRCM_VENDORID,
177 CHIP_NUM_57840_MF,
178 PCI_ANY_ID, PCI_ANY_ID,
179 "QLogic NetXtreme II BCM57840 MF 10GbE"
180 },
181 {
182 0, 0, 0, 0, NULL
183 }
184 };
185
186 MALLOC_DECLARE(M_BXE_ILT);
187 MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
188
189 /*
190 * FreeBSD device entry points.
191 */
192 static int bxe_probe(device_t);
193 static int bxe_attach(device_t);
194 static int bxe_detach(device_t);
195 static int bxe_shutdown(device_t);
196
197
198 /*
199 * FreeBSD KLD module/device interface event handler method.
200 */
201 static device_method_t bxe_methods[] = {
202 /* Device interface (device_if.h) */
203 DEVMETHOD(device_probe, bxe_probe),
204 DEVMETHOD(device_attach, bxe_attach),
205 DEVMETHOD(device_detach, bxe_detach),
206 DEVMETHOD(device_shutdown, bxe_shutdown),
207 /* Bus interface (bus_if.h) */
208 DEVMETHOD(bus_print_child, bus_generic_print_child),
209 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
210 KOBJMETHOD_END
211 };
212
213 /*
214 * FreeBSD KLD Module data declaration
215 */
216 static driver_t bxe_driver = {
217 "bxe", /* module name */
218 bxe_methods, /* event handler */
219 sizeof(struct bxe_softc) /* extra data */
220 };
221
222 MODULE_DEPEND(bxe, pci, 1, 1, 1);
223 MODULE_DEPEND(bxe, ether, 1, 1, 1);
224 DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0);
225
226 DEBUGNET_DEFINE(bxe);
227
228 /* resources needed for unloading a previously loaded device */
229
230 #define BXE_PREV_WAIT_NEEDED 1
231 struct mtx bxe_prev_mtx;
232 MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
233 struct bxe_prev_list_node {
234 LIST_ENTRY(bxe_prev_list_node) node;
235 uint8_t bus;
236 uint8_t slot;
237 uint8_t path;
238 uint8_t aer; /* XXX automatic error recovery */
239 uint8_t undi;
240 };
241 static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
242
243 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
244
245 /* Tunable device values... */
246
247 SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
248 "bxe driver parameters");
249
250 /* Debug */
251 unsigned long bxe_debug = 0;
252 SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
253 &bxe_debug, 0, "Debug logging mode");
254
255 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
256 static int bxe_interrupt_mode = INTR_MODE_MSIX;
257 SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
258 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
259
260 /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
261 static int bxe_queue_count = 4;
262 SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
263 &bxe_queue_count, 0, "Multi-Queue queue count");
264
265 /* max number of buffers per queue (default RX_BD_USABLE) */
266 static int bxe_max_rx_bufs = 0;
267 SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
268 &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
269
270 /* Host interrupt coalescing RX tick timer (usecs) */
271 static int bxe_hc_rx_ticks = 25;
272 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
273 &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
274
275 /* Host interrupt coalescing TX tick timer (usecs) */
276 static int bxe_hc_tx_ticks = 50;
277 SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
278 &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
279
280 /* Maximum number of Rx packets to process at a time */
281 static int bxe_rx_budget = 0xffffffff;
282 SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
283 &bxe_rx_budget, 0, "Rx processing budget");
284
285 /* Maximum LRO aggregation size */
286 static int bxe_max_aggregation_size = 0;
287 SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
288 &bxe_max_aggregation_size, 0, "max aggregation size");
289
290 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
291 static int bxe_mrrs = -1;
292 SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
293 &bxe_mrrs, 0, "PCIe maximum read request size");
294
295 /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
296 static int bxe_autogreeen = 0;
297 SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
298 &bxe_autogreeen, 0, "AutoGrEEEn support");
299
300 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
301 static int bxe_udp_rss = 0;
302 SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
303 &bxe_udp_rss, 0, "UDP RSS support");
304
305
306 #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
307
308 #define STATS_OFFSET32(stat_name) \
309 (offsetof(struct bxe_eth_stats, stat_name) / 4)
310
311 #define Q_STATS_OFFSET32(stat_name) \
312 (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
313
314 static const struct {
315 uint32_t offset;
316 uint32_t size;
317 uint32_t flags;
318 #define STATS_FLAGS_PORT 1
319 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
320 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
321 char string[STAT_NAME_LEN];
322 } bxe_eth_stats_arr[] = {
323 { STATS_OFFSET32(total_bytes_received_hi),
324 8, STATS_FLAGS_BOTH, "rx_bytes" },
325 { STATS_OFFSET32(error_bytes_received_hi),
326 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
327 { STATS_OFFSET32(total_unicast_packets_received_hi),
328 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
329 { STATS_OFFSET32(total_multicast_packets_received_hi),
330 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
331 { STATS_OFFSET32(total_broadcast_packets_received_hi),
332 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
333 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
334 8, STATS_FLAGS_PORT, "rx_crc_errors" },
335 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
336 8, STATS_FLAGS_PORT, "rx_align_errors" },
337 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
338 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
339 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
340 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
341 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
342 8, STATS_FLAGS_PORT, "rx_fragments" },
343 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
344 8, STATS_FLAGS_PORT, "rx_jabbers" },
345 { STATS_OFFSET32(no_buff_discard_hi),
346 8, STATS_FLAGS_BOTH, "rx_discards" },
347 { STATS_OFFSET32(mac_filter_discard),
348 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
349 { STATS_OFFSET32(mf_tag_discard),
350 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
351 { STATS_OFFSET32(pfc_frames_received_hi),
352 8, STATS_FLAGS_PORT, "pfc_frames_received" },
353 { STATS_OFFSET32(pfc_frames_sent_hi),
354 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
355 { STATS_OFFSET32(brb_drop_hi),
356 8, STATS_FLAGS_PORT, "rx_brb_discard" },
357 { STATS_OFFSET32(brb_truncate_hi),
358 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
359 { STATS_OFFSET32(pause_frames_received_hi),
360 8, STATS_FLAGS_PORT, "rx_pause_frames" },
361 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
362 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
363 { STATS_OFFSET32(nig_timer_max),
364 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
365 { STATS_OFFSET32(total_bytes_transmitted_hi),
366 8, STATS_FLAGS_BOTH, "tx_bytes" },
367 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
368 8, STATS_FLAGS_PORT, "tx_error_bytes" },
369 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
370 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
371 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
372 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
373 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
374 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
375 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
376 8, STATS_FLAGS_PORT, "tx_mac_errors" },
377 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
378 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
379 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
380 8, STATS_FLAGS_PORT, "tx_single_collisions" },
381 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
382 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
383 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
384 8, STATS_FLAGS_PORT, "tx_deferred" },
385 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
386 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
387 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
388 8, STATS_FLAGS_PORT, "tx_late_collisions" },
389 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
390 8, STATS_FLAGS_PORT, "tx_total_collisions" },
391 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
392 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
393 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
394 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
395 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
396 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
397 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
398 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
399 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
400 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
401 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
402 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
403 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
404 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
405 { STATS_OFFSET32(pause_frames_sent_hi),
406 8, STATS_FLAGS_PORT, "tx_pause_frames" },
407 { STATS_OFFSET32(total_tpa_aggregations_hi),
408 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
409 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
410 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
411 { STATS_OFFSET32(total_tpa_bytes_hi),
412 8, STATS_FLAGS_FUNC, "tpa_bytes"},
413 { STATS_OFFSET32(eee_tx_lpi),
414 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
415 { STATS_OFFSET32(rx_calls),
416 4, STATS_FLAGS_FUNC, "rx_calls"},
417 { STATS_OFFSET32(rx_pkts),
418 4, STATS_FLAGS_FUNC, "rx_pkts"},
419 { STATS_OFFSET32(rx_tpa_pkts),
420 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
421 { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
422 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
423 { STATS_OFFSET32(rx_bxe_service_rxsgl),
424 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
425 { STATS_OFFSET32(rx_jumbo_sge_pkts),
426 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
427 { STATS_OFFSET32(rx_soft_errors),
428 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
429 { STATS_OFFSET32(rx_hw_csum_errors),
430 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
431 { STATS_OFFSET32(rx_ofld_frames_csum_ip),
432 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
433 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
434 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
435 { STATS_OFFSET32(rx_budget_reached),
436 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
437 { STATS_OFFSET32(tx_pkts),
438 4, STATS_FLAGS_FUNC, "tx_pkts"},
439 { STATS_OFFSET32(tx_soft_errors),
440 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
441 { STATS_OFFSET32(tx_ofld_frames_csum_ip),
442 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
443 { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
444 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
445 { STATS_OFFSET32(tx_ofld_frames_csum_udp),
446 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
447 { STATS_OFFSET32(tx_ofld_frames_lso),
448 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
449 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
450 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
451 { STATS_OFFSET32(tx_encap_failures),
452 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
453 { STATS_OFFSET32(tx_hw_queue_full),
454 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
455 { STATS_OFFSET32(tx_hw_max_queue_depth),
456 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
457 { STATS_OFFSET32(tx_dma_mapping_failure),
458 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
459 { STATS_OFFSET32(tx_max_drbr_queue_depth),
460 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
461 { STATS_OFFSET32(tx_window_violation_std),
462 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
463 { STATS_OFFSET32(tx_window_violation_tso),
464 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
465 { STATS_OFFSET32(tx_chain_lost_mbuf),
466 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
467 { STATS_OFFSET32(tx_frames_deferred),
468 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
469 { STATS_OFFSET32(tx_queue_xoff),
470 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
471 { STATS_OFFSET32(mbuf_defrag_attempts),
472 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
473 { STATS_OFFSET32(mbuf_defrag_failures),
474 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
475 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
476 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
477 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
478 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
479 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
480 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
481 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
482 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
483 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
484 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
485 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
486 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
487 { STATS_OFFSET32(mbuf_alloc_tx),
488 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
489 { STATS_OFFSET32(mbuf_alloc_rx),
490 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
491 { STATS_OFFSET32(mbuf_alloc_sge),
492 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
493 { STATS_OFFSET32(mbuf_alloc_tpa),
494 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
495 { STATS_OFFSET32(tx_queue_full_return),
496 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
497 { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
498 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
499 { STATS_OFFSET32(tx_request_link_down_failures),
500 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
501 { STATS_OFFSET32(bd_avail_too_less_failures),
502 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
503 { STATS_OFFSET32(tx_mq_not_empty),
504 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
505 { STATS_OFFSET32(nsegs_path1_errors),
506 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
507 { STATS_OFFSET32(nsegs_path2_errors),
508 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
509
510
511 };
512
513 static const struct {
514 uint32_t offset;
515 uint32_t size;
516 char string[STAT_NAME_LEN];
517 } bxe_eth_q_stats_arr[] = {
518 { Q_STATS_OFFSET32(total_bytes_received_hi),
519 8, "rx_bytes" },
520 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
521 8, "rx_ucast_packets" },
522 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
523 8, "rx_mcast_packets" },
524 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
525 8, "rx_bcast_packets" },
526 { Q_STATS_OFFSET32(no_buff_discard_hi),
527 8, "rx_discards" },
528 { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
529 8, "tx_bytes" },
530 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
531 8, "tx_ucast_packets" },
532 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
533 8, "tx_mcast_packets" },
534 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
535 8, "tx_bcast_packets" },
536 { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
537 8, "tpa_aggregations" },
538 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
539 8, "tpa_aggregated_frames"},
540 { Q_STATS_OFFSET32(total_tpa_bytes_hi),
541 8, "tpa_bytes"},
542 { Q_STATS_OFFSET32(rx_calls),
543 4, "rx_calls"},
544 { Q_STATS_OFFSET32(rx_pkts),
545 4, "rx_pkts"},
546 { Q_STATS_OFFSET32(rx_tpa_pkts),
547 4, "rx_tpa_pkts"},
548 { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
549 4, "rx_erroneous_jumbo_sge_pkts"},
550 { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
551 4, "rx_bxe_service_rxsgl"},
552 { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
553 4, "rx_jumbo_sge_pkts"},
554 { Q_STATS_OFFSET32(rx_soft_errors),
555 4, "rx_soft_errors"},
556 { Q_STATS_OFFSET32(rx_hw_csum_errors),
557 4, "rx_hw_csum_errors"},
558 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
559 4, "rx_ofld_frames_csum_ip"},
560 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
561 4, "rx_ofld_frames_csum_tcp_udp"},
562 { Q_STATS_OFFSET32(rx_budget_reached),
563 4, "rx_budget_reached"},
564 { Q_STATS_OFFSET32(tx_pkts),
565 4, "tx_pkts"},
566 { Q_STATS_OFFSET32(tx_soft_errors),
567 4, "tx_soft_errors"},
568 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
569 4, "tx_ofld_frames_csum_ip"},
570 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
571 4, "tx_ofld_frames_csum_tcp"},
572 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
573 4, "tx_ofld_frames_csum_udp"},
574 { Q_STATS_OFFSET32(tx_ofld_frames_lso),
575 4, "tx_ofld_frames_lso"},
576 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
577 4, "tx_ofld_frames_lso_hdr_splits"},
578 { Q_STATS_OFFSET32(tx_encap_failures),
579 4, "tx_encap_failures"},
580 { Q_STATS_OFFSET32(tx_hw_queue_full),
581 4, "tx_hw_queue_full"},
582 { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
583 4, "tx_hw_max_queue_depth"},
584 { Q_STATS_OFFSET32(tx_dma_mapping_failure),
585 4, "tx_dma_mapping_failure"},
586 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
587 4, "tx_max_drbr_queue_depth"},
588 { Q_STATS_OFFSET32(tx_window_violation_std),
589 4, "tx_window_violation_std"},
590 { Q_STATS_OFFSET32(tx_window_violation_tso),
591 4, "tx_window_violation_tso"},
592 { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
593 4, "tx_chain_lost_mbuf"},
594 { Q_STATS_OFFSET32(tx_frames_deferred),
595 4, "tx_frames_deferred"},
596 { Q_STATS_OFFSET32(tx_queue_xoff),
597 4, "tx_queue_xoff"},
598 { Q_STATS_OFFSET32(mbuf_defrag_attempts),
599 4, "mbuf_defrag_attempts"},
600 { Q_STATS_OFFSET32(mbuf_defrag_failures),
601 4, "mbuf_defrag_failures"},
602 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
603 4, "mbuf_rx_bd_alloc_failed"},
604 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
605 4, "mbuf_rx_bd_mapping_failed"},
606 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
607 4, "mbuf_rx_tpa_alloc_failed"},
608 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
609 4, "mbuf_rx_tpa_mapping_failed"},
610 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
611 4, "mbuf_rx_sge_alloc_failed"},
612 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
613 4, "mbuf_rx_sge_mapping_failed"},
614 { Q_STATS_OFFSET32(mbuf_alloc_tx),
615 4, "mbuf_alloc_tx"},
616 { Q_STATS_OFFSET32(mbuf_alloc_rx),
617 4, "mbuf_alloc_rx"},
618 { Q_STATS_OFFSET32(mbuf_alloc_sge),
619 4, "mbuf_alloc_sge"},
620 { Q_STATS_OFFSET32(mbuf_alloc_tpa),
621 4, "mbuf_alloc_tpa"},
622 { Q_STATS_OFFSET32(tx_queue_full_return),
623 4, "tx_queue_full_return"},
624 { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
625 4, "bxe_tx_mq_sc_state_failures"},
626 { Q_STATS_OFFSET32(tx_request_link_down_failures),
627 4, "tx_request_link_down_failures"},
628 { Q_STATS_OFFSET32(bd_avail_too_less_failures),
629 4, "bd_avail_too_less_failures"},
630 { Q_STATS_OFFSET32(tx_mq_not_empty),
631 4, "tx_mq_not_empty"},
632 { Q_STATS_OFFSET32(nsegs_path1_errors),
633 4, "nsegs_path1_errors"},
634 { Q_STATS_OFFSET32(nsegs_path2_errors),
635 4, "nsegs_path2_errors"}
636
637
638 };
639
640 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr)
641 #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
642
643
644 static void bxe_cmng_fns_init(struct bxe_softc *sc,
645 uint8_t read_cfg,
646 uint8_t cmng_type);
647 static int bxe_get_cmng_fns_mode(struct bxe_softc *sc);
648 static void storm_memset_cmng(struct bxe_softc *sc,
649 struct cmng_init *cmng,
650 uint8_t port);
651 static void bxe_set_reset_global(struct bxe_softc *sc);
652 static void bxe_set_reset_in_progress(struct bxe_softc *sc);
653 static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
654 int engine);
655 static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
656 static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
657 uint8_t *global,
658 uint8_t print);
659 static void bxe_int_disable(struct bxe_softc *sc);
660 static int bxe_release_leader_lock(struct bxe_softc *sc);
661 static void bxe_pf_disable(struct bxe_softc *sc);
662 static void bxe_free_fp_buffers(struct bxe_softc *sc);
663 static inline void bxe_update_rx_prod(struct bxe_softc *sc,
664 struct bxe_fastpath *fp,
665 uint16_t rx_bd_prod,
666 uint16_t rx_cq_prod,
667 uint16_t rx_sge_prod);
668 static void bxe_link_report_locked(struct bxe_softc *sc);
669 static void bxe_link_report(struct bxe_softc *sc);
670 static void bxe_link_status_update(struct bxe_softc *sc);
671 static void bxe_periodic_callout_func(void *xsc);
672 static void bxe_periodic_start(struct bxe_softc *sc);
673 static void bxe_periodic_stop(struct bxe_softc *sc);
674 static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
675 uint16_t prev_index,
676 uint16_t index);
677 static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
678 int queue);
679 static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
680 uint16_t index);
681 static uint8_t bxe_txeof(struct bxe_softc *sc,
682 struct bxe_fastpath *fp);
683 static void bxe_task_fp(struct bxe_fastpath *fp);
684 static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
685 struct mbuf *m,
686 uint8_t contents);
687 static int bxe_alloc_mem(struct bxe_softc *sc);
688 static void bxe_free_mem(struct bxe_softc *sc);
689 static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
690 static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
691 static int bxe_interrupt_attach(struct bxe_softc *sc);
692 static void bxe_interrupt_detach(struct bxe_softc *sc);
693 static void bxe_set_rx_mode(struct bxe_softc *sc);
694 static int bxe_init_locked(struct bxe_softc *sc);
695 static int bxe_stop_locked(struct bxe_softc *sc);
696 static void bxe_sp_err_timeout_task(void *arg, int pending);
697 void bxe_parity_recover(struct bxe_softc *sc);
698 void bxe_handle_error(struct bxe_softc *sc);
699 static __noinline int bxe_nic_load(struct bxe_softc *sc,
700 int load_mode);
701 static __noinline int bxe_nic_unload(struct bxe_softc *sc,
702 uint32_t unload_mode,
703 uint8_t keep_link);
704
705 static void bxe_handle_sp_tq(void *context, int pending);
706 static void bxe_handle_fp_tq(void *context, int pending);
707
708 static int bxe_add_cdev(struct bxe_softc *sc);
709 static void bxe_del_cdev(struct bxe_softc *sc);
710 int bxe_grc_dump(struct bxe_softc *sc);
711 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
712 static void bxe_free_buf_rings(struct bxe_softc *sc);
713
714 /* calculate crc32 on a buffer (NOTE: crc32_length MUST be a multiple of 8) */
715 uint32_t
716 calc_crc32(uint8_t *crc32_packet,
717 uint32_t crc32_length,
718 uint32_t crc32_seed,
719 uint8_t complement)
720 {
721 uint32_t byte = 0;
722 uint32_t bit = 0;
723 uint8_t msb = 0;
724 uint32_t temp = 0;
725 uint32_t shft = 0;
726 uint8_t current_byte = 0;
727 uint32_t crc32_result = crc32_seed;
728 const uint32_t CRC32_POLY = 0x1edc6f41;
729
730 if ((crc32_packet == NULL) ||
731 (crc32_length == 0) ||
732 ((crc32_length % 8) != 0))
733 {
734 return (crc32_result);
735 }
736
737 for (byte = 0; byte < crc32_length; byte = byte + 1)
738 {
739 current_byte = crc32_packet[byte];
740 for (bit = 0; bit < 8; bit = bit + 1)
741 {
742 /* msb = crc32_result[31]; */
743 msb = (uint8_t)(crc32_result >> 31);
744
745 crc32_result = crc32_result << 1;
746
747 /* if (msb != current_byte[bit]) */
748 if (msb != (0x1 & (current_byte >> bit)))
749 {
750 crc32_result = crc32_result ^ CRC32_POLY;
751 /* crc32_result[0] = 1 */
752 crc32_result |= 1;
753 }
754 }
755 }
756
757 /* Last step is to:
758 * 1. "mirror" every bit
759 * 2. swap the 4 bytes
760 * 3. complement each bit
761 */
762
763 /* Mirror */
764 temp = crc32_result;
765 shft = sizeof(crc32_result) * 8 - 1;
766
767 for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
768 {
769 temp <<= 1;
770 temp |= crc32_result & 1;
771 shft--;
772 }
773
774 /* temp[31-bit] = crc32_result[bit] */
775 temp <<= shft;
776
777 /* Swap */
778 /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
779 {
780 uint32_t t0, t1, t2, t3;
781 t0 = (0x000000ff & (temp >> 24));
782 t1 = (0x0000ff00 & (temp >> 8));
783 t2 = (0x00ff0000 & (temp << 8));
784 t3 = (0xff000000 & (temp << 24));
785 crc32_result = t0 | t1 | t2 | t3;
786 }
787
788 /* Complement */
789 if (complement)
790 {
791 crc32_result = ~crc32_result;
792 }
793
794 return (crc32_result);
795 }
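
/*
 * Example (editor's sketch, not driver code): computing the CRC32 of an
 * 8-byte buffer with a typical all-ones seed and complemented result. The
 * buffer length must be a multiple of 8 or calc_crc32() returns the seed
 * unchanged; the buffer contents below are arbitrary.
 */
static inline uint32_t
calc_crc32_example(void)
{
    uint8_t buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

    /* seed 0xffffffff, complement the final result */
    return (calc_crc32(buf, sizeof(buf), 0xffffffff, 1));
}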
796
797 int
798 bxe_test_bit(int nr,
799 volatile unsigned long *addr)
800 {
801 return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
802 }
803
804 void
805 bxe_set_bit(unsigned int nr,
806 volatile unsigned long *addr)
807 {
808 atomic_set_acq_long(addr, (1 << nr));
809 }
810
811 void
812 bxe_clear_bit(int nr,
813 volatile unsigned long *addr)
814 {
815 atomic_clear_acq_long(addr, (1 << nr));
816 }
817
818 int
819 bxe_test_and_set_bit(int nr,
820 volatile unsigned long *addr)
821 {
822 unsigned long x;
823 nr = (1 << nr);
824 do {
825 x = *addr;
826 } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
827 // if (x & nr) bit_was_set; else bit_was_not_set;
828 return (x & nr);
829 }
830
831 int
832 bxe_test_and_clear_bit(int nr,
833 volatile unsigned long *addr)
834 {
835 unsigned long x;
836 nr = (1 << nr);
837 do {
838 x = *addr;
839 } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
840 // if (x & nr) bit_was_set; else bit_was_not_set;
841 return (x & nr);
842 }
843
844 int
845 bxe_cmpxchg(volatile int *addr,
846 int old,
847 int new)
848 {
849 int x;
850 do {
851 x = *addr;
852 } while (atomic_cmpset_acq_int(addr, old, new) == 0);
853 return (x);
854 }
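
/*
 * Usage sketch (editor's illustration; the function name is hypothetical):
 * the atomic helpers above mirror the Linux test_and_set_bit() /
 * test_and_clear_bit() pattern, e.g. for claiming a software state flag
 * exactly once across contexts.
 */
static inline int
bxe_claim_flag_example(volatile unsigned long *flags, int bit)
{
    /* a nonzero return from test-and-set means the bit was already owned */
    if (bxe_test_and_set_bit(bit, flags)) {
        return (1); /* busy */
    }
    /* ... do work that requires exclusive ownership of the flag ... */
    bxe_clear_bit(bit, flags);
    return (0);
}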
855
856 /*
857 * Get DMA memory from the OS.
858 *
859 * Validates that the OS has provided DMA buffers in response to a
860 * bus_dmamap_load call and saves the physical address of those buffers.
861 * When this callback is used, bus_dmamap_load() itself returns 0, so the
862 * error status is recorded in the bxe_dma structure (paddr and nseg are
863 * zeroed on failure) to pass any failures back to the caller.
864 *
865 * Returns:
866 * Nothing.
867 */
868 static void
869 bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
870 {
871 struct bxe_dma *dma = arg;
872
873 if (error) {
874 dma->paddr = 0;
875 dma->nseg = 0;
876 BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
877 } else {
878 dma->paddr = segs->ds_addr;
879 dma->nseg = nseg;
880 }
881 }
882
883 /*
884 * Allocate a block of memory and map it for DMA. No partial completions
885 * allowed and release any resources acquired if we can't acquire all
886 * resources.
887 *
888 * Returns:
889 * 0 = Success, !0 = Failure
890 */
891 int
892 bxe_dma_alloc(struct bxe_softc *sc,
893 bus_size_t size,
894 struct bxe_dma *dma,
895 const char *msg)
896 {
897 int rc;
898
899 if (dma->size > 0) {
900 BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
901 (unsigned long)dma->size);
902 return (1);
903 }
904
905 memset(dma, 0, sizeof(*dma)); /* sanity */
906 dma->sc = sc;
907 dma->size = size;
908 snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
909
910 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
911 BCM_PAGE_SIZE, /* alignment */
912 0, /* boundary limit */
913 BUS_SPACE_MAXADDR, /* restricted low */
914 BUS_SPACE_MAXADDR, /* restricted hi */
915 NULL, /* addr filter() */
916 NULL, /* addr filter() arg */
917 size, /* max map size */
918 1, /* num discontinuous */
919 size, /* max seg size */
920 BUS_DMA_ALLOCNOW, /* flags */
921 NULL, /* lock() */
922 NULL, /* lock() arg */
923 &dma->tag); /* returned dma tag */
924 if (rc != 0) {
925 BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
926 memset(dma, 0, sizeof(*dma));
927 return (1);
928 }
929
930 rc = bus_dmamem_alloc(dma->tag,
931 (void **)&dma->vaddr,
932 (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
933 &dma->map);
934 if (rc != 0) {
935 BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
936 bus_dma_tag_destroy(dma->tag);
937 memset(dma, 0, sizeof(*dma));
938 return (1);
939 }
940
941 rc = bus_dmamap_load(dma->tag,
942 dma->map,
943 dma->vaddr,
944 size,
945 bxe_dma_map_addr, /* BLOGD in here */
946 dma,
947 BUS_DMA_NOWAIT);
948 if (rc != 0) {
949 BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
950 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
951 bus_dma_tag_destroy(dma->tag);
952 memset(dma, 0, sizeof(*dma));
953 return (1);
954 }
955
956 return (0);
957 }
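
/*
 * Usage sketch (editor's illustration; "example_block" is a hypothetical
 * description string): bxe_dma_alloc() and bxe_dma_free() are strictly
 * paired, and the bxe_dma struct tracks everything needed for teardown.
 */
static inline int
bxe_dma_example(struct bxe_softc *sc, struct bxe_dma *dma)
{
    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, dma, "example_block") != 0) {
        return (1);
    }
    /* dma->vaddr is the CPU-visible address, dma->paddr the bus address */
    bxe_dma_free(sc, dma); /* syncs, unloads, frees, and destroys the tag */
    return (0);
}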
958
959 void
960 bxe_dma_free(struct bxe_softc *sc,
961 struct bxe_dma *dma)
962 {
963 if (dma->size > 0) {
964 DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
965
966 bus_dmamap_sync(dma->tag, dma->map,
967 (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
968 bus_dmamap_unload(dma->tag, dma->map);
969 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
970 bus_dma_tag_destroy(dma->tag);
971 }
972
973 memset(dma, 0, sizeof(*dma));
974 }
975
976 /*
977  * These indirect read and write routines are used only during init.
978 * The locking is handled by the MCP.
979 */
980
981 void
982 bxe_reg_wr_ind(struct bxe_softc *sc,
983 uint32_t addr,
984 uint32_t val)
985 {
986 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
987 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
988 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
989 }
990
991 uint32_t
992 bxe_reg_rd_ind(struct bxe_softc *sc,
993 uint32_t addr)
994 {
995 uint32_t val;
996
997 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
998 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
999 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1000
1001 return (val);
1002 }
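
/*
 * Editor's note: the two routines above are the classic PCI config "window"
 * idiom -- write the GRC register address to PCICFG_GRC_ADDRESS, transfer
 * the data through PCICFG_GRC_DATA, then zero the window so a stray config
 * cycle cannot touch the last-selected register.
 */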
1003
1004 static int
1005 bxe_acquire_hw_lock(struct bxe_softc *sc,
1006 uint32_t resource)
1007 {
1008 uint32_t lock_status;
1009 uint32_t resource_bit = (1 << resource);
1010 int func = SC_FUNC(sc);
1011 uint32_t hw_lock_control_reg;
1012 int cnt;
1013
1014 /* validate the resource is within range */
1015 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1016 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1017 " resource_bit 0x%x\n", resource, resource_bit);
1018 return (-1);
1019 }
1020
1021 if (func <= 5) {
1022 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1023 } else {
1024 hw_lock_control_reg =
1025 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1026 }
1027
1028 /* validate the resource is not already taken */
1029 lock_status = REG_RD(sc, hw_lock_control_reg);
1030 if (lock_status & resource_bit) {
1031 BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1032 resource, lock_status, resource_bit);
1033 return (-1);
1034 }
1035
1036 /* try every 5ms for 5 seconds */
1037 for (cnt = 0; cnt < 1000; cnt++) {
1038 REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1039 lock_status = REG_RD(sc, hw_lock_control_reg);
1040 if (lock_status & resource_bit) {
1041 return (0);
1042 }
1043 DELAY(5000);
1044 }
1045
1046 BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1047 resource, resource_bit);
1048 return (-1);
1049 }
1050
1051 static int
1052 bxe_release_hw_lock(struct bxe_softc *sc,
1053 uint32_t resource)
1054 {
1055 uint32_t lock_status;
1056 uint32_t resource_bit = (1 << resource);
1057 int func = SC_FUNC(sc);
1058 uint32_t hw_lock_control_reg;
1059
1060 /* validate the resource is within range */
1061 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1062 BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1063 " resource_bit 0x%x\n", resource, resource_bit);
1064 return (-1);
1065 }
1066
1067 if (func <= 5) {
1068 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1069 } else {
1070 hw_lock_control_reg =
1071 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1072 }
1073
1074 /* validate the resource is currently taken */
1075 lock_status = REG_RD(sc, hw_lock_control_reg);
1076 if (!(lock_status & resource_bit)) {
1077 BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1078 resource, lock_status, resource_bit);
1079 return (-1);
1080 }
1081
1082 REG_WR(sc, hw_lock_control_reg, resource_bit);
1083 return (0);
1084 }
1085 static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1086 {
1087 BXE_PHY_LOCK(sc);
1088 bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1089 }
1090
1091 static void bxe_release_phy_lock(struct bxe_softc *sc)
1092 {
1093 bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1094 BXE_PHY_UNLOCK(sc);
1095 }
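/*
 * Editor's note: bxe_acquire_phy_lock()/bxe_release_phy_lock() above show
 * the intended pairing -- take the software mutex first, then the
 * device-wide HW resource lock, and release in the reverse order.
 */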
1096 /*
1097 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1098 * had we done things the other way around, if two pfs from the same port
1099 * would attempt to access nvram at the same time, we could run into a
1100 * scenario such as:
1101 * pf A takes the port lock.
1102 * pf B succeeds in taking the same lock since they are from the same port.
1103 * pf A takes the per pf misc lock. Performs eeprom access.
1104 * pf A finishes. Unlocks the per pf misc lock.
1105 * pf B takes the lock and proceeds to perform its own access.
1106 * pf A unlocks the per port lock, while pf B is still working (!).
1107 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
1108 * access corrupted by pf B).
1109 */
1110 static int
1111 bxe_acquire_nvram_lock(struct bxe_softc *sc)
1112 {
1113 int port = SC_PORT(sc);
1114 int count, i;
1115 uint32_t val = 0;
1116
1117 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1118 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1119
1120 /* adjust timeout for emulation/FPGA */
1121 count = NVRAM_TIMEOUT_COUNT;
1122 if (CHIP_REV_IS_SLOW(sc)) {
1123 count *= 100;
1124 }
1125
1126 /* request access to nvram interface */
1127 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1128 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1129
1130 for (i = 0; i < count*10; i++) {
1131 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1132 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1133 break;
1134 }
1135
1136 DELAY(5);
1137 }
1138
1139 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1140 BLOGE(sc, "Cannot get access to nvram interface "
1141 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1142 port, val);
1143 return (-1);
1144 }
1145
1146 return (0);
1147 }
1148
1149 static int
1150 bxe_release_nvram_lock(struct bxe_softc *sc)
1151 {
1152 int port = SC_PORT(sc);
1153 int count, i;
1154 uint32_t val = 0;
1155
1156 /* adjust timeout for emulation/FPGA */
1157 count = NVRAM_TIMEOUT_COUNT;
1158 if (CHIP_REV_IS_SLOW(sc)) {
1159 count *= 100;
1160 }
1161
1162 /* relinquish nvram interface */
1163 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1164 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1165
1166 for (i = 0; i < count*10; i++) {
1167 val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1168 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1169 break;
1170 }
1171
1172 DELAY(5);
1173 }
1174
1175 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1176 BLOGE(sc, "Cannot free access to nvram interface "
1177 "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1178 port, val);
1179 return (-1);
1180 }
1181
1182 /* release HW lock: protect against other PFs in PF Direct Assignment */
1183 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1184
1185 return (0);
1186 }
1187
1188 static void
1189 bxe_enable_nvram_access(struct bxe_softc *sc)
1190 {
1191 uint32_t val;
1192
1193 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1194
1195 /* enable both bits, even on read */
1196 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1197 (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1198 }
1199
1200 static void
1201 bxe_disable_nvram_access(struct bxe_softc *sc)
1202 {
1203 uint32_t val;
1204
1205 val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1206
1207 /* disable both bits, even after read */
1208 REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1209 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1210 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1211 }
1212
1213 static int
1214 bxe_nvram_read_dword(struct bxe_softc *sc,
1215 uint32_t offset,
1216 uint32_t *ret_val,
1217 uint32_t cmd_flags)
1218 {
1219 int count, i, rc;
1220 uint32_t val;
1221
1222 /* build the command word */
1223 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1224
1225 /* need to clear DONE bit separately */
1226 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1227
1228 /* address of the NVRAM to read from */
1229 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1230 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1231
1232 /* issue a read command */
1233 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1234
1235 /* adjust timeout for emulation/FPGA */
1236 count = NVRAM_TIMEOUT_COUNT;
1237 if (CHIP_REV_IS_SLOW(sc)) {
1238 count *= 100;
1239 }
1240
1241 /* wait for completion */
1242 *ret_val = 0;
1243 rc = -1;
1244 for (i = 0; i < count; i++) {
1245 DELAY(5);
1246 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1247
1248 if (val & MCPR_NVM_COMMAND_DONE) {
1249 val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1250 /* we read nvram data in cpu order,
1251 * but ethtool sees it as an array of bytes;
1252 * converting to big-endian does the work
1253 */
1254 *ret_val = htobe32(val);
1255 rc = 0;
1256 break;
1257 }
1258 }
1259
1260 if (rc == -1) {
1261 BLOGE(sc, "nvram read timeout expired "
1262 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1263 offset, cmd_flags, val);
1264 }
1265
1266 return (rc);
1267 }
1268
1269 static int
1270 bxe_nvram_read(struct bxe_softc *sc,
1271 uint32_t offset,
1272 uint8_t *ret_buf,
1273 int buf_size)
1274 {
1275 uint32_t cmd_flags;
1276 uint32_t val;
1277 int rc;
1278
1279 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1280 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1281 offset, buf_size);
1282 return (-1);
1283 }
1284
1285 if ((offset + buf_size) > sc->devinfo.flash_size) {
1286 BLOGE(sc, "Invalid parameter, "
1287 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1288 offset, buf_size, sc->devinfo.flash_size);
1289 return (-1);
1290 }
1291
1292 /* request access to nvram interface */
1293 rc = bxe_acquire_nvram_lock(sc);
1294 if (rc) {
1295 return (rc);
1296 }
1297
1298 /* enable access to nvram interface */
1299 bxe_enable_nvram_access(sc);
1300
1301 /* read the first word(s) */
1302 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1303 while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1304 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1305 memcpy(ret_buf, &val, 4);
1306
1307 /* advance to the next dword */
1308 offset += sizeof(uint32_t);
1309 ret_buf += sizeof(uint32_t);
1310 buf_size -= sizeof(uint32_t);
1311 cmd_flags = 0;
1312 }
1313
1314 if (rc == 0) {
1315 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1316 rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1317 memcpy(ret_buf, &val, 4);
1318 }
1319
1320 /* disable access to nvram interface */
1321 bxe_disable_nvram_access(sc);
1322 bxe_release_nvram_lock(sc);
1323
1324 return (rc);
1325 }
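
/*
 * Usage sketch (editor's illustration; the offset 0x100 is hypothetical):
 * bxe_nvram_read() requires a dword-aligned offset and a nonzero,
 * dword-aligned length, and handles the lock/enable/disable sequence
 * internally.
 */
static inline int
bxe_nvram_read_example(struct bxe_softc *sc)
{
    uint8_t buf[8];

    /* read 8 bytes starting at (hypothetical) flash offset 0x100 */
    return (bxe_nvram_read(sc, 0x100, buf, sizeof(buf)));
}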
1326
1327 static int
1328 bxe_nvram_write_dword(struct bxe_softc *sc,
1329 uint32_t offset,
1330 uint32_t val,
1331 uint32_t cmd_flags)
1332 {
1333 int count, i, rc;
1334
1335 /* build the command word */
1336 cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1337
1338 /* need to clear DONE bit separately */
1339 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1340
1341 /* write the data */
1342 REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1343
1344 /* address of the NVRAM to write to */
1345 REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1346 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1347
1348 /* issue the write command */
1349 REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1350
1351 /* adjust timeout for emulation/FPGA */
1352 count = NVRAM_TIMEOUT_COUNT;
1353 if (CHIP_REV_IS_SLOW(sc)) {
1354 count *= 100;
1355 }
1356
1357 /* wait for completion */
1358 rc = -1;
1359 for (i = 0; i < count; i++) {
1360 DELAY(5);
1361 val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1362 if (val & MCPR_NVM_COMMAND_DONE) {
1363 rc = 0;
1364 break;
1365 }
1366 }
1367
1368 if (rc == -1) {
1369 BLOGE(sc, "nvram write timeout expired "
1370 "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1371 offset, cmd_flags, val);
1372 }
1373
1374 return (rc);
1375 }
1376
1377 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1378
1379 static int
1380 bxe_nvram_write1(struct bxe_softc *sc,
1381 uint32_t offset,
1382 uint8_t *data_buf,
1383 int buf_size)
1384 {
1385 uint32_t cmd_flags;
1386 uint32_t align_offset;
1387 uint32_t val;
1388 int rc;
1389
1390 if ((offset + buf_size) > sc->devinfo.flash_size) {
1391 BLOGE(sc, "Invalid parameter, "
1392 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1393 offset, buf_size, sc->devinfo.flash_size);
1394 return (-1);
1395 }
1396
1397 /* request access to nvram interface */
1398 rc = bxe_acquire_nvram_lock(sc);
1399 if (rc) {
1400 return (rc);
1401 }
1402
1403 /* enable access to nvram interface */
1404 bxe_enable_nvram_access(sc);
1405
1406 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1407 align_offset = (offset & ~0x03);
1408 rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1409
1410 if (rc == 0) {
1411 val &= ~(0xff << BYTE_OFFSET(offset));
1412 val |= (*data_buf << BYTE_OFFSET(offset));
1413
1414 /* nvram data is returned as an array of bytes;
1415 * convert it back to cpu order
1416 */
1417 val = be32toh(val);
1418
1419 rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1420 }
1421
1422 /* disable access to nvram interface */
1423 bxe_disable_nvram_access(sc);
1424 bxe_release_nvram_lock(sc);
1425
1426 return (rc);
1427 }
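
/*
 * Worked example (editor's note): writing a single byte at offset 0x102
 * reads the dword at the aligned offset 0x100, clears bits [23:16] since
 * BYTE_OFFSET(0x102) == 8 * (0x102 & 0x03) == 16, merges the new byte into
 * that lane, converts back to CPU order, and writes the dword out.
 */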
1428
1429 static int
1430 bxe_nvram_write(struct bxe_softc *sc,
1431 uint32_t offset,
1432 uint8_t *data_buf,
1433 int buf_size)
1434 {
1435 uint32_t cmd_flags;
1436 uint32_t val;
1437 uint32_t written_so_far;
1438 int rc;
1439
1440 if (buf_size == 1) {
1441 return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1442 }
1443
1444 if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1445 BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1446 offset, buf_size);
1447 return (-1);
1448 }
1449
1450 if (buf_size == 0) {
1451 return (0); /* nothing to do */
1452 }
1453
1454 if ((offset + buf_size) > sc->devinfo.flash_size) {
1455 BLOGE(sc, "Invalid parameter, "
1456 "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1457 offset, buf_size, sc->devinfo.flash_size);
1458 return (-1);
1459 }
1460
1461 /* request access to nvram interface */
1462 rc = bxe_acquire_nvram_lock(sc);
1463 if (rc) {
1464 return (rc);
1465 }
1466
1467 /* enable access to nvram interface */
1468 bxe_enable_nvram_access(sc);
1469
1470 written_so_far = 0;
1471 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1472 while ((written_so_far < buf_size) && (rc == 0)) {
1473 if (written_so_far == (buf_size - sizeof(uint32_t))) {
1474 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1475 } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1476 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1477 } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1478 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1479 }
1480
1481 memcpy(&val, data_buf, 4);
1482
1483 rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1484
1485 /* advance to the next dword */
1486 offset += sizeof(uint32_t);
1487 data_buf += sizeof(uint32_t);
1488 written_so_far += sizeof(uint32_t);
1489 cmd_flags = 0;
1490 }
1491
1492 /* disable access to nvram interface */
1493 bxe_disable_nvram_access(sc);
1494 bxe_release_nvram_lock(sc);
1495
1496 return (rc);
1497 }
1498
1499 /* copy command into DMAE command memory and set DMAE command Go */
1500 void
1501 bxe_post_dmae(struct bxe_softc *sc,
1502 struct dmae_cmd *dmae,
1503 int idx)
1504 {
1505 uint32_t cmd_offset;
1506 int i;
1507
1508 cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1509 for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1510 REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1511 }
1512
1513 REG_WR(sc, dmae_reg_go_c[idx], 1);
1514 }
1515
1516 uint32_t
1517 bxe_dmae_opcode_add_comp(uint32_t opcode,
1518 uint8_t comp_type)
1519 {
1520 return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1521 DMAE_CMD_C_TYPE_ENABLE));
1522 }
1523
1524 uint32_t
1525 bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1526 {
1527 return (opcode & ~DMAE_CMD_SRC_RESET);
1528 }
1529
1530 uint32_t
1531 bxe_dmae_opcode(struct bxe_softc *sc,
1532 uint8_t src_type,
1533 uint8_t dst_type,
1534 uint8_t with_comp,
1535 uint8_t comp_type)
1536 {
1537 uint32_t opcode = 0;
1538
1539 opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1540 (dst_type << DMAE_CMD_DST_SHIFT));
1541
1542 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1543
1544 opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1545
1546 opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1547 (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1548
1549 opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1550
1551 #ifdef __BIG_ENDIAN
1552 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1553 #else
1554 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1555 #endif
1556
1557 if (with_comp) {
1558 opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1559 }
1560
1561 return (opcode);
1562 }
1563
1564 static void
1565 bxe_prep_dmae_with_comp(struct bxe_softc *sc,
1566 struct dmae_cmd *dmae,
1567 uint8_t src_type,
1568 uint8_t dst_type)
1569 {
1570 memset(dmae, 0, sizeof(struct dmae_cmd));
1571
1572 /* set the opcode */
1573 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1574 TRUE, DMAE_COMP_PCI);
1575
1576 /* fill in the completion parameters */
1577 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1578 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1579 dmae->comp_val = DMAE_COMP_VAL;
1580 }
1581
1582 /* issue a DMAE command over the init channel and wait for completion */
1583 static int
1584 bxe_issue_dmae_with_comp(struct bxe_softc *sc,
1585 struct dmae_cmd *dmae)
1586 {
1587 uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1588 int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1589
1590 BXE_DMAE_LOCK(sc);
1591
1592 /* reset completion */
1593 *wb_comp = 0;
1594
1595 /* post the command on the channel used for initializations */
1596 bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1597
1598 /* wait for completion */
1599 DELAY(5);
1600
1601 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1602 if (!timeout ||
1603 (sc->recovery_state != BXE_RECOVERY_DONE &&
1604 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1605 BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1606 *wb_comp, sc->recovery_state);
1607 BXE_DMAE_UNLOCK(sc);
1608 return (DMAE_TIMEOUT);
1609 }
1610
1611 timeout--;
1612 DELAY(50);
1613 }
1614
1615 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1616 BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1617 *wb_comp, sc->recovery_state);
1618 BXE_DMAE_UNLOCK(sc);
1619 return (DMAE_PCI_ERROR);
1620 }
1621
1622 BXE_DMAE_UNLOCK(sc);
1623 return (0);
1624 }
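
/*
 * Worked example (editor's note): on real silicon the loop above polls up
 * to 4000 times at 50 usec per iteration, i.e. roughly 200 msec, before
 * returning DMAE_TIMEOUT (400000 iterations on slow emulation/FPGA).
 */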
1625
1626 void
1627 bxe_read_dmae(struct bxe_softc *sc,
1628 uint32_t src_addr,
1629 uint32_t len32)
1630 {
1631 struct dmae_cmd dmae;
1632 uint32_t *data;
1633 int i, rc;
1634
1635 DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1636
1637 if (!sc->dmae_ready) {
1638 data = BXE_SP(sc, wb_data[0]);
1639
1640 for (i = 0; i < len32; i++) {
1641 data[i] = (CHIP_IS_E1(sc)) ?
1642 bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1643 REG_RD(sc, (src_addr + (i * 4)));
1644 }
1645
1646 return;
1647 }
1648
1649 /* set opcode and fixed command fields */
1650 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1651
1652 /* fill in addresses and len */
1653 dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1654 dmae.src_addr_hi = 0;
1655 dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1656 dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1657 dmae.len = len32;
1658
1659 /* issue the command and wait for completion */
1660 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1661 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1662 }
1663 }
1664
1665 void
1666 bxe_write_dmae(struct bxe_softc *sc,
1667 bus_addr_t dma_addr,
1668 uint32_t dst_addr,
1669 uint32_t len32)
1670 {
1671 struct dmae_cmd dmae;
1672 int rc;
1673
1674 if (!sc->dmae_ready) {
1675 DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1676
1677 if (CHIP_IS_E1(sc)) {
1678 ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1679 } else {
1680 ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1681 }
1682
1683 return;
1684 }
1685
1686 /* set opcode and fixed command fields */
1687 bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1688
1689 /* fill in addresses and len */
1690 dmae.src_addr_lo = U64_LO(dma_addr);
1691 dmae.src_addr_hi = U64_HI(dma_addr);
1692 dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1693 dmae.dst_addr_hi = 0;
1694 dmae.len = len32;
1695
1696 /* issue the command and wait for completion */
1697 if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1698 bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1699 }
1700 }
1701
1702 void
1703 bxe_write_dmae_phys_len(struct bxe_softc *sc,
1704 bus_addr_t phys_addr,
1705 uint32_t addr,
1706 uint32_t len)
1707 {
1708 int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1709 int offset = 0;
1710
1711 while (len > dmae_wr_max) {
1712 bxe_write_dmae(sc,
1713 (phys_addr + offset), /* src DMA address */
1714 (addr + offset), /* dst GRC address */
1715 dmae_wr_max);
1716 offset += (dmae_wr_max * 4);
1717 len -= dmae_wr_max;
1718 }
1719
1720 bxe_write_dmae(sc,
1721 (phys_addr + offset), /* src DMA address */
1722 (addr + offset), /* dst GRC address */
1723 len);
1724 }
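
/*
 * Worked example (editor's note): with a dmae_wr_max of, say, 0x400 dwords,
 * a 0x500-dword transfer issues one 0x400-dword command, advances both
 * addresses by 0x400 * 4 bytes (offset is in bytes; GRC dword resolution is
 * handled inside bxe_write_dmae()), and finishes with a 0x100-dword command.
 */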
1725
1726 void
1727 bxe_set_ctx_validation(struct bxe_softc *sc,
1728 struct eth_context *cxt,
1729 uint32_t cid)
1730 {
1731 /* ustorm cxt validation */
1732 cxt->ustorm_ag_context.cdu_usage =
1733 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1734 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1735 /* xcontext validation */
1736 cxt->xstorm_ag_context.cdu_reserved =
1737 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1738 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1739 }
1740
1741 static void
1742 bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1743 uint8_t port,
1744 uint8_t fw_sb_id,
1745 uint8_t sb_index,
1746 uint8_t ticks)
1747 {
1748 uint32_t addr =
1749 (BAR_CSTRORM_INTMEM +
1750 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1751
1752 REG_WR8(sc, addr, ticks);
1753
1754 BLOGD(sc, DBG_LOAD,
1755 "port %d fw_sb_id %d sb_index %d ticks %d\n",
1756 port, fw_sb_id, sb_index, ticks);
1757 }
1758
1759 static void
1760 bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1761 uint8_t port,
1762 uint16_t fw_sb_id,
1763 uint8_t sb_index,
1764 uint8_t disable)
1765 {
1766 uint32_t enable_flag =
1767 (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1768 uint32_t addr =
1769 (BAR_CSTRORM_INTMEM +
1770 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1771 uint8_t flags;
1772
1773 /* clear and set */
1774 flags = REG_RD8(sc, addr);
1775 flags &= ~HC_INDEX_DATA_HC_ENABLED;
1776 flags |= enable_flag;
1777 REG_WR8(sc, addr, flags);
1778
1779 BLOGD(sc, DBG_LOAD,
1780 "port %d fw_sb_id %d sb_index %d disable %d\n",
1781 port, fw_sb_id, sb_index, disable);
1782 }
1783
1784 void
1785 bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1786 uint8_t fw_sb_id,
1787 uint8_t sb_index,
1788 uint8_t disable,
1789 uint16_t usec)
1790 {
1791 int port = SC_PORT(sc);
1792 uint8_t ticks = (usec / 4); /* XXX ??? */
1793
1794 bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1795
1796 disable = (disable) ? 1 : ((usec) ? 0 : 1);
1797 bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1798 }
1799
1800 void
1801 elink_cb_udelay(struct bxe_softc *sc,
1802 uint32_t usecs)
1803 {
1804 DELAY(usecs);
1805 }
1806
1807 uint32_t
1808 elink_cb_reg_read(struct bxe_softc *sc,
1809 uint32_t reg_addr)
1810 {
1811 return (REG_RD(sc, reg_addr));
1812 }
1813
1814 void
1815 elink_cb_reg_write(struct bxe_softc *sc,
1816 uint32_t reg_addr,
1817 uint32_t val)
1818 {
1819 REG_WR(sc, reg_addr, val);
1820 }
1821
1822 void
1823 elink_cb_reg_wb_write(struct bxe_softc *sc,
1824 uint32_t offset,
1825 uint32_t *wb_write,
1826 uint16_t len)
1827 {
1828 REG_WR_DMAE(sc, offset, wb_write, len);
1829 }
1830
1831 void
1832 elink_cb_reg_wb_read(struct bxe_softc *sc,
1833 uint32_t offset,
1834 uint32_t *wb_write,
1835 uint16_t len)
1836 {
1837 REG_RD_DMAE(sc, offset, wb_write, len);
1838 }
1839
1840 uint8_t
1841 elink_cb_path_id(struct bxe_softc *sc)
1842 {
1843 return (SC_PATH(sc));
1844 }
1845
1846 void
1847 elink_cb_event_log(struct bxe_softc *sc,
1848 const elink_log_id_t elink_log_id,
1849 ...)
1850 {
1851 /* XXX */
1852 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1853 }
1854
1855 static int
1856 bxe_set_spio(struct bxe_softc *sc,
1857 int spio,
1858 uint32_t mode)
1859 {
1860 uint32_t spio_reg;
1861
1862 /* Only 2 SPIOs are configurable */
1863 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1864 BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1865 return (-1);
1866 }
1867
1868 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1869
1870 /* read SPIO and mask except the float bits */
1871 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1872
1873 switch (mode) {
1874 case MISC_SPIO_OUTPUT_LOW:
1875 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1876 /* clear FLOAT and set CLR */
1877 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1878 spio_reg |= (spio << MISC_SPIO_CLR_POS);
1879 break;
1880
1881 case MISC_SPIO_OUTPUT_HIGH:
1882 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1883 /* clear FLOAT and set SET */
1884 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1885 spio_reg |= (spio << MISC_SPIO_SET_POS);
1886 break;
1887
1888 case MISC_SPIO_INPUT_HI_Z:
1889 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1890 /* set FLOAT */
1891 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1892 break;
1893
1894 default:
1895 break;
1896 }
1897
1898 REG_WR(sc, MISC_REG_SPIO, spio_reg);
1899 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1900
1901 return (0);
1902 }
1903
1904 static int
1905 bxe_gpio_read(struct bxe_softc *sc,
1906 int gpio_num,
1907 uint8_t port)
1908 {
1909 /* The GPIO should be swapped if swap register is set and active */
1910 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1911 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1912 int gpio_shift = (gpio_num +
1913 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1914 uint32_t gpio_mask = (1 << gpio_shift);
1915 uint32_t gpio_reg;
1916
1917 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918 BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1919 " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1920 gpio_mask);
1921 return (-1);
1922 }
1923
1924 /* read GPIO value */
1925 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1926
1927 /* get the requested pin value */
1928 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1929 }
1930
1931 static int
1932 bxe_gpio_write(struct bxe_softc *sc,
1933 int gpio_num,
1934 uint32_t mode,
1935 uint8_t port)
1936 {
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1939 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1940 int gpio_shift = (gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1942 uint32_t gpio_mask = (1 << gpio_shift);
1943 uint32_t gpio_reg;
1944
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1947 " gpio_shift %d gpio_mask 0x%x\n",
1948 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1949 return (-1);
1950 }
1951
1952 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1953
1954 /* read GPIO and mask except the float bits */
1955 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1956
1957 switch (mode) {
1958 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1959 BLOGD(sc, DBG_PHY,
1960 "Set GPIO %d (shift %d) -> output low\n",
1961 gpio_num, gpio_shift);
1962 /* clear FLOAT and set CLR */
1963 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1964 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1965 break;
1966
1967 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1968 BLOGD(sc, DBG_PHY,
1969 "Set GPIO %d (shift %d) -> output high\n",
1970 gpio_num, gpio_shift);
1971 /* clear FLOAT and set SET */
1972 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1973 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1974 break;
1975
1976 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1977 BLOGD(sc, DBG_PHY,
1978 "Set GPIO %d (shift %d) -> input\n",
1979 gpio_num, gpio_shift);
1980 /* set FLOAT */
1981 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1982 break;
1983
1984 default:
1985 break;
1986 }
1987
1988 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1989 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1990
1991 return (0);
1992 }
1993
1994 static int
1995 bxe_gpio_mult_write(struct bxe_softc *sc,
1996 uint8_t pins,
1997 uint32_t mode)
1998 {
1999 uint32_t gpio_reg;
2000
2001 /* any port swapping should be handled by caller */
2002
2003 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2004
2005 /* read GPIO and mask except the float bits */
2006 gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2007 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2008 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2009 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2010
2011 switch (mode) {
2012 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2013 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2014 /* set CLR */
2015 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2016 break;
2017
2018 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2019 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2020 /* set SET */
2021 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2022 break;
2023
2024 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2026 /* set FLOAT */
2027 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2028 break;
2029
2030 default:
2031 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2032 " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2033 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2034 return (-1);
2035 }
2036
2037 REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2038 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2039
2040 return (0);
2041 }
2042
2043 static int
2044 bxe_gpio_int_write(struct bxe_softc *sc,
2045 int gpio_num,
2046 uint32_t mode,
2047 uint8_t port)
2048 {
2049 /* The GPIO should be swapped if swap register is set and active */
2050 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2051 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2052 int gpio_shift = (gpio_num +
2053 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2054 uint32_t gpio_mask = (1 << gpio_shift);
2055 uint32_t gpio_reg;
2056
2057 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2058 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2059 " gpio_shift %d gpio_mask 0x%x\n",
2060 gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2061 return (-1);
2062 }
2063
2064 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2065
2066 /* read GPIO int */
2067 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2068
2069 switch (mode) {
2070 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2071 BLOGD(sc, DBG_PHY,
2072 "Clear GPIO INT %d (shift %d) -> output low\n",
2073 gpio_num, gpio_shift);
2074 /* clear SET and set CLR */
2075 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2076 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2077 break;
2078
2079 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2080 BLOGD(sc, DBG_PHY,
2081 "Set GPIO INT %d (shift %d) -> output high\n",
2082 gpio_num, gpio_shift);
2083 /* clear CLR and set SET */
2084 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2085 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2086 break;
2087
2088 default:
2089 break;
2090 }
2091
2092 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2093 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2094
2095 return (0);
2096 }
2097
2098 uint32_t
2099 elink_cb_gpio_read(struct bxe_softc *sc,
2100 uint16_t gpio_num,
2101 uint8_t port)
2102 {
2103 return (bxe_gpio_read(sc, gpio_num, port));
2104 }
2105
2106 uint8_t
2107 elink_cb_gpio_write(struct bxe_softc *sc,
2108 uint16_t gpio_num,
2109 uint8_t mode, /* 0=low 1=high */
2110 uint8_t port)
2111 {
2112 return (bxe_gpio_write(sc, gpio_num, mode, port));
2113 }
2114
2115 uint8_t
2116 elink_cb_gpio_mult_write(struct bxe_softc *sc,
2117 uint8_t pins,
2118 uint8_t mode) /* 0=low 1=high */
2119 {
2120 return (bxe_gpio_mult_write(sc, pins, mode));
2121 }
2122
2123 uint8_t
2124 elink_cb_gpio_int_write(struct bxe_softc *sc,
2125 uint16_t gpio_num,
2126 uint8_t mode, /* 0=low 1=high */
2127 uint8_t port)
2128 {
2129 return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2130 }
2131
2132 void
2133 elink_cb_notify_link_changed(struct bxe_softc *sc)
2134 {
2135 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2136 (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2137 }
2138
2139 /* send the MCP a request, block until there is a reply */
2140 uint32_t
2141 elink_cb_fw_command(struct bxe_softc *sc,
2142 uint32_t command,
2143 uint32_t param)
2144 {
2145 int mb_idx = SC_FW_MB_IDX(sc);
2146 uint32_t seq;
2147 uint32_t rc = 0;
2148 uint32_t cnt = 1;
2149 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2150
2151 BXE_FWMB_LOCK(sc);
2152
2153 seq = ++sc->fw_seq;
2154 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2155 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2156
2157 BLOGD(sc, DBG_PHY,
2158 "wrote command 0x%08x to FW MB param 0x%08x\n",
2159 (command | seq), param);
2160
2161     /* Let the FW do its magic. Give it up to 5 seconds... */
2162 do {
2163 DELAY(delay * 1000);
2164 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2165 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2166
2167 BLOGD(sc, DBG_PHY,
2168 "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2169 cnt*delay, rc, seq);
2170
2171 /* is this a reply to our command? */
2172 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2173 rc &= FW_MSG_CODE_MASK;
2174 } else {
2175 /* Ruh-roh! */
2176 BLOGE(sc, "FW failed to respond!\n");
2177 // XXX bxe_fw_dump(sc);
2178 rc = 0;
2179 }
2180
2181 BXE_FWMB_UNLOCK(sc);
2182 return (rc);
2183 }
2184
2185 static uint32_t
2186 bxe_fw_command(struct bxe_softc *sc,
2187 uint32_t command,
2188 uint32_t param)
2189 {
2190 return (elink_cb_fw_command(sc, command, param));
2191 }
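
/*
 * Illustrative sketch only (bxe_example_mcp_done() is hypothetical and
 * not part of the driver): a typical consumer of the mailbox protocol
 * above. elink_cb_fw_command() folds a missing reply into a return
 * value of 0, so a zero result is treated as a timeout.
 */
static int
bxe_example_mcp_done(struct bxe_softc *sc)
{
    uint32_t fw_resp;

    /* post the command and poll up to ~5 seconds for the echoed seq */
    fw_resp = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    if (fw_resp == 0) {
        /* the MCP never echoed our sequence number */
        return (ETIMEDOUT);
    }

    return (0);
}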
2192
2193 static void
2194 __storm_memset_dma_mapping(struct bxe_softc *sc,
2195 uint32_t addr,
2196 bus_addr_t mapping)
2197 {
2198 REG_WR(sc, addr, U64_LO(mapping));
2199 REG_WR(sc, (addr + 4), U64_HI(mapping));
2200 }
2201
2202 static void
2203 storm_memset_spq_addr(struct bxe_softc *sc,
2204 bus_addr_t mapping,
2205 uint16_t abs_fid)
2206 {
2207 uint32_t addr = (XSEM_REG_FAST_MEMORY +
2208 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2209 __storm_memset_dma_mapping(sc, addr, mapping);
2210 }
2211
2212 static void
2213 storm_memset_vf_to_pf(struct bxe_softc *sc,
2214 uint16_t abs_fid,
2215 uint16_t pf_id)
2216 {
2217 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2220 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2221 }
2222
2223 static void
2224 storm_memset_func_en(struct bxe_softc *sc,
2225 uint16_t abs_fid,
2226 uint8_t enable)
2227 {
2228 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2231 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2232 }
2233
2234 static void
2235 storm_memset_eq_data(struct bxe_softc *sc,
2236 struct event_ring_data *eq_data,
2237 uint16_t pfid)
2238 {
2239 uint32_t addr;
2240 size_t size;
2241
2242 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2243 size = sizeof(struct event_ring_data);
2244 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2245 }
2246
2247 static void
2248 storm_memset_eq_prod(struct bxe_softc *sc,
2249 uint16_t eq_prod,
2250 uint16_t pfid)
2251 {
2252 uint32_t addr = (BAR_CSTRORM_INTMEM +
2253 CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2254 REG_WR16(sc, addr, eq_prod);
2255 }
2256
2257 /*
2258 * Post a slowpath command.
2259 *
2260 * A slowpath command is used to propagate a configuration change through
2261 * the controller in a controlled manner, allowing each STORM processor and
2262 * other H/W blocks to phase in the change. The commands sent on the
2263 * slowpath are referred to as ramrods. Depending on the ramrod used the
2264  * slowpath are referred to as ramrods. Depending on the ramrod used, the
2265 * breakdown of ramrods and how they complete:
2266 *
2267 * RAMROD_CMD_ID_ETH_PORT_SETUP
2268 * Used to setup the leading connection on a port. Completes on the
2269 * Receive Completion Queue (RCQ) of that port (typically fp[0]).
2270 *
2271 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2272 * Used to setup an additional connection on a port. Completes on the
2273 * RCQ of the multi-queue/RSS connection being initialized.
2274 *
2275 * RAMROD_CMD_ID_ETH_STAT_QUERY
2276 * Used to force the storm processors to update the statistics database
2277  * in host memory. This ramrod is sent on the leading connection CID and
2278 * completes as an index increment of the CSTORM on the default status
2279 * block.
2280 *
2281 * RAMROD_CMD_ID_ETH_UPDATE
2282  * Used to update the state of the leading connection, usually to update
2283 * the RSS indirection table. Completes on the RCQ of the leading
2284 * connection. (Not currently used under FreeBSD until OS support becomes
2285 * available.)
2286 *
2287 * RAMROD_CMD_ID_ETH_HALT
2288 * Used when tearing down a connection prior to driver unload. Completes
2289 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2290 * use this on the leading connection.
2291 *
2292 * RAMROD_CMD_ID_ETH_SET_MAC
2293 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on
2294 * the RCQ of the leading connection.
2295 *
2296 * RAMROD_CMD_ID_ETH_CFC_DEL
2297  * Used when tearing down a connection prior to driver unload. Completes
2298 * on the RCQ of the leading connection (since the current connection
2299 * has been completely removed from controller memory).
2300 *
2301 * RAMROD_CMD_ID_ETH_PORT_DEL
2302 * Used to tear down the leading connection prior to driver unload,
2303 * typically fp[0]. Completes as an index increment of the CSTORM on the
2304 * default status block.
2305 *
2306 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2307 * Used for connection offload. Completes on the RCQ of the multi-queue
2308 * RSS connection that is being offloaded. (Not currently used under
2309 * FreeBSD.)
2310 *
2311 * There can only be one command pending per function.
2312 *
2313 * Returns:
2314 * 0 = Success, !0 = Failure.
2315 */
2316
2317 /* must be called under the spq lock */
2318 static inline
2319 struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2320 {
2321 struct eth_spe *next_spe = sc->spq_prod_bd;
2322
2323 if (sc->spq_prod_bd == sc->spq_last_bd) {
2324 /* wrap back to the first eth_spq */
2325 sc->spq_prod_bd = sc->spq;
2326 sc->spq_prod_idx = 0;
2327 } else {
2328 sc->spq_prod_bd++;
2329 sc->spq_prod_idx++;
2330 }
2331
2332 return (next_spe);
2333 }
2334
2335 /* must be called under the spq lock */
2336 static inline
2337 void bxe_sp_prod_update(struct bxe_softc *sc)
2338 {
2339 int func = SC_FUNC(sc);
2340
2341 /*
2342 * Make sure that BD data is updated before writing the producer.
2343      * BD data is written to memory and the producer is read from
2344      * memory, so we need a full memory barrier to ensure the ordering.
2345 */
2346 mb();
2347
2348 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2349 sc->spq_prod_idx);
2350
2351 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2352 BUS_SPACE_BARRIER_WRITE);
2353 }
2354
2355 /**
2356 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2357 *
2358 * @cmd: command to check
2359 * @cmd_type: command type
2360 */
2361 static inline
2362 int bxe_is_contextless_ramrod(int cmd,
2363 int cmd_type)
2364 {
2365 if ((cmd_type == NONE_CONNECTION_TYPE) ||
2366 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2367 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2368 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2369 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2370 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2371 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2372 return (TRUE);
2373 } else {
2374 return (FALSE);
2375 }
2376 }
2377
2378 /**
2379 * bxe_sp_post - place a single command on an SP ring
2380 *
2381 * @sc: driver handle
2382 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
2383 * @cid: SW CID the command is related to
2384 * @data_hi: command private data address (high 32 bits)
2385 * @data_lo: command private data address (low 32 bits)
2386 * @cmd_type: command type (e.g. NONE, ETH)
2387 *
2388 * SP data is handled as if it's always an address pair, thus data fields are
2389 * not swapped to little endian in upper functions. Instead this function swaps
2390 * data as if it's two uint32 fields.
2391 */
2392 int
2393 bxe_sp_post(struct bxe_softc *sc,
2394 int command,
2395 int cid,
2396 uint32_t data_hi,
2397 uint32_t data_lo,
2398 int cmd_type)
2399 {
2400 struct eth_spe *spe;
2401 uint16_t type;
2402 int common;
2403
2404 common = bxe_is_contextless_ramrod(command, cmd_type);
2405
2406 BXE_SP_LOCK(sc);
2407
2408 if (common) {
2409 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2410 BLOGE(sc, "EQ ring is full!\n");
2411 BXE_SP_UNLOCK(sc);
2412 return (-1);
2413 }
2414 } else {
2415 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2416 BLOGE(sc, "SPQ ring is full!\n");
2417 BXE_SP_UNLOCK(sc);
2418 return (-1);
2419 }
2420 }
2421
2422 spe = bxe_sp_get_next(sc);
2423
2424     /* the CID needs the port number encoded in it */
2425 spe->hdr.conn_and_cmd_data =
2426 htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2427
2428 type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2429
2430 /* TBD: Check if it works for VFs */
2431 type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2432 SPE_HDR_T_FUNCTION_ID);
2433
2434 spe->hdr.type = htole16(type);
2435
2436 spe->data.update_data_addr.hi = htole32(data_hi);
2437 spe->data.update_data_addr.lo = htole32(data_lo);
2438
2439 /*
2440 * It's ok if the actual decrement is issued towards the memory
2441      * somewhere between the lock and unlock. Thus no further explicit
2442      * memory barrier is needed.
2443 */
2444 if (common) {
2445 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2446 } else {
2447 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2448 }
2449
2450 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2451 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2452 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2453 BLOGD(sc, DBG_SP,
2454 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2455 sc->spq_prod_idx,
2456 (uint32_t)U64_HI(sc->spq_dma.paddr),
2457 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2458 command,
2459 common,
2460 HW_CID(sc, cid),
2461 data_hi,
2462 data_lo,
2463 type,
2464 atomic_load_acq_long(&sc->cq_spq_left),
2465 atomic_load_acq_long(&sc->eq_spq_left));
2466
2467 bxe_sp_prod_update(sc);
2468
2469 BXE_SP_UNLOCK(sc);
2470 return (0);
2471 }
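
/*
 * Illustrative sketch only (bxe_example_post_update() is hypothetical
 * and not part of the driver): posting one of the ramrods described
 * above. rdata_mapping is assumed to point at ramrod data the caller
 * prepared in DMA-able memory; the completion then arrives on the RCQ
 * and is dispatched by bxe_sp_event().
 */
static int
bxe_example_post_update(struct bxe_softc *sc,
                        int cid,
                        bus_addr_t rdata_mapping)
{
    /* only one slowpath command may be pending per function */
    return (bxe_sp_post(sc,
                        RAMROD_CMD_ID_ETH_CLIENT_UPDATE, /* command */
                        cid,                             /* SW CID */
                        U64_HI(rdata_mapping),           /* data hi */
                        U64_LO(rdata_mapping),           /* data lo */
                        ETH_CONNECTION_TYPE));           /* cmd type */
}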
2472
2473 /**
2474 * bxe_debug_print_ind_table - prints the indirection table configuration.
2475 *
2476  * @sc: driver handle
2477 * @p: pointer to rss configuration
2478 */
2479
2480 /*
2481 * FreeBSD Device probe function.
2482 *
2483 * Compares the device found to the driver's list of supported devices and
2484  * reports back to the BSD loader whether this is the right driver for the device.
2485 * This is the driver entry function called from the "kldload" command.
2486 *
2487 * Returns:
2488 * BUS_PROBE_DEFAULT on success, positive value on failure.
2489 */
2490 static int
2491 bxe_probe(device_t dev)
2492 {
2493 struct bxe_device_type *t;
2494 char *descbuf;
2495 uint16_t did, sdid, svid, vid;
2496
2497 /* Find our device structure */
2498 t = bxe_devs;
2499
2500 /* Get the data for the device to be probed. */
2501 vid = pci_get_vendor(dev);
2502 did = pci_get_device(dev);
2503 svid = pci_get_subvendor(dev);
2504 sdid = pci_get_subdevice(dev);
2505
2506 /* Look through the list of known devices for a match. */
2507 while (t->bxe_name != NULL) {
2508 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2509 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2510 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2511 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2512 if (descbuf == NULL)
2513 return (ENOMEM);
2514
2515 /* Print out the device identity. */
2516 snprintf(descbuf, BXE_DEVDESC_MAX,
2517 "%s (%c%d) BXE v:%s", t->bxe_name,
2518 (((pci_read_config(dev, PCIR_REVID, 4) &
2519 0xf0) >> 4) + 'A'),
2520 (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2521 BXE_DRIVER_VERSION);
2522
2523 device_set_desc_copy(dev, descbuf);
2524 free(descbuf, M_TEMP);
2525 return (BUS_PROBE_DEFAULT);
2526 }
2527 t++;
2528 }
2529
2530 return (ENXIO);
2531 }
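
/*
 * Worked example (illustrative): a PCI revision ID of 0x11 decodes
 * above as high nibble 1 -> 'A' + 1 = 'B' and low nibble 1 -> 1,
 * yielding a "(B1)" chip revision in the probe description string.
 */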
2532
2533 static void
2534 bxe_init_mutexes(struct bxe_softc *sc)
2535 {
2536 #ifdef BXE_CORE_LOCK_SX
2537 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2538 "bxe%d_core_lock", sc->unit);
2539 sx_init(&sc->core_sx, sc->core_sx_name);
2540 #else
2541 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2542 "bxe%d_core_lock", sc->unit);
2543 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2544 #endif
2545
2546 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2547 "bxe%d_sp_lock", sc->unit);
2548 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2549
2550 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2551 "bxe%d_dmae_lock", sc->unit);
2552 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2553
2554 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2555 "bxe%d_phy_lock", sc->unit);
2556 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2557
2558 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2559 "bxe%d_fwmb_lock", sc->unit);
2560 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2561
2562 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2563 "bxe%d_print_lock", sc->unit);
2564 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2565
2566 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2567 "bxe%d_stats_lock", sc->unit);
2568 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2569
2570 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2571 "bxe%d_mcast_lock", sc->unit);
2572 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2573 }
2574
2575 static void
2576 bxe_release_mutexes(struct bxe_softc *sc)
2577 {
2578 #ifdef BXE_CORE_LOCK_SX
2579 sx_destroy(&sc->core_sx);
2580 #else
2581 if (mtx_initialized(&sc->core_mtx)) {
2582 mtx_destroy(&sc->core_mtx);
2583 }
2584 #endif
2585
2586 if (mtx_initialized(&sc->sp_mtx)) {
2587 mtx_destroy(&sc->sp_mtx);
2588 }
2589
2590 if (mtx_initialized(&sc->dmae_mtx)) {
2591 mtx_destroy(&sc->dmae_mtx);
2592 }
2593
2594 if (mtx_initialized(&sc->port.phy_mtx)) {
2595 mtx_destroy(&sc->port.phy_mtx);
2596 }
2597
2598 if (mtx_initialized(&sc->fwmb_mtx)) {
2599 mtx_destroy(&sc->fwmb_mtx);
2600 }
2601
2602 if (mtx_initialized(&sc->print_mtx)) {
2603 mtx_destroy(&sc->print_mtx);
2604 }
2605
2606 if (mtx_initialized(&sc->stats_mtx)) {
2607 mtx_destroy(&sc->stats_mtx);
2608 }
2609
2610 if (mtx_initialized(&sc->mcast_mtx)) {
2611 mtx_destroy(&sc->mcast_mtx);
2612 }
2613 }
2614
2615 static void
2616 bxe_tx_disable(struct bxe_softc* sc)
2617 {
2618 if_t ifp = sc->ifp;
2619
2620 /* tell the stack the driver is stopped and TX queue is full */
2621 if (ifp != NULL) {
2622 if_setdrvflags(ifp, 0);
2623 }
2624 }
2625
2626 static void
2627 bxe_drv_pulse(struct bxe_softc *sc)
2628 {
2629 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2630 sc->fw_drv_pulse_wr_seq);
2631 }
2632
2633 static inline uint16_t
2634 bxe_tx_avail(struct bxe_softc *sc,
2635 struct bxe_fastpath *fp)
2636 {
2637 int16_t used;
2638 uint16_t prod;
2639 uint16_t cons;
2640
2641 prod = fp->tx_bd_prod;
2642 cons = fp->tx_bd_cons;
2643
2644 used = SUB_S16(prod, cons);
2645
2646     return ((int16_t)sc->tx_ring_size - used);
2647 }
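
/*
 * Worked example (illustrative, assuming SUB_S16() is a modular 16-bit
 * subtraction): if prod has wrapped to 0x0005 while cons is still
 * 0xFFFB, then used = SUB_S16(0x0005, 0xFFFB) = 10, and
 * tx_ring_size - 10 BDs are reported available, exactly as if no wrap
 * had occurred.
 */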
2648
2649 static inline int
2650 bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2651 {
2652 uint16_t hw_cons;
2653
2654 mb(); /* status block fields can change */
2655 hw_cons = le16toh(*fp->tx_cons_sb);
2656 return (hw_cons != fp->tx_pkt_cons);
2657 }
2658
2659 static inline uint8_t
2660 bxe_has_tx_work(struct bxe_fastpath *fp)
2661 {
2662 /* expand this for multi-cos if ever supported */
2663 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2664 }
2665
2666 static inline int
2667 bxe_has_rx_work(struct bxe_fastpath *fp)
2668 {
2669 uint16_t rx_cq_cons_sb;
2670
2671 mb(); /* status block fields can change */
2672 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2673 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2674 rx_cq_cons_sb++;
2675 return (fp->rx_cq_cons != rx_cq_cons_sb);
2676 }
2677
2678 static void
2679 bxe_sp_event(struct bxe_softc *sc,
2680 struct bxe_fastpath *fp,
2681 union eth_rx_cqe *rr_cqe)
2682 {
2683 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2684 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2685 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2686 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2687
2688 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2689 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2690
2691 switch (command) {
2692 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2693 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2694 drv_cmd = ECORE_Q_CMD_UPDATE;
2695 break;
2696
2697 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2698 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2699 drv_cmd = ECORE_Q_CMD_SETUP;
2700 break;
2701
2702 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2703 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2704 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2705 break;
2706
2707 case (RAMROD_CMD_ID_ETH_HALT):
2708 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2709 drv_cmd = ECORE_Q_CMD_HALT;
2710 break;
2711
2712 case (RAMROD_CMD_ID_ETH_TERMINATE):
2713         BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2714 drv_cmd = ECORE_Q_CMD_TERMINATE;
2715 break;
2716
2717 case (RAMROD_CMD_ID_ETH_EMPTY):
2718 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2719 drv_cmd = ECORE_Q_CMD_EMPTY;
2720 break;
2721
2722 default:
2723 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2724 command, fp->index);
2725 return;
2726 }
2727
2728 if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2729 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2730 /*
2731 * q_obj->complete_cmd() failure means that this was
2732 * an unexpected completion.
2733 *
2734 * In this case we don't want to increase the sc->spq_left
2735          * because apparently we haven't sent this command in the
2736          * first place.
2737 */
2738 // bxe_panic(sc, ("Unexpected SP completion\n"));
2739 return;
2740 }
2741
2742 atomic_add_acq_long(&sc->cq_spq_left, 1);
2743
2744 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2745 atomic_load_acq_long(&sc->cq_spq_left));
2746 }
2747
2748 /*
2749 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2750 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2751 * the current aggregation queue as in-progress.
2752 */
2753 static void
2754 bxe_tpa_start(struct bxe_softc *sc,
2755 struct bxe_fastpath *fp,
2756 uint16_t queue,
2757 uint16_t cons,
2758 uint16_t prod,
2759 struct eth_fast_path_rx_cqe *cqe)
2760 {
2761 struct bxe_sw_rx_bd tmp_bd;
2762 struct bxe_sw_rx_bd *rx_buf;
2763 struct eth_rx_bd *rx_bd;
2764 int max_agg_queues __diagused;
2765 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2766 uint16_t index;
2767
2768 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2769 "cons=%d prod=%d\n",
2770 fp->index, queue, cons, prod);
2771
2772 max_agg_queues = MAX_AGG_QS(sc);
2773
2774 KASSERT((queue < max_agg_queues),
2775 ("fp[%02d] invalid aggr queue (%d >= %d)!",
2776 fp->index, queue, max_agg_queues));
2777
2778 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2779 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2780 fp->index, queue));
2781
2782 /* copy the existing mbuf and mapping from the TPA pool */
2783 tmp_bd = tpa_info->bd;
2784
2785 if (tmp_bd.m == NULL) {
2786 uint32_t *tmp;
2787
2788 tmp = (uint32_t *)cqe;
2789
2790         BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2791 fp->index, queue, cons, prod);
2792 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2793 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2794
2795 /* XXX Error handling? */
2796 return;
2797 }
2798
2799 /* change the TPA queue to the start state */
2800 tpa_info->state = BXE_TPA_STATE_START;
2801 tpa_info->placement_offset = cqe->placement_offset;
2802 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2803 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2804 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2805
2806 fp->rx_tpa_queue_used |= (1 << queue);
2807
2808 /*
2809 * If all the buffer descriptors are filled with mbufs then fill in
2810 * the current consumer index with a new BD. Else if a maximum Rx
2811 * buffer limit is imposed then fill in the next producer index.
2812 */
2813 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2814 prod : cons;
2815
2816 /* move the received mbuf and mapping to TPA pool */
2817 tpa_info->bd = fp->rx_mbuf_chain[cons];
2818
2819 /* release any existing RX BD mbuf mappings */
2820 if (cons != index) {
2821 rx_buf = &fp->rx_mbuf_chain[cons];
2822
2823 if (rx_buf->m_map != NULL) {
2824 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2825 BUS_DMASYNC_POSTREAD);
2826 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2827 }
2828
2829 /*
2830 * We get here when the maximum number of rx buffers is less than
2831          * RX_BD_USABLE. The mbuf is already saved above, so it's OK to NULL
2832          * it out here without concern for a memory leak.
2833 */
2834 fp->rx_mbuf_chain[cons].m = NULL;
2835 }
2836
2837 /* update the Rx SW BD with the mbuf info from the TPA pool */
2838 fp->rx_mbuf_chain[index] = tmp_bd;
2839
2840 /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2841 rx_bd = &fp->rx_chain[index];
2842 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2843 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2844 }
2845
2846 /*
2847 * When a TPA aggregation is completed, loop through the individual mbufs
2848 * of the aggregation, combining them into a single mbuf which will be sent
2849 * up the stack. Refill all freed SGEs with mbufs as we go along.
2850 */
2851 static int
2852 bxe_fill_frag_mbuf(struct bxe_softc *sc,
2853 struct bxe_fastpath *fp,
2854 struct bxe_sw_tpa_info *tpa_info,
2855 uint16_t queue,
2856 uint16_t pages,
2857 struct mbuf *m,
2858 struct eth_end_agg_rx_cqe *cqe,
2859 uint16_t cqe_idx)
2860 {
2861 struct mbuf *m_frag;
2862 uint32_t frag_len, frag_size, i;
2863 uint16_t sge_idx;
2864 int rc = 0;
2865 int j;
2866
2867 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2868
2869 BLOGD(sc, DBG_LRO,
2870 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2871 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2872
2873 /* make sure the aggregated frame is not too big to handle */
2874 if (pages > 8 * PAGES_PER_SGE) {
2875
2876 uint32_t *tmp = (uint32_t *)cqe;
2877
2878 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2879 "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2880 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2881 tpa_info->len_on_bd, frag_size);
2882
2883 BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2884 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2885
2886 bxe_panic(sc, ("sge page count error\n"));
2887 return (EINVAL);
2888 }
2889
2890 /*
2891 * Scan through the scatter gather list pulling individual mbufs into a
2892 * single mbuf for the host stack.
2893 */
2894 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2895 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2896
2897 /*
2898 * Firmware gives the indices of the SGE as if the ring is an array
2899 * (meaning that the "next" element will consume 2 indices).
2900 */
2901 frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2902
2903 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2904 "sge_idx=%d frag_size=%d frag_len=%d\n",
2905 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2906
2907 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2908
2909 /* allocate a new mbuf for the SGE */
2910 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2911 if (rc) {
2912 /* Leave all remaining SGEs in the ring! */
2913 return (rc);
2914 }
2915
2916 /* update the fragment length */
2917 m_frag->m_len = frag_len;
2918
2919 /* concatenate the fragment to the head mbuf */
2920 m_cat(m, m_frag);
2921 fp->eth_q_stats.mbuf_alloc_sge--;
2922
2923 /* update the TPA mbuf size and remaining fragment size */
2924 m->m_pkthdr.len += frag_len;
2925 frag_size -= frag_len;
2926 }
2927
2928 BLOGD(sc, DBG_LRO,
2929 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2930 fp->index, queue, frag_size);
2931
2932 return (rc);
2933 }
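
/*
 * Worked example (illustrative, assuming 4KB SGE pages and one page per
 * SGE): a 9000-byte aggregated frame with len_on_bd = 1500 leaves
 * frag_size = 7500 in the SGEs, and SGE_PAGE_ALIGN(7500) >>
 * SGE_PAGE_SHIFT = 8192 >> 12 = 2 pages, so the loop above concatenates
 * two SGE mbufs onto the head mbuf.
 */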
2934
2935 static inline void
2936 bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2937 {
2938 int i, j;
2939
2940 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2941 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2942
2943 for (j = 0; j < 2; j++) {
2944 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2945 idx--;
2946 }
2947 }
2948 }
2949
2950 static inline void
2951 bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2952 {
2953     /* set the mask to all 1's; it's faster to compare to 0 than to 0xf's */
2954 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2955
2956 /*
2957      * Clear the last two indices in each page. These are the indices that
2958 * correspond to the "next" element, hence will never be indicated and
2959 * should be removed from the calculations.
2960 */
2961 bxe_clear_sge_mask_next_elems(fp);
2962 }
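
/*
 * Example (illustrative): if RX_SGE_TOTAL_PER_PAGE were 512, indices
 * 510 and 511 of each ring page hold the "next page" pointer rather
 * than real SGEs, so the helpers above keep those two bits cleared in
 * every page and they never appear "in use" to the producer update
 * logic below.
 */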
2963
2964 static inline void
2965 bxe_update_last_max_sge(struct bxe_fastpath *fp,
2966 uint16_t idx)
2967 {
2968 uint16_t last_max = fp->last_max_sge;
2969
2970 if (SUB_S16(idx, last_max) > 0) {
2971 fp->last_max_sge = idx;
2972 }
2973 }
2974
2975 static inline void
2976 bxe_update_sge_prod(struct bxe_softc *sc,
2977 struct bxe_fastpath *fp,
2978 uint16_t sge_len,
2979 union eth_sgl_or_raw_data *cqe)
2980 {
2981 uint16_t last_max, last_elem, first_elem;
2982 uint16_t delta = 0;
2983 uint16_t i;
2984
2985 if (!sge_len) {
2986 return;
2987 }
2988
2989 /* first mark all used pages */
2990 for (i = 0; i < sge_len; i++) {
2991 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2992 RX_SGE(le16toh(cqe->sgl[i])));
2993 }
2994
2995 BLOGD(sc, DBG_LRO,
2996 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2997 fp->index, sge_len - 1,
2998 le16toh(cqe->sgl[sge_len - 1]));
2999
3000 /* assume that the last SGE index is the biggest */
3001 bxe_update_last_max_sge(fp,
3002 le16toh(cqe->sgl[sge_len - 1]));
3003
3004 last_max = RX_SGE(fp->last_max_sge);
3005 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3006 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3007
3008 /* if ring is not full */
3009 if (last_elem + 1 != first_elem) {
3010 last_elem++;
3011 }
3012
3013 /* now update the prod */
3014 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3015 if (__predict_true(fp->sge_mask[i])) {
3016 break;
3017 }
3018
3019 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3020 delta += BIT_VEC64_ELEM_SZ;
3021 }
3022
3023 if (delta > 0) {
3024 fp->rx_sge_prod += delta;
3025 /* clear page-end entries */
3026 bxe_clear_sge_mask_next_elems(fp);
3027 }
3028
3029 BLOGD(sc, DBG_LRO,
3030 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3031 fp->index, fp->last_max_sge, fp->rx_sge_prod);
3032 }
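
/*
 * Worked example (illustrative): sge_mask[] tracks the SGE ring in
 * 64-bit chunks. If the chunk at first_elem has gone fully to zero (all
 * 64 SGEs consumed) while the next chunk still has bits set, the loop
 * above refills the empty chunk with all ones and advances rx_sge_prod
 * by BIT_VEC64_ELEM_SZ (64 entries), returning those SGEs to the
 * hardware.
 */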
3033
3034 /*
3035 * The aggregation on the current TPA queue has completed. Pull the individual
3036 * mbuf fragments together into a single mbuf, perform all necessary checksum
3037  * calculations, and send the resulting mbuf to the stack.
3038 */
3039 static void
3040 bxe_tpa_stop(struct bxe_softc *sc,
3041 struct bxe_fastpath *fp,
3042 struct bxe_sw_tpa_info *tpa_info,
3043 uint16_t queue,
3044 uint16_t pages,
3045 struct eth_end_agg_rx_cqe *cqe,
3046 uint16_t cqe_idx)
3047 {
3048 if_t ifp = sc->ifp;
3049 struct mbuf *m;
3050 int rc = 0;
3051
3052 BLOGD(sc, DBG_LRO,
3053 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3054 fp->index, queue, tpa_info->placement_offset,
3055 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3056
3057 m = tpa_info->bd.m;
3058
3059 /* allocate a replacement before modifying existing mbuf */
3060 rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3061 if (rc) {
3062 /* drop the frame and log an error */
3063 fp->eth_q_stats.rx_soft_errors++;
3064 goto bxe_tpa_stop_exit;
3065 }
3066
3067 /* we have a replacement, fixup the current mbuf */
3068 m_adj(m, tpa_info->placement_offset);
3069 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3070
3071 /* mark the checksums valid (taken care of by the firmware) */
3072 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3073 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3074 m->m_pkthdr.csum_data = 0xffff;
3075 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3076 CSUM_IP_VALID |
3077 CSUM_DATA_VALID |
3078 CSUM_PSEUDO_HDR);
3079
3080 /* aggregate all of the SGEs into a single mbuf */
3081 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3082 if (rc) {
3083 /* drop the packet and log an error */
3084 fp->eth_q_stats.rx_soft_errors++;
3085 m_freem(m);
3086 } else {
3087 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3088 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3089 m->m_flags |= M_VLANTAG;
3090 }
3091
3092             /* assign the packet to this interface */
3093 if_setrcvif(m, ifp);
3094
3095 /* specify what RSS queue was used for this flow */
3096 m->m_pkthdr.flowid = fp->index;
3097 BXE_SET_FLOWID(m);
3098
3099 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3100 fp->eth_q_stats.rx_tpa_pkts++;
3101
3102 /* pass the frame to the stack */
3103 if_input(ifp, m);
3104 }
3105
3106 /* we passed an mbuf up the stack or dropped the frame */
3107 fp->eth_q_stats.mbuf_alloc_tpa--;
3108
3109 bxe_tpa_stop_exit:
3110
3111 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3112 fp->rx_tpa_queue_used &= ~(1 << queue);
3113 }
3114
3115 static uint8_t
3116 bxe_service_rxsgl(
3117 struct bxe_fastpath *fp,
3118 uint16_t len,
3119 uint16_t lenonbd,
3120 struct mbuf *m,
3121 struct eth_fast_path_rx_cqe *cqe_fp)
3122 {
3123 struct mbuf *m_frag;
3124 uint16_t frags, frag_len;
3125 uint16_t sge_idx = 0;
3126 uint16_t j;
3127 uint8_t i, rc = 0;
3128 uint32_t frag_size;
3129
3130 /* adjust the mbuf */
3131 m->m_len = lenonbd;
3132
3133 frag_size = len - lenonbd;
3134 frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3135
3136 for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3137 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3138
3139 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3140 frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3141 m_frag->m_len = frag_len;
3142
3143 /* allocate a new mbuf for the SGE */
3144 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3145 if (rc) {
3146 /* Leave all remaining SGEs in the ring! */
3147 return (rc);
3148 }
3149 fp->eth_q_stats.mbuf_alloc_sge--;
3150
3151 /* concatenate the fragment to the head mbuf */
3152 m_cat(m, m_frag);
3153
3154 frag_size -= frag_len;
3155 }
3156
3157 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3158
3159     return (rc);
3160 }
3161
3162 static uint8_t
3163 bxe_rxeof(struct bxe_softc *sc,
3164 struct bxe_fastpath *fp)
3165 {
3166 if_t ifp = sc->ifp;
3167 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3168 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3169 int rx_pkts = 0;
3170 int rc = 0;
3171
3172 BXE_FP_RX_LOCK(fp);
3173
3174 /* CQ "next element" is of the size of the regular element */
3175 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3176 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3177 hw_cq_cons++;
3178 }
3179
3180 bd_cons = fp->rx_bd_cons;
3181 bd_prod = fp->rx_bd_prod;
3182 bd_prod_fw = bd_prod;
3183 sw_cq_cons = fp->rx_cq_cons;
3184 sw_cq_prod = fp->rx_cq_prod;
3185
3186 /*
3187 * Memory barrier necessary as speculative reads of the rx
3188 * buffer can be ahead of the index in the status block
3189 */
3190 rmb();
3191
3192 BLOGD(sc, DBG_RX,
3193 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3194 fp->index, hw_cq_cons, sw_cq_cons);
3195
3196 while (sw_cq_cons != hw_cq_cons) {
3197 struct bxe_sw_rx_bd *rx_buf = NULL;
3198 union eth_rx_cqe *cqe;
3199 struct eth_fast_path_rx_cqe *cqe_fp;
3200 uint8_t cqe_fp_flags;
3201 enum eth_rx_cqe_type cqe_fp_type;
3202 uint16_t len, lenonbd, pad;
3203 struct mbuf *m = NULL;
3204
3205 comp_ring_cons = RCQ(sw_cq_cons);
3206 bd_prod = RX_BD(bd_prod);
3207 bd_cons = RX_BD(bd_cons);
3208
3209 cqe = &fp->rcq_chain[comp_ring_cons];
3210 cqe_fp = &cqe->fast_path_cqe;
3211 cqe_fp_flags = cqe_fp->type_error_flags;
3212 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3213
3214 BLOGD(sc, DBG_RX,
3215 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3216 "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3217 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3218 fp->index,
3219 hw_cq_cons,
3220 sw_cq_cons,
3221 bd_prod,
3222 bd_cons,
3223 CQE_TYPE(cqe_fp_flags),
3224 cqe_fp_flags,
3225 cqe_fp->status_flags,
3226 le32toh(cqe_fp->rss_hash_result),
3227 le16toh(cqe_fp->vlan_tag),
3228 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3229 le16toh(cqe_fp->len_on_bd));
3230
3231 /* is this a slowpath msg? */
3232 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3233 bxe_sp_event(sc, fp, cqe);
3234 goto next_cqe;
3235 }
3236
3237 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3238
3239 if (!CQE_TYPE_FAST(cqe_fp_type)) {
3240 struct bxe_sw_tpa_info *tpa_info;
3241 uint16_t frag_size, pages;
3242 uint8_t queue;
3243
3244 if (CQE_TYPE_START(cqe_fp_type)) {
3245 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3246 bd_cons, bd_prod, cqe_fp);
3247 m = NULL; /* packet not ready yet */
3248 goto next_rx;
3249 }
3250
3251 KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3252 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3253
3254 queue = cqe->end_agg_cqe.queue_index;
3255 tpa_info = &fp->rx_tpa_info[queue];
3256
3257 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3258 fp->index, queue);
3259
3260 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3261 tpa_info->len_on_bd);
3262 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3263
3264 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3265 &cqe->end_agg_cqe, comp_ring_cons);
3266
3267 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3268
3269 goto next_cqe;
3270 }
3271
3272 /* non TPA */
3273
3274 /* is this an error packet? */
3275 if (__predict_false(cqe_fp_flags &
3276 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3277 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3278 fp->eth_q_stats.rx_soft_errors++;
3279 goto next_rx;
3280 }
3281
3282 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3283 lenonbd = le16toh(cqe_fp->len_on_bd);
3284 pad = cqe_fp->placement_offset;
3285
3286 m = rx_buf->m;
3287
3288 if (__predict_false(m == NULL)) {
3289 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3290 bd_cons, fp->index);
3291 goto next_rx;
3292 }
3293
3294 /* XXX double copy if packet length under a threshold */
3295
3296 /*
3297 * If all the buffer descriptors are filled with mbufs then fill in
3298 * the current consumer index with a new BD. Else if a maximum Rx
3299 * buffer limit is imposed then fill in the next producer index.
3300 */
3301 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3302 (sc->max_rx_bufs != RX_BD_USABLE) ?
3303 bd_prod : bd_cons);
3304 if (rc != 0) {
3305
3306 /* we simply reuse the received mbuf and don't post it to the stack */
3307 m = NULL;
3308
3309 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3310 fp->index, rc);
3311 fp->eth_q_stats.rx_soft_errors++;
3312
3313 if (sc->max_rx_bufs != RX_BD_USABLE) {
3314 /* copy this consumer index to the producer index */
3315 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3316 sizeof(struct bxe_sw_rx_bd));
3317 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3318 }
3319
3320 goto next_rx;
3321 }
3322
3323 /* current mbuf was detached from the bd */
3324 fp->eth_q_stats.mbuf_alloc_rx--;
3325
3326 /* we allocated a replacement mbuf, fixup the current one */
3327 m_adj(m, pad);
3328 m->m_pkthdr.len = m->m_len = len;
3329
3330 if ((len > 60) && (len > lenonbd)) {
3331 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3332 rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3333 if (rc)
3334 break;
3335 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3336 } else if (lenonbd < len) {
3337 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3338 }
3339
3340         /* assign the packet to this interface */
3341 if_setrcvif(m, ifp);
3342
3343         /* assume no hardware checksum has been completed */
3344 m->m_pkthdr.csum_flags = 0;
3345
3346 /* validate checksum if offload enabled */
3347 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3348 /* check for a valid IP frame */
3349 if (!(cqe->fast_path_cqe.status_flags &
3350 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3351 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3352 if (__predict_false(cqe_fp_flags &
3353 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3354 fp->eth_q_stats.rx_hw_csum_errors++;
3355 } else {
3356 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3357 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3358 }
3359 }
3360
3361 /* check for a valid TCP/UDP frame */
3362 if (!(cqe->fast_path_cqe.status_flags &
3363 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3364 if (__predict_false(cqe_fp_flags &
3365 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3366 fp->eth_q_stats.rx_hw_csum_errors++;
3367 } else {
3368 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3369 m->m_pkthdr.csum_data = 0xFFFF;
3370 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3371 CSUM_PSEUDO_HDR);
3372 }
3373 }
3374 }
3375
3376 /* if there is a VLAN tag then flag that info */
3377 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3378 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3379 m->m_flags |= M_VLANTAG;
3380 }
3381
3382 /* specify what RSS queue was used for this flow */
3383 m->m_pkthdr.flowid = fp->index;
3384 BXE_SET_FLOWID(m);
3385
3386 next_rx:
3387
3388 bd_cons = RX_BD_NEXT(bd_cons);
3389 bd_prod = RX_BD_NEXT(bd_prod);
3390 bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3391
3392 /* pass the frame to the stack */
3393 if (__predict_true(m != NULL)) {
3394 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3395 rx_pkts++;
3396 if_input(ifp, m);
3397 }
3398
3399 next_cqe:
3400
3401 sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3402 sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3403
3404 /* limit spinning on the queue */
3405 if (rc != 0)
3406 break;
3407
3408 if (rx_pkts == sc->rx_budget) {
3409 fp->eth_q_stats.rx_budget_reached++;
3410 break;
3411 }
3412 } /* while work to do */
3413
3414 fp->rx_bd_cons = bd_cons;
3415 fp->rx_bd_prod = bd_prod_fw;
3416 fp->rx_cq_cons = sw_cq_cons;
3417 fp->rx_cq_prod = sw_cq_prod;
3418
3419 /* Update producers */
3420 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3421
3422 fp->eth_q_stats.rx_pkts += rx_pkts;
3423 fp->eth_q_stats.rx_calls++;
3424
3425 BXE_FP_RX_UNLOCK(fp);
3426
3427 return (sw_cq_cons != hw_cq_cons);
3428 }
3429
3430 static uint16_t
3431 bxe_free_tx_pkt(struct bxe_softc *sc,
3432 struct bxe_fastpath *fp,
3433 uint16_t idx)
3434 {
3435 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3436 struct eth_tx_start_bd *tx_start_bd;
3437 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3438 uint16_t new_cons;
3439 int nbd;
3440
3441 /* unmap the mbuf from non-paged memory */
3442 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3443
3444 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3445 nbd = le16toh(tx_start_bd->nbd) - 1;
3446
3447 new_cons = (tx_buf->first_bd + nbd);
3448
3449 /* free the mbuf */
3450 if (__predict_true(tx_buf->m != NULL)) {
3451 m_freem(tx_buf->m);
3452 fp->eth_q_stats.mbuf_alloc_tx--;
3453 } else {
3454 fp->eth_q_stats.tx_chain_lost_mbuf++;
3455 }
3456
3457 tx_buf->m = NULL;
3458 tx_buf->first_bd = 0;
3459
3460 return (new_cons);
3461 }
3462
3463 /* transmit timeout watchdog */
3464 static int
3465 bxe_watchdog(struct bxe_softc *sc,
3466 struct bxe_fastpath *fp)
3467 {
3468 BXE_FP_TX_LOCK(fp);
3469
3470 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3471 BXE_FP_TX_UNLOCK(fp);
3472 return (0);
3473 }
3474
3475 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3476
3477 BXE_FP_TX_UNLOCK(fp);
3478 BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3479 taskqueue_enqueue_timeout(taskqueue_thread,
3480 &sc->sp_err_timeout_task, hz/10);
3481
3482 return (-1);
3483 }
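
/*
 * Worked example (illustrative): bxe_txeof() arms the watchdog by
 * setting watchdog_timer to BXE_TX_TIMEOUT whenever transmits are
 * pending. Each call above decrements the counter, and the timeout path
 * fires only on the call that takes it from 1 to 0; a counter of 0
 * means "disarmed" and is never decremented.
 */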
3484
3485 /* processes transmit completions */
3486 static uint8_t
3487 bxe_txeof(struct bxe_softc *sc,
3488 struct bxe_fastpath *fp)
3489 {
3490 if_t ifp = sc->ifp;
3491 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3492 uint16_t tx_bd_avail;
3493
3494 BXE_FP_TX_LOCK_ASSERT(fp);
3495
3496 bd_cons = fp->tx_bd_cons;
3497 hw_cons = le16toh(*fp->tx_cons_sb);
3498 sw_cons = fp->tx_pkt_cons;
3499
3500 while (sw_cons != hw_cons) {
3501 pkt_cons = TX_BD(sw_cons);
3502
3503 BLOGD(sc, DBG_TX,
3504 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3505 fp->index, hw_cons, sw_cons, pkt_cons);
3506
3507 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3508
3509 sw_cons++;
3510 }
3511
3512 fp->tx_pkt_cons = sw_cons;
3513 fp->tx_bd_cons = bd_cons;
3514
3515 BLOGD(sc, DBG_TX,
3516 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3517 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3518
3519 mb();
3520
3521 tx_bd_avail = bxe_tx_avail(sc, fp);
3522
3523 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3524 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3525 } else {
3526 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3527 }
3528
3529 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3530 /* reset the watchdog timer if there are pending transmits */
3531 fp->watchdog_timer = BXE_TX_TIMEOUT;
3532 return (TRUE);
3533 } else {
3534 /* clear watchdog when there are no pending transmits */
3535 fp->watchdog_timer = 0;
3536 return (FALSE);
3537 }
3538 }
3539
3540 static void
3541 bxe_drain_tx_queues(struct bxe_softc *sc)
3542 {
3543 struct bxe_fastpath *fp;
3544 int i, count;
3545
3546 /* wait until all TX fastpath tasks have completed */
3547 for (i = 0; i < sc->num_queues; i++) {
3548 fp = &sc->fp[i];
3549
3550 count = 1000;
3551
3552 while (bxe_has_tx_work(fp)) {
3553
3554 BXE_FP_TX_LOCK(fp);
3555 bxe_txeof(sc, fp);
3556 BXE_FP_TX_UNLOCK(fp);
3557
3558 if (count == 0) {
3559 BLOGE(sc, "Timeout waiting for fp[%d] "
3560 "transmits to complete!\n", i);
3561 bxe_panic(sc, ("tx drain failure\n"));
3562 return;
3563 }
3564
3565 count--;
3566 DELAY(1000);
3567 rmb();
3568 }
3569 }
3570
3571 return;
3572 }
3573
3574 static int
3575 bxe_del_all_macs(struct bxe_softc *sc,
3576 struct ecore_vlan_mac_obj *mac_obj,
3577 int mac_type,
3578 uint8_t wait_for_comp)
3579 {
3580 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3581 int rc;
3582
3583     /* wait for completion of the requested operation */
3584 if (wait_for_comp) {
3585 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3586 }
3587
3588     /* set the MAC type of the addresses we want to clear */
3589 bxe_set_bit(mac_type, &vlan_mac_flags);
3590
3591 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3592 if (rc < 0) {
3593 BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3594 rc, mac_type, wait_for_comp);
3595 }
3596
3597 return (rc);
3598 }
3599
3600 static int
3601 bxe_fill_accept_flags(struct bxe_softc *sc,
3602 uint32_t rx_mode,
3603 unsigned long *rx_accept_flags,
3604 unsigned long *tx_accept_flags)
3605 {
3606 /* Clear the flags first */
3607 *rx_accept_flags = 0;
3608 *tx_accept_flags = 0;
3609
3610 switch (rx_mode) {
3611 case BXE_RX_MODE_NONE:
3612 /*
3613 * 'drop all' supersedes any accept flags that may have been
3614 * passed to the function.
3615 */
3616 break;
3617
3618 case BXE_RX_MODE_NORMAL:
3619 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3620 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3621 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3622
3623 /* internal switching mode */
3624 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3625 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3626 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3627
3628 break;
3629
3630 case BXE_RX_MODE_ALLMULTI:
3631 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3632 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3633 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3634
3635 /* internal switching mode */
3636 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3637 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3638 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3639
3640 break;
3641
3642 case BXE_RX_MODE_PROMISC:
3643 /*
3644          * According to the definition of SI mode, an iface in promisc mode
3645          * should receive matched and unmatched (in the resolution of the port)
3646 * unicast packets.
3647 */
3648 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3649 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3650 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3651 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3652
3653 /* internal switching mode */
3654 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3655 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3656
3657 if (IS_MF_SI(sc)) {
3658 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3659 } else {
3660 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3661 }
3662
3663 break;
3664
3665 default:
3666 BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3667 return (-1);
3668 }
3669
3670 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3671 if (rx_mode != BXE_RX_MODE_NONE) {
3672 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3673 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3674 }
3675
3676 return (0);
3677 }
3678
3679 static int
3680 bxe_set_q_rx_mode(struct bxe_softc *sc,
3681 uint8_t cl_id,
3682 unsigned long rx_mode_flags,
3683 unsigned long rx_accept_flags,
3684 unsigned long tx_accept_flags,
3685 unsigned long ramrod_flags)
3686 {
3687 struct ecore_rx_mode_ramrod_params ramrod_param;
3688 int rc;
3689
3690 memset(&ramrod_param, 0, sizeof(ramrod_param));
3691
3692 /* Prepare ramrod parameters */
3693 ramrod_param.cid = 0;
3694 ramrod_param.cl_id = cl_id;
3695 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3696 ramrod_param.func_id = SC_FUNC(sc);
3697
3698 ramrod_param.pstate = &sc->sp_state;
3699 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3700
3701 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3702 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3703
3704 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3705
3706 ramrod_param.ramrod_flags = ramrod_flags;
3707 ramrod_param.rx_mode_flags = rx_mode_flags;
3708
3709 ramrod_param.rx_accept_flags = rx_accept_flags;
3710 ramrod_param.tx_accept_flags = tx_accept_flags;
3711
3712 rc = ecore_config_rx_mode(sc, &ramrod_param);
3713 if (rc < 0) {
3714 BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3715 "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3716 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3717 (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3718 (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3719 return (rc);
3720 }
3721
3722 return (0);
3723 }
3724
3725 static int
3726 bxe_set_storm_rx_mode(struct bxe_softc *sc)
3727 {
3728 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3729 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3730 int rc;
3731
3732 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3733 &tx_accept_flags);
3734 if (rc) {
3735 return (rc);
3736 }
3737
3738 bxe_set_bit(RAMROD_RX, &ramrod_flags);
3739 bxe_set_bit(RAMROD_TX, &ramrod_flags);
3740
3741 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3742 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3743 rx_accept_flags, tx_accept_flags,
3744 ramrod_flags));
3745 }
3746
3747 /* returns the "mcp load_code" according to the global load_count array */
3748 static int
3749 bxe_nic_load_no_mcp(struct bxe_softc *sc)
3750 {
3751 int path = SC_PATH(sc);
3752 int port = SC_PORT(sc);
3753
3754 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3755 path, load_count[path][0], load_count[path][1],
3756 load_count[path][2]);
3757 load_count[path][0]++;
3758 load_count[path][1 + port]++;
3759 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3760 path, load_count[path][0], load_count[path][1],
3761 load_count[path][2]);
3762 if (load_count[path][0] == 1) {
3763 return (FW_MSG_CODE_DRV_LOAD_COMMON);
3764 } else if (load_count[path][1 + port] == 1) {
3765 return (FW_MSG_CODE_DRV_LOAD_PORT);
3766 } else {
3767 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3768 }
3769 }
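/*
 * Illustrative trace of the no-MCP load accounting above (hypothetical
 * ordering on path 0): the first function to load, say on port 0, takes the
 * counts from {0,0,0} to {1,1,0} and, since load_count[path][0] == 1, gets
 * DRV_LOAD_COMMON (global HW init); the first function on port 1 takes them
 * to {2,1,1} and, since load_count[path][1 + 1] == 1, gets DRV_LOAD_PORT;
 * a later function on port 0 takes them to {3,2,1} and gets
 * DRV_LOAD_FUNCTION. bxe_nic_unload_no_mcp() below mirrors this in reverse
 * with the UNLOAD codes.
 */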
3770
3771 /* returns the "mcp unload_code" according to the global load_count array */
3772 static int
3773 bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3774 {
3775 int port = SC_PORT(sc);
3776 int path = SC_PATH(sc);
3777
3778 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3779 path, load_count[path][0], load_count[path][1],
3780 load_count[path][2]);
3781 load_count[path][0]--;
3782 load_count[path][1 + port]--;
3783 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3784 path, load_count[path][0], load_count[path][1],
3785 load_count[path][2]);
3786 if (load_count[path][0] == 0) {
3787 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3788 } else if (load_count[path][1 + port] == 0) {
3789 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3790 } else {
3791 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3792 }
3793 }
3794
3795 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3796 static uint32_t
3797 bxe_send_unload_req(struct bxe_softc *sc,
3798 int unload_mode)
3799 {
3800 uint32_t reset_code = 0;
3801
3802     /* Select the UNLOAD request mode (WOL is not yet supported, so both paths request WOL_DIS) */
3803 if (unload_mode == UNLOAD_NORMAL) {
3804 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3805 } else {
3806 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3807 }
3808
3809 /* Send the request to the MCP */
3810 if (!BXE_NOMCP(sc)) {
3811 reset_code = bxe_fw_command(sc, reset_code, 0);
3812 } else {
3813 reset_code = bxe_nic_unload_no_mcp(sc);
3814 }
3815
3816 return (reset_code);
3817 }
3818
3819 /* send UNLOAD_DONE command to the MCP */
3820 static void
3821 bxe_send_unload_done(struct bxe_softc *sc,
3822 uint8_t keep_link)
3823 {
3824 uint32_t reset_param =
3825 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3826
3827 /* Report UNLOAD_DONE to MCP */
3828 if (!BXE_NOMCP(sc)) {
3829 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3830 }
3831 }
3832
3833 static int
3834 bxe_func_wait_started(struct bxe_softc *sc)
3835 {
3836 int tout = 50;
3837
3838 if (!sc->port.pmf) {
3839 return (0);
3840 }
3841
3842 /*
3843 * (assumption: No Attention from MCP at this stage)
3844 * PMF probably in the middle of TX disable/enable transaction
3845      * 1. Sync ISR for the default SB
3846      * 2. Sync SP queue - this guarantees us that attention handling started
3847      * 3. Wait until the TX disable/enable transaction completes
3848      *
3849      * 1+2 guarantee that if the DCBX attention was scheduled it already changed
3850      * the pending bit of the transaction from STARTED-->TX_STOPPED; if we have
3851      * already received the completion for the transaction the state is
3852      * TX_STOPPED. The state will return to STARTED after completion of the
3853      * TX_STOPPED-->STARTED transaction.
3854 */
3855
3856 /* XXX make sure default SB ISR is done */
3857 /* need a way to synchronize an irq (intr_mtx?) */
3858
3859 /* XXX flush any work queues */
3860
3861 while (ecore_func_get_state(sc, &sc->func_obj) !=
3862 ECORE_F_STATE_STARTED && tout--) {
3863 DELAY(20000);
3864 }
3865
3866 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3867 /*
3868 * Failed to complete the transaction in a "good way"
3869 * Force both transactions with CLR bit.
3870 */
3871 struct ecore_func_state_params func_params = { NULL };
3872
3873 BLOGE(sc, "Unexpected function state! "
3874 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3875
3876 func_params.f_obj = &sc->func_obj;
3877 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3878
3879 /* STARTED-->TX_STOPPED */
3880 func_params.cmd = ECORE_F_CMD_TX_STOP;
3881 ecore_func_state_change(sc, &func_params);
3882
3883 /* TX_STOPPED-->STARTED */
3884 func_params.cmd = ECORE_F_CMD_TX_START;
3885 return (ecore_func_state_change(sc, &func_params));
3886 }
3887
3888 return (0);
3889 }
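/*
 * Note on the forced recovery above: as the CLR bit comment implies,
 * RAMROD_DRV_CLR_ONLY makes ecore_func_state_change() walk the state
 * machine driver-side only, without sending actual ramrods to the chip, so
 * the STARTED-->TX_STOPPED-->STARTED round trip just repairs the driver's
 * bookkeeping for a transaction the DCBX attention handler left
 * half-finished.
 */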
3890
3891 static int
3892 bxe_stop_queue(struct bxe_softc *sc,
3893 int index)
3894 {
3895 struct bxe_fastpath *fp = &sc->fp[index];
3896 struct ecore_queue_state_params q_params = { NULL };
3897 int rc;
3898
3899 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3900
3901 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3902 /* We want to wait for completion in this context */
3903 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3904
3905 /* Stop the primary connection: */
3906
3907 /* ...halt the connection */
3908 q_params.cmd = ECORE_Q_CMD_HALT;
3909 rc = ecore_queue_state_change(sc, &q_params);
3910 if (rc) {
3911 return (rc);
3912 }
3913
3914 /* ...terminate the connection */
3915 q_params.cmd = ECORE_Q_CMD_TERMINATE;
3916 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3917 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3918 rc = ecore_queue_state_change(sc, &q_params);
3919 if (rc) {
3920 return (rc);
3921 }
3922
3923 /* ...delete cfc entry */
3924 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3925 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3926 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3927 return (ecore_queue_state_change(sc, &q_params));
3928 }
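/*
 * The HALT -> TERMINATE -> CFC_DEL ordering above follows the ramrod
 * protocol for tearing down a connection: HALT quiesces the queue,
 * TERMINATE stops firmware processing for the connection, and CFC_DEL
 * releases the connection's CFC entry. Because RAMROD_COMP_WAIT is set,
 * each step blocks until its completion arrives before the next is issued.
 */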
3929
3930 /* wait for the outstanding SP commands */
3931 static inline uint8_t
3932 bxe_wait_sp_comp(struct bxe_softc *sc,
3933 unsigned long mask)
3934 {
3935 unsigned long tmp;
3936 int tout = 5000; /* wait for 5 secs tops */
3937
3938 while (tout--) {
3939 mb();
3940 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3941 return (TRUE);
3942 }
3943
3944 DELAY(1000);
3945 }
3946
3947 mb();
3948
3949 tmp = atomic_load_acq_long(&sc->sp_state);
3950 if (tmp & mask) {
3951 BLOGE(sc, "Filtering completion timed out: "
3952 "sp_state 0x%lx, mask 0x%lx\n",
3953 tmp, mask);
3954 return (FALSE);
3955 }
3956
3957     return (TRUE); /* mask cleared between the final poll and the re-read */
3958 }
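/*
 * The loop above is the driver's stock poll-with-timeout idiom: re-read an
 * atomically updated flag word until the bits of interest clear or the
 * budget expires. A minimal standalone sketch (kept under #if 0; the names
 * are illustrative, not driver API):
 */
#if 0
static uint8_t
example_wait_bits_clear(volatile unsigned long *state, unsigned long mask,
                        int tries, int poll_delay_us)
{
    while (tries--) {
        mb();                                         /* see fresh state */
        if ((atomic_load_acq_long(state) & mask) == 0) {
            return (TRUE);                            /* bits cleared */
        }
        DELAY(poll_delay_us);                         /* back off, retry */
    }
    return (FALSE);                                   /* budget exhausted */
}
#endif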
3959
3960 static int
3961 bxe_func_stop(struct bxe_softc *sc)
3962 {
3963 struct ecore_func_state_params func_params = { NULL };
3964 int rc;
3965
3966 /* prepare parameters for function state transitions */
3967 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3968 func_params.f_obj = &sc->func_obj;
3969 func_params.cmd = ECORE_F_CMD_STOP;
3970
3971 /*
3972 * Try to stop the function the 'good way'. If it fails (in case
3973 * of a parity error during bxe_chip_cleanup()) and we are
3974 * not in a debug mode, perform a state transaction in order to
3975 * enable further HW_RESET transaction.
3976 */
3977 rc = ecore_func_state_change(sc, &func_params);
3978 if (rc) {
3979 BLOGE(sc, "FUNC_STOP ramrod failed. "
3980 "Running a dry transaction (%d)\n", rc);
3981 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3982 return (ecore_func_state_change(sc, &func_params));
3983 }
3984
3985 return (0);
3986 }
3987
3988 static int
3989 bxe_reset_hw(struct bxe_softc *sc,
3990 uint32_t load_code)
3991 {
3992 struct ecore_func_state_params func_params = { NULL };
3993
3994 /* Prepare parameters for function state transitions */
3995 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3996
3997 func_params.f_obj = &sc->func_obj;
3998 func_params.cmd = ECORE_F_CMD_HW_RESET;
3999
4000 func_params.params.hw_init.load_phase = load_code;
4001
4002 return (ecore_func_state_change(sc, &func_params));
4003 }
4004
4005 static void
4006 bxe_int_disable_sync(struct bxe_softc *sc,
4007 int disable_hw)
4008 {
4009 if (disable_hw) {
4010 /* prevent the HW from sending interrupts */
4011 bxe_int_disable(sc);
4012 }
4013
4014 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4015 /* make sure all ISRs are done */
4016
4017 /* XXX make sure sp_task is not running */
4018 /* cancel and flush work queues */
4019 }
4020
4021 static void
4022 bxe_chip_cleanup(struct bxe_softc *sc,
4023 uint32_t unload_mode,
4024 uint8_t keep_link)
4025 {
4026 int port = SC_PORT(sc);
4027 struct ecore_mcast_ramrod_params rparam = { NULL };
4028 uint32_t reset_code;
4029 int i, rc = 0;
4030
4031 bxe_drain_tx_queues(sc);
4032
4033 /* give HW time to discard old tx messages */
4034 DELAY(1000);
4035
4036 /* Clean all ETH MACs */
4037 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4038 if (rc < 0) {
4039 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4040 }
4041
4042 /* Clean up UC list */
4043 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4044 if (rc < 0) {
4045 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4046 }
4047
4048 /* Disable LLH */
4049 if (!CHIP_IS_E1(sc)) {
4050 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4051 }
4052
4053 /* Set "drop all" to stop Rx */
4054
4055 /*
4056 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4057 * a race between the completion code and this code.
4058 */
4059 BXE_MCAST_LOCK(sc);
4060
4061 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4062 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4063 } else {
4064 bxe_set_storm_rx_mode(sc);
4065 }
4066
4067 /* Clean up multicast configuration */
4068 rparam.mcast_obj = &sc->mcast_obj;
4069 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4070 if (rc < 0) {
4071 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4072 }
4073
4074 BXE_MCAST_UNLOCK(sc);
4075
4076 // XXX bxe_iov_chip_cleanup(sc);
4077
4078 /*
4079      * Send the UNLOAD_REQUEST to the MCP. This will return whether
4080 * this function should perform FUNCTION, PORT, or COMMON HW
4081 * reset.
4082 */
4083 reset_code = bxe_send_unload_req(sc, unload_mode);
4084
4085 /*
4086 * (assumption: No Attention from MCP at this stage)
4087 * PMF probably in the middle of TX disable/enable transaction
4088 */
4089 rc = bxe_func_wait_started(sc);
4090 if (rc) {
4091 BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4092 }
4093
4094 /*
4095 * Close multi and leading connections
4096 * Completions for ramrods are collected in a synchronous way
4097 */
4098 for (i = 0; i < sc->num_queues; i++) {
4099 if (bxe_stop_queue(sc, i)) {
4100 goto unload_error;
4101 }
4102 }
4103
4104 /*
4105      * If the SP settings didn't get completed by now then something has
4106      * gone very wrong.
4107 */
4108 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4109 BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4110 }
4111
4112 unload_error:
4113
4114 rc = bxe_func_stop(sc);
4115 if (rc) {
4116 BLOGE(sc, "Function stop failed!(%d)\n", rc);
4117 }
4118
4119 /* disable HW interrupts */
4120 bxe_int_disable_sync(sc, TRUE);
4121
4122 /* detach interrupts */
4123 bxe_interrupt_detach(sc);
4124
4125 /* Reset the chip */
4126 rc = bxe_reset_hw(sc, reset_code);
4127 if (rc) {
4128 BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4129 }
4130
4131 /* Report UNLOAD_DONE to MCP */
4132 bxe_send_unload_done(sc, keep_link);
4133 }
4134
4135 static void
4136 bxe_disable_close_the_gate(struct bxe_softc *sc)
4137 {
4138 uint32_t val;
4139 int port = SC_PORT(sc);
4140
4141 BLOGD(sc, DBG_LOAD,
4142 "Disabling 'close the gates'\n");
4143
4144 if (CHIP_IS_E1(sc)) {
4145 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4146 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4147 val = REG_RD(sc, addr);
4148 val &= ~(0x300);
4149 REG_WR(sc, addr, val);
4150 } else {
4151 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4152 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4153 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4154 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4155 }
4156 }
4157
4158 /*
4159  * Cleans the objects that have internal lists, without sending
4160 * ramrods. Should be run when interrupts are disabled.
4161 */
4162 static void
4163 bxe_squeeze_objects(struct bxe_softc *sc)
4164 {
4165 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4166 struct ecore_mcast_ramrod_params rparam = { NULL };
4167 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4168 int rc;
4169
4170 /* Cleanup MACs' object first... */
4171
4172 /* Wait for completion of requested */
4173 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4174 /* Perform a dry cleanup */
4175 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4176
4177 /* Clean ETH primary MAC */
4178 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4179 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4180 &ramrod_flags);
4181 if (rc != 0) {
4182 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4183 }
4184
4185 /* Cleanup UC list */
4186 vlan_mac_flags = 0;
4187 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4188 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4189 &ramrod_flags);
4190 if (rc != 0) {
4191 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4192 }
4193
4194 /* Now clean mcast object... */
4195
4196 rparam.mcast_obj = &sc->mcast_obj;
4197 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4198
4199 /* Add a DEL command... */
4200 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4201 if (rc < 0) {
4202 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4203 }
4204
4205 /* now wait until all pending commands are cleared */
4206
4207 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4208 while (rc != 0) {
4209 if (rc < 0) {
4210 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4211 return;
4212 }
4213
4214 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4215 }
4216 }
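/*
 * Note on the CONT loop above (assuming the usual ecore return convention):
 * ecore_config_mcast() returns a positive value while previously binned
 * commands are still pending, zero once the object is clean, and a negative
 * errno on failure, so ECORE_MCAST_CMD_CONT is simply reissued until the
 * pending count drains to zero.
 */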
4217
4218 /* stop the controller */
4219 static __noinline int
4220 bxe_nic_unload(struct bxe_softc *sc,
4221 uint32_t unload_mode,
4222 uint8_t keep_link)
4223 {
4224 uint8_t global = FALSE;
4225 uint32_t val;
4226 int i;
4227
4228 BXE_CORE_LOCK_ASSERT(sc);
4229
4230 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4231
4232 for (i = 0; i < sc->num_queues; i++) {
4233 struct bxe_fastpath *fp;
4234
4235 fp = &sc->fp[i];
4236 fp->watchdog_timer = 0;
4237 BXE_FP_TX_LOCK(fp);
4238 BXE_FP_TX_UNLOCK(fp);
4239 }
4240
4241 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4242
4243 /* mark driver as unloaded in shmem2 */
4244 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4245 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4246 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4247 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4248 }
4249
4250 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4251 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4252
4253 if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4254 /*
4255 * We can get here if the driver has been unloaded
4256 * during parity error recovery and is either waiting for a
4257 * leader to complete or for other functions to unload and
4258 * then ifconfig down has been issued. In this case we want to
4259              * unload and let the other functions complete the recovery
4260              * process.
4261 */
4262 sc->recovery_state = BXE_RECOVERY_DONE;
4263 sc->is_leader = 0;
4264 bxe_release_leader_lock(sc);
4265 mb();
4266 BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4267 }
4268         BLOGE(sc, "Can't unload in closed or error state, recovery_state 0x%x"
4269               " state = 0x%x\n", sc->recovery_state, sc->state);
4270 return (-1);
4271 }
4272
4273 /*
4274 * Nothing to do during unload if previous bxe_nic_load()
4275      * did not complete successfully - all resources are released.
4276 */
4277 if ((sc->state == BXE_STATE_CLOSED) ||
4278 (sc->state == BXE_STATE_ERROR)) {
4279 return (0);
4280 }
4281
4282 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4283 mb();
4284
4285 /* stop tx */
4286 bxe_tx_disable(sc);
4287
4288 sc->rx_mode = BXE_RX_MODE_NONE;
4289 /* XXX set rx mode ??? */
4290
4291 if (IS_PF(sc) && !sc->grcdump_done) {
4292 /* set ALWAYS_ALIVE bit in shmem */
4293 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4294
4295 bxe_drv_pulse(sc);
4296
4297 bxe_stats_handle(sc, STATS_EVENT_STOP);
4298 bxe_save_statistics(sc);
4299 }
4300
4301 /* wait till consumers catch up with producers in all queues */
4302 bxe_drain_tx_queues(sc);
4303
4304     /* if VF, indicate to the PF that this function is going down (the PF
4305      * will delete the SP elements and clear the initializations)
4306 */
4307 if (IS_VF(sc)) {
4308 ; /* bxe_vfpf_close_vf(sc); */
4309 } else if (unload_mode != UNLOAD_RECOVERY) {
4310 /* if this is a normal/close unload need to clean up chip */
4311 if (!sc->grcdump_done)
4312 bxe_chip_cleanup(sc, unload_mode, keep_link);
4313 } else {
4314 /* Send the UNLOAD_REQUEST to the MCP */
4315 bxe_send_unload_req(sc, unload_mode);
4316
4317 /*
4318          * Prevent transactions to the host from the functions on the
4319          * engine that doesn't reset global blocks in case of a global
4320          * attention once global blocks are reset and gates are opened
4321          * (the engine whose leader will perform the recovery
4322          * last).
4323 */
4324 if (!CHIP_IS_E1x(sc)) {
4325 bxe_pf_disable(sc);
4326 }
4327
4328 /* disable HW interrupts */
4329 bxe_int_disable_sync(sc, TRUE);
4330
4331 /* detach interrupts */
4332 bxe_interrupt_detach(sc);
4333
4334 /* Report UNLOAD_DONE to MCP */
4335 bxe_send_unload_done(sc, FALSE);
4336 }
4337
4338 /*
4339 * At this stage no more interrupts will arrive so we may safely clean
4340      * the queueable objects here in case they failed to get cleaned so far.
4341 */
4342 if (IS_PF(sc)) {
4343 bxe_squeeze_objects(sc);
4344 }
4345
4346 /* There should be no more pending SP commands at this stage */
4347 sc->sp_state = 0;
4348
4349 sc->port.pmf = 0;
4350
4351 bxe_free_fp_buffers(sc);
4352
4353 if (IS_PF(sc)) {
4354 bxe_free_mem(sc);
4355 }
4356
4357 bxe_free_fw_stats_mem(sc);
4358
4359 sc->state = BXE_STATE_CLOSED;
4360
4361 /*
4362 * Check if there are pending parity attentions. If there are - set
4363 * RECOVERY_IN_PROGRESS.
4364 */
4365 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4366 bxe_set_reset_in_progress(sc);
4367
4368 /* Set RESET_IS_GLOBAL if needed */
4369 if (global) {
4370 bxe_set_reset_global(sc);
4371 }
4372 }
4373
4374 /*
4375 * The last driver must disable a "close the gate" if there is no
4376 * parity attention or "process kill" pending.
4377 */
4378 if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4379 bxe_reset_is_done(sc, SC_PATH(sc))) {
4380 bxe_disable_close_the_gate(sc);
4381 }
4382
4383 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4384
4385 bxe_link_report(sc);
4386
4387 return (0);
4388 }
4389
4390 /*
4391 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4392 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4393 */
4394 static int
4395 bxe_ifmedia_update(if_t ifp)
4396 {
4397 struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4398 struct ifmedia *ifm;
4399
4400 ifm = &sc->ifmedia;
4401
4402 /* We only support Ethernet media type. */
4403 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4404 return (EINVAL);
4405 }
4406
4407 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4408 case IFM_AUTO:
4409 break;
4410 case IFM_10G_CX4:
4411 case IFM_10G_SR:
4412 case IFM_10G_T:
4413 case IFM_10G_TWINAX:
4414 default:
4415 /* We don't support changing the media type. */
4416 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4417 IFM_SUBTYPE(ifm->ifm_media));
4418 return (EINVAL);
4419 }
4420
4421 return (0);
4422 }
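/*
 * In practice only "ifconfig bxeN media autoselect" succeeds here; any
 * fixed subtype, e.g. "ifconfig bxe0 media 10Gbase-SR", falls through to
 * the default case above and returns EINVAL.
 */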
4423
4424 /*
4425 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4426 */
4427 static void
4428 bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
4429 {
4430 struct bxe_softc *sc = if_getsoftc(ifp);
4431
4432     /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4433      * line if the IFM_AVALID flag is *NOT* set. So we need to set this
4434      * flag unconditionally (irrespective of the administrative
4435      * 'up/down' state of the interface) to ensure that the line is always
4436      * displayed.
4437      */
4438 ifmr->ifm_status = IFM_AVALID;
4439
4440 /* Setup the default interface info. */
4441 ifmr->ifm_active = IFM_ETHER;
4442
4443 /* Report link down if the driver isn't running. */
4444 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4445 ifmr->ifm_active |= IFM_NONE;
4446 BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4447 BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4448 __func__, sc->link_vars.link_up);
4449 return;
4450 }
4451
4452
4453 if (sc->link_vars.link_up) {
4454 ifmr->ifm_status |= IFM_ACTIVE;
4455 ifmr->ifm_active |= IFM_FDX;
4456 } else {
4457 ifmr->ifm_active |= IFM_NONE;
4458 BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4459 __func__);
4460 return;
4461 }
4462
4463 ifmr->ifm_active |= sc->media;
4464 return;
4465 }
4466
4467 static void
4468 bxe_handle_chip_tq(void *context,
4469 int pending)
4470 {
4471 struct bxe_softc *sc = (struct bxe_softc *)context;
4472 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4473
4474 switch (work)
4475 {
4476
4477 case CHIP_TQ_REINIT:
4478 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4479 /* restart the interface */
4480 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4481 bxe_periodic_stop(sc);
4482 BXE_CORE_LOCK(sc);
4483 bxe_stop_locked(sc);
4484 bxe_init_locked(sc);
4485 BXE_CORE_UNLOCK(sc);
4486 }
4487 break;
4488
4489 default:
4490 break;
4491 }
4492 }
4493
4494 /*
4495 * Handles any IOCTL calls from the operating system.
4496 *
4497 * Returns:
4498 * 0 = Success, >0 Failure
4499 */
4500 static int
4501 bxe_ioctl(if_t ifp,
4502 u_long command,
4503 caddr_t data)
4504 {
4505 struct bxe_softc *sc = if_getsoftc(ifp);
4506 struct ifreq *ifr = (struct ifreq *)data;
4507 int mask = 0;
4508 int reinit = 0;
4509 int error = 0;
4510
4511 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4512 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4513
4514 switch (command)
4515 {
4516 case SIOCSIFMTU:
4517 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4518 ifr->ifr_mtu);
4519
4520 if (sc->mtu == ifr->ifr_mtu) {
4521 /* nothing to change */
4522 break;
4523 }
4524
4525 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4526 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4527 ifr->ifr_mtu, mtu_min, mtu_max);
4528 error = EINVAL;
4529 break;
4530 }
4531
4532 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4533                              (unsigned int)ifr->ifr_mtu);
4534 /*
4535 atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4536 (unsigned long)ifr->ifr_mtu);
4537 XXX - Not sure why it needs to be atomic
4538 */
4539 if_setmtu(ifp, ifr->ifr_mtu);
4540 reinit = 1;
4541 break;
4542
4543 case SIOCSIFFLAGS:
4544 /* toggle the interface state up or down */
4545 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4546
4547 BXE_CORE_LOCK(sc);
4548 /* check if the interface is up */
4549 if (if_getflags(ifp) & IFF_UP) {
4550 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4551 /* set the receive mode flags */
4552 bxe_set_rx_mode(sc);
4553 } else if(sc->state != BXE_STATE_DISABLED) {
4554 bxe_init_locked(sc);
4555 }
4556 } else {
4557 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4558 bxe_periodic_stop(sc);
4559 bxe_stop_locked(sc);
4560 }
4561 }
4562 BXE_CORE_UNLOCK(sc);
4563
4564 break;
4565
4566 case SIOCADDMULTI:
4567 case SIOCDELMULTI:
4568 /* add/delete multicast addresses */
4569 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4570
4571 /* check if the interface is up */
4572 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4573 /* set the receive mode flags */
4574 BXE_CORE_LOCK(sc);
4575 bxe_set_rx_mode(sc);
4576 BXE_CORE_UNLOCK(sc);
4577 }
4578
4579 break;
4580
4581 case SIOCSIFCAP:
4582 /* find out which capabilities have changed */
4583 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4584
4585 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4586 mask);
4587
4588         /* toggle the LRO capabilities enable flag */
4589 if (mask & IFCAP_LRO) {
4590 if_togglecapenable(ifp, IFCAP_LRO);
4591 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4592 (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4593 reinit = 1;
4594 }
4595
4596         /* toggle the TXCSUM checksum capabilities enable flag */
4597 if (mask & IFCAP_TXCSUM) {
4598 if_togglecapenable(ifp, IFCAP_TXCSUM);
4599 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4600 (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4601 if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4602 if_sethwassistbits(ifp, (CSUM_IP |
4603 CSUM_TCP |
4604 CSUM_UDP |
4605 CSUM_TSO |
4606 CSUM_TCP_IPV6 |
4607 CSUM_UDP_IPV6), 0);
4608 } else {
4609 if_clearhwassist(ifp); /* XXX */
4610 }
4611 }
4612
4613 /* toggle the RXCSUM checksum capabilities enable flag */
4614 if (mask & IFCAP_RXCSUM) {
4615 if_togglecapenable(ifp, IFCAP_RXCSUM);
4616 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4617 (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4618 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4619 if_sethwassistbits(ifp, (CSUM_IP |
4620 CSUM_TCP |
4621 CSUM_UDP |
4622 CSUM_TSO |
4623 CSUM_TCP_IPV6 |
4624 CSUM_UDP_IPV6), 0);
4625 } else {
4626 if_clearhwassist(ifp); /* XXX */
4627 }
4628 }
4629
4630 /* toggle TSO4 capabilities enabled flag */
4631 if (mask & IFCAP_TSO4) {
4632 if_togglecapenable(ifp, IFCAP_TSO4);
4633 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4634 (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4635 }
4636
4637 /* toggle TSO6 capabilities enabled flag */
4638 if (mask & IFCAP_TSO6) {
4639 if_togglecapenable(ifp, IFCAP_TSO6);
4640 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4641 (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4642 }
4643
4644 /* toggle VLAN_HWTSO capabilities enabled flag */
4645 if (mask & IFCAP_VLAN_HWTSO) {
4646
4647 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4648 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4649 (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4650 }
4651
4652 /* toggle VLAN_HWCSUM capabilities enabled flag */
4653 if (mask & IFCAP_VLAN_HWCSUM) {
4654 /* XXX investigate this... */
4655 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4656 error = EINVAL;
4657 }
4658
4659 /* toggle VLAN_MTU capabilities enable flag */
4660 if (mask & IFCAP_VLAN_MTU) {
4661 /* XXX investigate this... */
4662 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4663 error = EINVAL;
4664 }
4665
4666 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4667 if (mask & IFCAP_VLAN_HWTAGGING) {
4668 /* XXX investigate this... */
4669 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4670 error = EINVAL;
4671 }
4672
4673 /* toggle VLAN_HWFILTER capabilities enabled flag */
4674 if (mask & IFCAP_VLAN_HWFILTER) {
4675 /* XXX investigate this... */
4676 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4677 error = EINVAL;
4678 }
4679
4680 /* XXX not yet...
4681 * IFCAP_WOL_MAGIC
4682 */
4683
4684 break;
4685
4686 case SIOCSIFMEDIA:
4687 case SIOCGIFMEDIA:
4688 /* set/get interface media */
4689 BLOGD(sc, DBG_IOCTL,
4690 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4691 (command & 0xff));
4692 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4693 break;
4694
4695 default:
4696 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4697 (command & 0xff));
4698 error = ether_ioctl(ifp, command, data);
4699 break;
4700 }
4701
4702 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4703 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4704 "Re-initializing hardware from IOCTL change\n");
4705 bxe_periodic_stop(sc);
4706 BXE_CORE_LOCK(sc);
4707 bxe_stop_locked(sc);
4708 bxe_init_locked(sc);
4709 BXE_CORE_UNLOCK(sc);
4710 }
4711
4712 return (error);
4713 }
4714
4715 static __noinline void
4716 bxe_dump_mbuf(struct bxe_softc *sc,
4717 struct mbuf *m,
4718 uint8_t contents)
4719 {
4720 char * type;
4721 int i = 0;
4722
4723 if (!(sc->debug & DBG_MBUF)) {
4724 return;
4725 }
4726
4727 if (m == NULL) {
4728 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4729 return;
4730 }
4731
4732 while (m) {
4733
4734 BLOGD(sc, DBG_MBUF,
4735 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4736 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4737
4738 if (m->m_flags & M_PKTHDR) {
4739 BLOGD(sc, DBG_MBUF,
4740 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4741 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4742 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4743 }
4744
4745 if (m->m_flags & M_EXT) {
4746 switch (m->m_ext.ext_type) {
4747 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
4748 case EXT_SFBUF: type = "EXT_SFBUF"; break;
4749 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
4750 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
4751 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
4752 case EXT_PACKET: type = "EXT_PACKET"; break;
4753 case EXT_MBUF: type = "EXT_MBUF"; break;
4754 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
4755 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
4756 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4757 case EXT_EXTREF: type = "EXT_EXTREF"; break;
4758 default: type = "UNKNOWN"; break;
4759 }
4760
4761 BLOGD(sc, DBG_MBUF,
4762 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4763 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4764 }
4765
4766 if (contents) {
4767 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4768 }
4769
4770 m = m->m_next;
4771 i++;
4772 }
4773 }
4774
4775 /*
4776 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4777 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4778  * The window: the 3 reserved bds = 1 headers BD + 2 (parse BD and last BD).
4779  * The headers come in a separate bd in FreeBSD, so 13 - 3 = 10.
4780 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4781 */
4782 static int
4783 bxe_chktso_window(struct bxe_softc *sc,
4784 int nsegs,
4785 bus_dma_segment_t *segs,
4786 struct mbuf *m)
4787 {
4788 uint32_t num_wnds, wnd_size, wnd_sum;
4789 int32_t frag_idx, wnd_idx;
4790 unsigned short lso_mss;
4791
4792 wnd_sum = 0;
4793 wnd_size = 10;
4794 num_wnds = nsegs - wnd_size;
4795 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4796
4797 /*
4798 * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4799 * first window sum of data while skipping the first assuming it is the
4800 * header in FreeBSD.
4801 */
4802 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4803 wnd_sum += htole16(segs[frag_idx].ds_len);
4804 }
4805
4806 /* check the first 10 bd window size */
4807 if (wnd_sum < lso_mss) {
4808 return (1);
4809 }
4810
4811 /* run through the windows */
4812 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4813 /* subtract the first mbuf->m_len of the last wndw(-header) */
4814 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4815 /* add the next mbuf len to the len of our new window */
4816 wnd_sum += htole16(segs[frag_idx].ds_len);
4817 if (wnd_sum < lso_mss) {
4818 return (1);
4819 }
4820 }
4821
4822 return (0);
4823 }
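/*
 * Worked example with illustrative numbers: for lso_mss = 1460 and
 * wnd_size = 10, a chain whose data segments are 256 bytes each yields a
 * first-window sum of 10 * 256 = 2560 >= 1460, and every subsequent slide
 * (drop one 256-byte segment, add another) stays at 2560, so the frame is
 * OK to send. With 128-byte segments the first window is only 1280 < 1460
 * and the function returns 1, forcing a defrag before TSO can proceed.
 */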
4824
4825 static uint8_t
4826 bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4827 struct mbuf *m,
4828 uint32_t *parsing_data)
4829 {
4830 struct ether_vlan_header *eh = NULL;
4831 struct ip *ip4 = NULL;
4832 struct ip6_hdr *ip6 = NULL;
4833 caddr_t ip = NULL;
4834 struct tcphdr *th = NULL;
4835 int e_hlen, ip_hlen, l4_off;
4836 uint16_t proto;
4837
4838 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4839 /* no L4 checksum offload needed */
4840 return (0);
4841 }
4842
4843 /* get the Ethernet header */
4844 eh = mtod(m, struct ether_vlan_header *);
4845
4846 /* handle VLAN encapsulation if present */
4847 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4848 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4849 proto = ntohs(eh->evl_proto);
4850 } else {
4851 e_hlen = ETHER_HDR_LEN;
4852 proto = ntohs(eh->evl_encap_proto);
4853 }
4854
4855 switch (proto) {
4856 case ETHERTYPE_IP:
4857 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4858 ip4 = (m->m_len < sizeof(struct ip)) ?
4859 (struct ip *)m->m_next->m_data :
4860 (struct ip *)(m->m_data + e_hlen);
4861         /* ip_hl is the number of 32-bit words; convert to bytes */
4862         ip_hlen = (ip4->ip_hl << 2);
4863 ip = (caddr_t)ip4;
4864 break;
4865 case ETHERTYPE_IPV6:
4866 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4867 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4868 (struct ip6_hdr *)m->m_next->m_data :
4869 (struct ip6_hdr *)(m->m_data + e_hlen);
4870 /* XXX cannot support offload with IPv6 extensions */
4871 ip_hlen = sizeof(struct ip6_hdr);
4872 ip = (caddr_t)ip6;
4873 break;
4874 default:
4875 /* We can't offload in this case... */
4876 /* XXX error stat ??? */
4877 return (0);
4878 }
4879
4880 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4881 l4_off = (e_hlen + ip_hlen);
4882
4883 *parsing_data |=
4884 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4885 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4886
4887 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4888 CSUM_TSO |
4889 CSUM_TCP_IPV6)) {
4890 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4891 th = (struct tcphdr *)(ip + ip_hlen);
4892 /* th_off is number of 32-bit words */
4893 *parsing_data |= ((th->th_off <<
4894 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4895 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4896 return (l4_off + (th->th_off << 2)); /* entire header length */
4897 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4898 CSUM_UDP_IPV6)) {
4899 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4900 return (l4_off + sizeof(struct udphdr)); /* entire header length */
4901 } else {
4902 /* XXX error stat ??? */
4903 return (0);
4904 }
4905 }
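/*
 * Packing example with typical values: e_hlen = 14 and ip_hlen = 20 give
 * l4_off = 34 bytes, so (34 >> 1) = 17 16-bit words is merged into the
 * L4_HDR_START_OFFSET_W field; a th_off of 5 (20-byte TCP header) lands in
 * TCP_HDR_LENGTH_DW and the function returns 34 + 20 = 54, the full header
 * length in bytes.
 */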
4906
4907 static uint8_t
4908 bxe_set_pbd_csum(struct bxe_fastpath *fp,
4909 struct mbuf *m,
4910 struct eth_tx_parse_bd_e1x *pbd)
4911 {
4912 struct ether_vlan_header *eh = NULL;
4913 struct ip *ip4 = NULL;
4914 struct ip6_hdr *ip6 = NULL;
4915 caddr_t ip = NULL;
4916 struct tcphdr *th = NULL;
4917 struct udphdr *uh = NULL;
4918 int e_hlen, ip_hlen;
4919 uint16_t proto;
4920 uint8_t hlen;
4921 uint16_t tmp_csum;
4922 uint32_t *tmp_uh;
4923
4924 /* get the Ethernet header */
4925 eh = mtod(m, struct ether_vlan_header *);
4926
4927 /* handle VLAN encapsulation if present */
4928 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4929 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4930 proto = ntohs(eh->evl_proto);
4931 } else {
4932 e_hlen = ETHER_HDR_LEN;
4933 proto = ntohs(eh->evl_encap_proto);
4934 }
4935
4936 switch (proto) {
4937 case ETHERTYPE_IP:
4938 /* get the IP header, if mbuf len < 20 then header in next mbuf */
4939 ip4 = (m->m_len < sizeof(struct ip)) ?
4940 (struct ip *)m->m_next->m_data :
4941 (struct ip *)(m->m_data + e_hlen);
4942         /* ip_hl is the number of 32-bit words; the pbd wants 16-bit words */
4943         ip_hlen = (ip4->ip_hl << 1);
4944 ip = (caddr_t)ip4;
4945 break;
4946 case ETHERTYPE_IPV6:
4947 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4948 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4949 (struct ip6_hdr *)m->m_next->m_data :
4950 (struct ip6_hdr *)(m->m_data + e_hlen);
4951 /* XXX cannot support offload with IPv6 extensions */
4952 ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4953 ip = (caddr_t)ip6;
4954 break;
4955 default:
4956 /* We can't offload in this case... */
4957 /* XXX error stat ??? */
4958 return (0);
4959 }
4960
4961 hlen = (e_hlen >> 1);
4962
4963 /* note that rest of global_data is indirectly zeroed here */
4964 if (m->m_flags & M_VLANTAG) {
4965 pbd->global_data =
4966 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4967 } else {
4968 pbd->global_data = htole16(hlen);
4969 }
4970
4971 pbd->ip_hlen_w = ip_hlen;
4972
4973 hlen += pbd->ip_hlen_w;
4974
4975 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4976
4977 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4978 CSUM_TSO |
4979 CSUM_TCP_IPV6)) {
4980 th = (struct tcphdr *)(ip + (ip_hlen << 1));
4981 /* th_off is number of 32-bit words */
4982 hlen += (uint16_t)(th->th_off << 1);
4983 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4984 CSUM_UDP_IPV6)) {
4985 uh = (struct udphdr *)(ip + (ip_hlen << 1));
4986 hlen += (sizeof(struct udphdr) / 2);
4987 } else {
4988 /* valid case as only CSUM_IP was set */
4989 return (0);
4990 }
4991
4992 pbd->total_hlen_w = htole16(hlen);
4993
4994 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4995 CSUM_TSO |
4996 CSUM_TCP_IPV6)) {
4997 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4998 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4999 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
5000 CSUM_UDP_IPV6)) {
5001 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5002
5003 /*
5004 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
5005 * checksums and does not know anything about the UDP header and where
5006 * the checksum field is located. It only knows about TCP. Therefore
5007 * we "lie" to the hardware for outgoing UDP packets w/ checksum
5008 * offload. Since the checksum field offset for TCP is 16 bytes and
5009 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5010 * bytes less than the start of the UDP header. This allows the
5011 * hardware to write the checksum in the correct spot. But the
5012 * hardware will compute a checksum which includes the last 10 bytes
5013 * of the IP header. To correct this we tweak the stack computed
5014 * pseudo checksum by folding in the calculation of the inverse
5015 * checksum for those final 10 bytes of the IP header. This allows
5016 * the correct checksum to be computed by the hardware.
5017 */
5018
5019 /* set pointer 10 bytes before UDP header */
5020 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5021
5022 /* calculate a pseudo header checksum over the first 10 bytes */
5023 tmp_csum = in_pseudo(*tmp_uh,
5024 *(tmp_uh + 1),
5025 *(uint16_t *)(tmp_uh + 2));
5026
5027 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5028 }
5029
5030 return (hlen * 2); /* entire header length, number of bytes */
5031 }
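/*
 * Sketch of the UDP correction above in ones-complement terms: the hardware
 * will sum everything from (uh - 10) onward, wrongly folding in the last 10
 * bytes of the IP header. in_pseudo() computes the ones-complement sum of
 * exactly those 10 bytes (two 32-bit words plus one 16-bit word), and
 * adding its bitwise inverse into the stack-supplied pseudo checksum
 * pre-cancels that extra contribution, so the value the hardware finally
 * writes into uh_sum is correct.
 */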
5032
5033 static void
5034 bxe_set_pbd_lso_e2(struct mbuf *m,
5035 uint32_t *parsing_data)
5036 {
5037 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5038 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5039 ETH_TX_PARSE_BD_E2_LSO_MSS);
5040
5041 /* XXX test for IPv6 with extension header... */
5042 }
5043
5044 static void
5045 bxe_set_pbd_lso(struct mbuf *m,
5046 struct eth_tx_parse_bd_e1x *pbd)
5047 {
5048 struct ether_vlan_header *eh = NULL;
5049 struct ip *ip = NULL;
5050 struct tcphdr *th = NULL;
5051 int e_hlen;
5052
5053 /* get the Ethernet header */
5054 eh = mtod(m, struct ether_vlan_header *);
5055
5056 /* handle VLAN encapsulation if present */
5057 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5058 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5059
5060 /* get the IP and TCP header, with LSO entire header in first mbuf */
5061 /* XXX assuming IPv4 */
5062 ip = (struct ip *)(m->m_data + e_hlen);
5063 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5064
5065 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5066 pbd->tcp_send_seq = ntohl(th->th_seq);
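    /*
     * The 4th 32-bit word of the TCP header holds the data offset, flags,
     * and window fields; after ntohl(), bits 23:16 are the 8 TCP flag bits,
     * which the shift-and-mask below extracts.
     */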
5067 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5068
5069 #if 1
5070 /* XXX IPv4 */
5071 pbd->ip_id = ntohs(ip->ip_id);
5072 pbd->tcp_pseudo_csum =
5073 ntohs(in_pseudo(ip->ip_src.s_addr,
5074 ip->ip_dst.s_addr,
5075 htons(IPPROTO_TCP)));
5076 #else
5077 /* XXX IPv6 */
5078 pbd->tcp_pseudo_csum =
5079 ntohs(in_pseudo(&ip6->ip6_src,
5080 &ip6->ip6_dst,
5081 htons(IPPROTO_TCP)));
5082 #endif
5083
5084 pbd->global_data |=
5085 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5086 }
5087
5088 /*
5089  * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
5090 * visible to the controller.
5091 *
5092 * If an mbuf is submitted to this routine and cannot be given to the
5093 * controller (e.g. it has too many fragments) then the function may free
5094 * the mbuf and return to the caller.
5095 *
5096 * Returns:
5097 * 0 = Success, !0 = Failure
5098 * Note the side effect that an mbuf may be freed if it causes a problem.
5099 */
5100 static int
5101 bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5102 {
5103 bus_dma_segment_t segs[32];
5104 struct mbuf *m0;
5105 struct bxe_sw_tx_bd *tx_buf;
5106 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5107 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5108 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5109 struct eth_tx_bd *tx_data_bd;
5110 struct eth_tx_bd *tx_total_pkt_size_bd;
5111 struct eth_tx_start_bd *tx_start_bd;
5112 uint16_t bd_prod, pkt_prod, total_pkt_size;
5113 uint8_t mac_type;
5114 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5115 struct bxe_softc *sc;
5116 uint16_t tx_bd_avail;
5117 struct ether_vlan_header *eh;
5118 uint32_t pbd_e2_parsing_data = 0;
5119 uint8_t hlen = 0;
5120 int tmp_bd;
5121 int i;
5122
5123 sc = fp->sc;
5124
5125 M_ASSERTPKTHDR(*m_head);
5126
5127 m0 = *m_head;
5128 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5129 tx_start_bd = NULL;
5130 tx_data_bd = NULL;
5131 tx_total_pkt_size_bd = NULL;
5132
5133 /* get the H/W pointer for packets and BDs */
5134 pkt_prod = fp->tx_pkt_prod;
5135 bd_prod = fp->tx_bd_prod;
5136
5137 mac_type = UNICAST_ADDRESS;
5138
5139 /* map the mbuf into the next open DMAable memory */
5140 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5141 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5142 tx_buf->m_map, m0,
5143 segs, &nsegs, BUS_DMA_NOWAIT);
5144
5145 /* mapping errors */
5146 if(__predict_false(error != 0)) {
5147 fp->eth_q_stats.tx_dma_mapping_failure++;
5148 if (error == ENOMEM) {
5149 /* resource issue, try again later */
5150 rc = ENOMEM;
5151 } else if (error == EFBIG) {
5152 /* possibly recoverable with defragmentation */
5153 fp->eth_q_stats.mbuf_defrag_attempts++;
5154 m0 = m_defrag(*m_head, M_NOWAIT);
5155 if (m0 == NULL) {
5156 fp->eth_q_stats.mbuf_defrag_failures++;
5157 rc = ENOBUFS;
5158 } else {
5159 /* defrag successful, try mapping again */
5160 *m_head = m0;
5161 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5162 tx_buf->m_map, m0,
5163 segs, &nsegs, BUS_DMA_NOWAIT);
5164 if (error) {
5165 fp->eth_q_stats.tx_dma_mapping_failure++;
5166 rc = error;
5167 }
5168 }
5169 } else {
5170 /* unknown, unrecoverable mapping error */
5171 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5172 bxe_dump_mbuf(sc, m0, FALSE);
5173 rc = error;
5174 }
5175
5176 goto bxe_tx_encap_continue;
5177 }
5178
5179 tx_bd_avail = bxe_tx_avail(sc, fp);
5180
5181 /* make sure there is enough room in the send queue */
5182 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5183 /* Recoverable, try again later. */
5184 fp->eth_q_stats.tx_hw_queue_full++;
5185 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5186 rc = ENOMEM;
5187 goto bxe_tx_encap_continue;
5188 }
5189
5190 /* capture the current H/W TX chain high watermark */
5191 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5192 (TX_BD_USABLE - tx_bd_avail))) {
5193 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5194 }
5195
5196 /* make sure it fits in the packet window */
5197 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5198 /*
5199          * The mbuf may be too big for the controller to handle. If the frame
5200 * is a TSO frame we'll need to do an additional check.
5201 */
5202 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5203 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5204 goto bxe_tx_encap_continue; /* OK to send */
5205 } else {
5206 fp->eth_q_stats.tx_window_violation_tso++;
5207 }
5208 } else {
5209 fp->eth_q_stats.tx_window_violation_std++;
5210 }
5211
5212         /* let's try to defragment this mbuf and remap it */
5213 fp->eth_q_stats.mbuf_defrag_attempts++;
5214 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5215
5216 m0 = m_defrag(*m_head, M_NOWAIT);
5217 if (m0 == NULL) {
5218 fp->eth_q_stats.mbuf_defrag_failures++;
5219 /* Ugh, just drop the frame... :( */
5220 rc = ENOBUFS;
5221 } else {
5222 /* defrag successful, try mapping again */
5223 *m_head = m0;
5224 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5225 tx_buf->m_map, m0,
5226 segs, &nsegs, BUS_DMA_NOWAIT);
5227 if (error) {
5228 fp->eth_q_stats.tx_dma_mapping_failure++;
5229 /* No sense in trying to defrag/copy chain, drop it. :( */
5230 rc = error;
5231 } else {
5232 /* if the chain is still too long then drop it */
5233 if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5234 /*
5235 * in case TSO is enabled nsegs should be checked against
5236 * BXE_TSO_MAX_SEGMENTS
5237 */
5238 if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5239 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5240 fp->eth_q_stats.nsegs_path1_errors++;
5241 rc = ENODEV;
5242 }
5243 } else {
5244 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5245 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5246 fp->eth_q_stats.nsegs_path2_errors++;
5247 rc = ENODEV;
5248 }
5249 }
5250 }
5251 }
5252 }
5253
5254 bxe_tx_encap_continue:
5255
5256 /* Check for errors */
5257 if (rc) {
5258 if (rc == ENOMEM) {
5259 /* recoverable try again later */
5260 } else {
5261 fp->eth_q_stats.tx_soft_errors++;
5262 fp->eth_q_stats.mbuf_alloc_tx--;
5263 m_freem(*m_head);
5264 *m_head = NULL;
5265 }
5266
5267 return (rc);
5268 }
5269
5270 /* set flag according to packet type (UNICAST_ADDRESS is default) */
5271 if (m0->m_flags & M_BCAST) {
5272 mac_type = BROADCAST_ADDRESS;
5273 } else if (m0->m_flags & M_MCAST) {
5274 mac_type = MULTICAST_ADDRESS;
5275 }
5276
5277 /* store the mbuf into the mbuf ring */
5278 tx_buf->m = m0;
5279 tx_buf->first_bd = fp->tx_bd_prod;
5280 tx_buf->flags = 0;
5281
5282 /* prepare the first transmit (start) BD for the mbuf */
5283 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5284
5285 BLOGD(sc, DBG_TX,
5286 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5287 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5288
5289 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5290 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5291 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5292 total_pkt_size += tx_start_bd->nbytes;
5293 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5294
5295 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5296
5297 /* all frames have at least Start BD + Parsing BD */
5298 nbds = nsegs + 1;
5299 tx_start_bd->nbd = htole16(nbds);
5300
5301 if (m0->m_flags & M_VLANTAG) {
5302 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5303 tx_start_bd->bd_flags.as_bitfield |=
5304 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5305 } else {
5306 /* vf tx, start bd must hold the ethertype for fw to enforce it */
5307 if (IS_VF(sc)) {
5308 /* map ethernet header to find type and header length */
5309 eh = mtod(m0, struct ether_vlan_header *);
5310 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5311 } else {
5312 /* used by FW for packet accounting */
5313 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5314 }
5315 }
5316
5317 /*
5318      * add a parsing BD from the chain. The parsing BD is always added even
5319      * though it is only used for TSO and checksum offload
5320 */
5321 bd_prod = TX_BD_NEXT(bd_prod);
5322
5323 if (m0->m_pkthdr.csum_flags) {
5324 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5325 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5326 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5327 }
5328
5329 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5330 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5331 ETH_TX_BD_FLAGS_L4_CSUM);
5332 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5333 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5334 ETH_TX_BD_FLAGS_IS_UDP |
5335 ETH_TX_BD_FLAGS_L4_CSUM);
5336 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5337 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5338 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5339 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5340 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5341 ETH_TX_BD_FLAGS_IS_UDP);
5342 }
5343 }
5344
5345 if (!CHIP_IS_E1x(sc)) {
5346 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5347 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5348
5349 if (m0->m_pkthdr.csum_flags) {
5350 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5351 }
5352
5353 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5354 mac_type);
5355 } else {
5356 uint16_t global_data = 0;
5357
5358 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5359 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5360
5361 if (m0->m_pkthdr.csum_flags) {
5362 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5363 }
5364
5365 SET_FLAG(global_data,
5366 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5367 pbd_e1x->global_data |= htole16(global_data);
5368 }
5369
5370 /* setup the parsing BD with TSO specific info */
5371 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5372 fp->eth_q_stats.tx_ofld_frames_lso++;
5373 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5374
5375 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5376 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5377
5378 /* split the first BD into header/data making the fw job easy */
5379 nbds++;
5380 tx_start_bd->nbd = htole16(nbds);
5381 tx_start_bd->nbytes = htole16(hlen);
5382
5383 bd_prod = TX_BD_NEXT(bd_prod);
5384
5385 /* new transmit BD after the tx_parse_bd */
5386 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5387 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5388 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5389 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5390 if (tx_total_pkt_size_bd == NULL) {
5391 tx_total_pkt_size_bd = tx_data_bd;
5392 }
5393
5394 BLOGD(sc, DBG_TX,
5395 "TSO split header size is %d (%x:%x) nbds %d\n",
5396 le16toh(tx_start_bd->nbytes),
5397 le32toh(tx_start_bd->addr_hi),
5398 le32toh(tx_start_bd->addr_lo),
5399 nbds);
5400 }
5401
5402 if (!CHIP_IS_E1x(sc)) {
5403 bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5404 } else {
5405 bxe_set_pbd_lso(m0, pbd_e1x);
5406 }
5407 }
5408
5409 if (pbd_e2_parsing_data) {
5410 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5411 }
5412
5413 /* prepare remaining BDs, start tx bd contains first seg/frag */
5414 for (i = 1; i < nsegs ; i++) {
5415 bd_prod = TX_BD_NEXT(bd_prod);
5416 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5417 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5418 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5419 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5420 if (tx_total_pkt_size_bd == NULL) {
5421 tx_total_pkt_size_bd = tx_data_bd;
5422 }
5423 total_pkt_size += tx_data_bd->nbytes;
5424 }
5425
5426 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5427
5428 if (tx_total_pkt_size_bd != NULL) {
5429 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5430 }
5431
5432 if (__predict_false(sc->debug & DBG_TX)) {
5433 tmp_bd = tx_buf->first_bd;
5434 for (i = 0; i < nbds; i++)
5435 {
5436 if (i == 0) {
5437 BLOGD(sc, DBG_TX,
5438 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5439 "bd_flags=0x%x hdr_nbds=%d\n",
5440 tx_start_bd,
5441 tmp_bd,
5442 le16toh(tx_start_bd->nbd),
5443 le16toh(tx_start_bd->vlan_or_ethertype),
5444 tx_start_bd->bd_flags.as_bitfield,
5445 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5446 } else if (i == 1) {
5447 if (pbd_e1x) {
5448 BLOGD(sc, DBG_TX,
5449 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5450 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5451 "tcp_seq=%u total_hlen_w=%u\n",
5452 pbd_e1x,
5453 tmp_bd,
5454 pbd_e1x->global_data,
5455 pbd_e1x->ip_hlen_w,
5456 pbd_e1x->ip_id,
5457 pbd_e1x->lso_mss,
5458 pbd_e1x->tcp_flags,
5459 pbd_e1x->tcp_pseudo_csum,
5460 pbd_e1x->tcp_send_seq,
5461 le16toh(pbd_e1x->total_hlen_w));
5462 } else { /* if (pbd_e2) */
5463 BLOGD(sc, DBG_TX,
5464 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5465 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5466 pbd_e2,
5467 tmp_bd,
5468 pbd_e2->data.mac_addr.dst_hi,
5469 pbd_e2->data.mac_addr.dst_mid,
5470 pbd_e2->data.mac_addr.dst_lo,
5471 pbd_e2->data.mac_addr.src_hi,
5472 pbd_e2->data.mac_addr.src_mid,
5473 pbd_e2->data.mac_addr.src_lo,
5474 pbd_e2->parsing_data);
5475 }
5476 }
5477
5478             if (i != 1) { /* skip the parse bd as it doesn't hold data */
5479 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5480 BLOGD(sc, DBG_TX,
5481 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5482 tx_data_bd,
5483 tmp_bd,
5484 le16toh(tx_data_bd->nbytes),
5485 le32toh(tx_data_bd->addr_hi),
5486 le32toh(tx_data_bd->addr_lo));
5487 }
5488
5489 tmp_bd = TX_BD_NEXT(tmp_bd);
5490 }
5491 }
5492
5493 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5494
5495 /* update TX BD producer index value for next TX */
5496 bd_prod = TX_BD_NEXT(bd_prod);
5497
5498 /*
5499 * If the chain of tx_bd's describing this frame is adjacent to or spans
5500 * an eth_tx_next_bd element then we need to increment the nbds value.
5501 */
5502 if (TX_BD_IDX(bd_prod) < nbds) {
5503 nbds++;
5504 }
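    /*
     * Example (an assumed reading of the check above): each BD page ends in
     * an eth_tx_next_bd pointer that the firmware counts, so if a 4-BD
     * frame finishes with the next free index within the page < 4, the
     * chain crossed that next-bd element and nbds must account for it.
     */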
5505
5506 /* don't allow reordering of writes for nbd and packets */
5507 mb();
5508
5509 fp->tx_db.data.prod += nbds;
5510
5511 /* producer points to the next free tx_bd at this point */
5512 fp->tx_pkt_prod++;
5513 fp->tx_bd_prod = bd_prod;
5514
5515 DOORBELL(sc, fp->index, fp->tx_db.raw);
5516
5517 fp->eth_q_stats.tx_pkts++;
5518
5519 /* Prevent speculative reads from getting ahead of the status block. */
5520 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5521 0, 0, BUS_SPACE_BARRIER_READ);
5522
5523 /* Prevent speculative reads from getting ahead of the doorbell. */
5524 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5525 0, 0, BUS_SPACE_BARRIER_READ);
5526
5527 return (0);
5528 }
5529
5530 static void
5531 bxe_tx_start_locked(struct bxe_softc *sc,
5532 if_t ifp,
5533 struct bxe_fastpath *fp)
5534 {
5535 struct mbuf *m = NULL;
5536 int tx_count = 0;
5537 uint16_t tx_bd_avail;
5538
5539 BXE_FP_TX_LOCK_ASSERT(fp);
5540
5541 /* keep adding entries while there are frames to send */
5542 while (!if_sendq_empty(ifp)) {
5543
5544 /*
5545 * check for any frames to send
5546          * dequeue can still be NULL even if the queue is not empty
5547 */
5548 m = if_dequeue(ifp);
5549 if (__predict_false(m == NULL)) {
5550 break;
5551 }
5552
5553 /* the mbuf now belongs to us */
5554 fp->eth_q_stats.mbuf_alloc_tx++;
5555
5556 /*
5557 * Put the frame into the transmit ring. If we don't have room,
5558 * place the mbuf back at the head of the TX queue, set the
5559 * OACTIVE flag, and wait for the NIC to drain the chain.
5560 */
5561 if (__predict_false(bxe_tx_encap(fp, &m))) {
5562 fp->eth_q_stats.tx_encap_failures++;
5563 if (m != NULL) {
5564 /* mark the TX queue as full and return the frame */
5565 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5566 if_sendq_prepend(ifp, m);
5567 fp->eth_q_stats.mbuf_alloc_tx--;
5568 fp->eth_q_stats.tx_queue_xoff++;
5569 }
5570
5571 /* stop looking for more work */
5572 break;
5573 }
5574
5575 /* the frame was enqueued successfully */
5576 tx_count++;
5577
5578 /* send a copy of the frame to any BPF listeners. */
5579 if_etherbpfmtap(ifp, m);
5580
5581 tx_bd_avail = bxe_tx_avail(sc, fp);
5582
5583 /* handle any completions if we're running low */
5584 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5585 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5586 bxe_txeof(sc, fp);
5587 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5588 break;
5589 }
5590 }
5591 }
5592
5593 /* all TX packets were dequeued and/or the tx ring is full */
5594 if (tx_count > 0) {
5595 /* reset the TX watchdog timeout timer */
5596 fp->watchdog_timer = BXE_TX_TIMEOUT;
5597 }
5598 }
5599
5600 /* Legacy (non-RSS) dispatch routine */
5601 static void
5602 bxe_tx_start(if_t ifp)
5603 {
5604 struct bxe_softc *sc;
5605 struct bxe_fastpath *fp;
5606
5607 sc = if_getsoftc(ifp);
5608
5609 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5610 BLOGW(sc, "Interface not running, ignoring transmit request\n");
5611 return;
5612 }
5613
5614 if (!sc->link_vars.link_up) {
5615 BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5616 return;
5617 }
5618
5619 fp = &sc->fp[0];
5620
5621 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5622 fp->eth_q_stats.tx_queue_full_return++;
5623 return;
5624 }
5625
5626 BXE_FP_TX_LOCK(fp);
5627 bxe_tx_start_locked(sc, ifp, fp);
5628 BXE_FP_TX_UNLOCK(fp);
5629 }
5630
5631 static int
5632 bxe_tx_mq_start_locked(struct bxe_softc *sc,
5633 if_t ifp,
5634 struct bxe_fastpath *fp,
5635 struct mbuf *m)
5636 {
5637 struct buf_ring *tx_br = fp->tx_br;
5638 struct mbuf *next;
5639 int depth, rc, tx_count;
5640 uint16_t tx_bd_avail;
5641
5642 rc = tx_count = 0;
5643
5644 BXE_FP_TX_LOCK_ASSERT(fp);
5645
5646 if (sc->state != BXE_STATE_OPEN) {
5647 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5648 return ENETDOWN;
5649 }
5650
5651 if (!tx_br) {
5652 BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5653 return (EINVAL);
5654 }
5655
5656 if (m != NULL) {
5657 rc = drbr_enqueue(ifp, tx_br, m);
5658 if (rc != 0) {
5659 fp->eth_q_stats.tx_soft_errors++;
5660 goto bxe_tx_mq_start_locked_exit;
5661 }
5662 }
5663
5664 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5665 fp->eth_q_stats.tx_request_link_down_failures++;
5666 goto bxe_tx_mq_start_locked_exit;
5667 }
5668
5669 /* fetch the depth of the driver queue */
5670 depth = drbr_inuse(ifp, tx_br);
5671 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5672 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5673 }
5674
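    /*
     * Note on the buf_ring protocol used below: drbr_peek() returns the
     * head frame without removing it, drbr_advance() commits the
     * removal once the frame has been consumed (sent or dropped), and
     * drbr_putback() replaces the head frame so it is retried on the
     * next pass.
     */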
5675 /* keep adding entries while there are frames to send */
5676 while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5677 /* handle any completions if we're running low */
5678 tx_bd_avail = bxe_tx_avail(sc, fp);
5679 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5680 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5681 bxe_txeof(sc, fp);
5682 tx_bd_avail = bxe_tx_avail(sc, fp);
5683 if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5684 fp->eth_q_stats.bd_avail_too_less_failures++;
5685 m_freem(next);
5686 drbr_advance(ifp, tx_br);
5687 rc = ENOBUFS;
5688 break;
5689 }
5690 }
5691
5692 /* the mbuf now belongs to us */
5693 fp->eth_q_stats.mbuf_alloc_tx++;
5694
5695 /*
5696 * Put the frame into the transmit ring. If we don't have room,
5697 * place the mbuf back at the head of the TX queue, set the
5698 * OACTIVE flag, and wait for the NIC to drain the chain.
5699 */
5700 rc = bxe_tx_encap(fp, &next);
5701 if (__predict_false(rc != 0)) {
5702 fp->eth_q_stats.tx_encap_failures++;
5703 if (next != NULL) {
5704 /* mark the TX queue as full and save the frame */
5705 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5706 drbr_putback(ifp, tx_br, next);
5707 fp->eth_q_stats.mbuf_alloc_tx--;
5708 fp->eth_q_stats.tx_frames_deferred++;
5709 } else
5710 drbr_advance(ifp, tx_br);
5711
5712 /* stop looking for more work */
5713 break;
5714 }
5715
5716 /* the transmit frame was enqueued successfully */
5717 tx_count++;
5718
5719 /* send a copy of the frame to any BPF listeners */
5720 if_etherbpfmtap(ifp, next);
5721
5722 drbr_advance(ifp, tx_br);
5723 }
5724
5725 /* all TX packets were dequeued and/or the tx ring is full */
5726 if (tx_count > 0) {
5727 /* reset the TX watchdog timeout timer */
5728 fp->watchdog_timer = BXE_TX_TIMEOUT;
5729 }
5730
5731 bxe_tx_mq_start_locked_exit:
5732 /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5733 if (!drbr_empty(ifp, tx_br)) {
5734 fp->eth_q_stats.tx_mq_not_empty++;
5735 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5736 }
5737
5738 return (rc);
5739 }
5740
5741 static void
5742 bxe_tx_mq_start_deferred(void *arg,
5743 int pending)
5744 {
5745 struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5746 struct bxe_softc *sc = fp->sc;
5747 if_t ifp = sc->ifp;
5748
5749 BXE_FP_TX_LOCK(fp);
5750 bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5751 BXE_FP_TX_UNLOCK(fp);
5752 }
5753
5754 /* Multiqueue (TSS) dispatch routine. */
5755 static int
5756 bxe_tx_mq_start(if_t ifp,
5757 struct mbuf *m)
5758 {
5759 struct bxe_softc *sc = if_getsoftc(ifp);
5760 struct bxe_fastpath *fp;
5761 int fp_index, rc;
5762
5763 fp_index = 0; /* default is the first queue */
5764
5765 /* check if flowid is set */
5766
5767 if (BXE_VALID_FLOWID(m))
5768 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
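    /*
     * For example (illustrative values): with num_queues = 4, a frame
     * whose flowid is 6 is dispatched to fp[6 % 4] = fp[2].
     */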
5769
5770 fp = &sc->fp[fp_index];
5771
5772 if (sc->state != BXE_STATE_OPEN) {
5773 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5774 return ENETDOWN;
5775 }
5776
5777 if (BXE_FP_TX_TRYLOCK(fp)) {
5778 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5779 BXE_FP_TX_UNLOCK(fp);
5780 } else {
5781 rc = drbr_enqueue(ifp, fp->tx_br, m);
5782 taskqueue_enqueue(fp->tq, &fp->tx_task);
5783 }
5784
5785 return (rc);
5786 }
5787
5788 static void
5789 bxe_mq_flush(if_t ifp)
5790 {
5791 struct bxe_softc *sc = if_getsoftc(ifp);
5792 struct bxe_fastpath *fp;
5793 struct mbuf *m;
5794 int i;
5795
5796 for (i = 0; i < sc->num_queues; i++) {
5797 fp = &sc->fp[i];
5798
5799 if (fp->state != BXE_FP_STATE_IRQ) {
5800 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5801 fp->index, fp->state);
5802 continue;
5803 }
5804
5805 if (fp->tx_br != NULL) {
5806 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5807 BXE_FP_TX_LOCK(fp);
5808 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5809 m_freem(m);
5810 }
5811 BXE_FP_TX_UNLOCK(fp);
5812 }
5813 }
5814
5815 if_qflush(ifp);
5816 }
5817
5818 static uint16_t
5819 bxe_cid_ilt_lines(struct bxe_softc *sc)
5820 {
5821 if (IS_SRIOV(sc)) {
5822 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5823 }
5824 return (L2_ILT_LINES(sc));
5825 }
5826
5827 static void
5828 bxe_ilt_set_info(struct bxe_softc *sc)
5829 {
5830 struct ilt_client_info *ilt_client;
5831 struct ecore_ilt *ilt = sc->ilt;
5832 uint16_t line = 0;
5833
5834 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5835 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5836
5837 /* CDU */
5838 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5839 ilt_client->client_num = ILT_CLIENT_CDU;
5840 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5841 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5842 ilt_client->start = line;
5843 line += bxe_cid_ilt_lines(sc);
5844
5845 if (CNIC_SUPPORT(sc)) {
5846 line += CNIC_ILT_LINES;
5847 }
5848
5849 ilt_client->end = (line - 1);
5850
5851 BLOGD(sc, DBG_LOAD,
5852 "ilt client[CDU]: start %d, end %d, "
5853 "psz 0x%x, flags 0x%x, hw psz %d\n",
5854 ilt_client->start, ilt_client->end,
5855 ilt_client->page_size,
5856 ilt_client->flags,
5857 ilog2(ilt_client->page_size >> 12));
5858
5859 /* QM */
5860 if (QM_INIT(sc->qm_cid_count)) {
5861 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5862 ilt_client->client_num = ILT_CLIENT_QM;
5863 ilt_client->page_size = QM_ILT_PAGE_SZ;
5864 ilt_client->flags = 0;
5865 ilt_client->start = line;
5866
5867 /* 4 bytes for each cid */
5868 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5869 QM_ILT_PAGE_SZ);
5870
5871 ilt_client->end = (line - 1);
5872
5873 BLOGD(sc, DBG_LOAD,
5874 "ilt client[QM]: start %d, end %d, "
5875 "psz 0x%x, flags 0x%x, hw psz %d\n",
5876 ilt_client->start, ilt_client->end,
5877 ilt_client->page_size, ilt_client->flags,
5878 ilog2(ilt_client->page_size >> 12));
5879 }
5880
5881 if (CNIC_SUPPORT(sc)) {
5882 /* SRC */
5883 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5884 ilt_client->client_num = ILT_CLIENT_SRC;
5885 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5886 ilt_client->flags = 0;
5887 ilt_client->start = line;
5888 line += SRC_ILT_LINES;
5889 ilt_client->end = (line - 1);
5890
5891 BLOGD(sc, DBG_LOAD,
5892 "ilt client[SRC]: start %d, end %d, "
5893 "psz 0x%x, flags 0x%x, hw psz %d\n",
5894 ilt_client->start, ilt_client->end,
5895 ilt_client->page_size, ilt_client->flags,
5896 ilog2(ilt_client->page_size >> 12));
5897
5898 /* TM */
5899 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5900 ilt_client->client_num = ILT_CLIENT_TM;
5901 ilt_client->page_size = TM_ILT_PAGE_SZ;
5902 ilt_client->flags = 0;
5903 ilt_client->start = line;
5904 line += TM_ILT_LINES;
5905 ilt_client->end = (line - 1);
5906
5907 BLOGD(sc, DBG_LOAD,
5908 "ilt client[TM]: start %d, end %d, "
5909 "psz 0x%x, flags 0x%x, hw psz %d\n",
5910 ilt_client->start, ilt_client->end,
5911 ilt_client->page_size, ilt_client->flags,
5912 ilog2(ilt_client->page_size >> 12));
5913 }
5914
5915 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5916 }
5917
5918 static void
5919 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5920 {
5921 int i;
5922 uint32_t rx_buf_size;
5923
5924 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
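    /*
     * Illustration (assuming the typical MCLBYTES = 2048 and
     * MJUMPAGESIZE = 4096): a standard 1500-byte MTU plus padding and
     * Ethernet overhead stays under MCLBYTES, so the first branch below
     * is taken; a 4000-byte MTU would land in the MJUMPAGESIZE branch;
     * anything larger than two jumbo pages falls through to the final
     * else, which caps the per-mbuf allocation at MCLBYTES.
     */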
5925
5926 for (i = 0; i < sc->num_queues; i++) {
5927         if (rx_buf_size <= MCLBYTES) {
5928             sc->fp[i].rx_buf_size = rx_buf_size;
5929             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5930         } else if (rx_buf_size <= MJUMPAGESIZE) {
5931             sc->fp[i].rx_buf_size = rx_buf_size;
5932             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5933         } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
5934             sc->fp[i].rx_buf_size = MCLBYTES;
5935             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5936         } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
5937             sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5938             sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5939         } else {
5940             sc->fp[i].rx_buf_size = MCLBYTES;
5941             sc->fp[i].mbuf_alloc_size = MCLBYTES;
5942         }
5943 }
5944 }
5945
5946 static int
5947 bxe_alloc_ilt_mem(struct bxe_softc *sc)
5948 {
5949 int rc = 0;
5950
5951 if ((sc->ilt =
5952 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5953 M_BXE_ILT,
5954 (M_NOWAIT | M_ZERO))) == NULL) {
5955 rc = 1;
5956 }
5957
5958 return (rc);
5959 }
5960
5961 static int
5962 bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5963 {
5964 int rc = 0;
5965
5966 if ((sc->ilt->lines =
5967 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5968 M_BXE_ILT,
5969 (M_NOWAIT | M_ZERO))) == NULL) {
5970 rc = 1;
5971 }
5972
5973 return (rc);
5974 }
5975
5976 static void
5977 bxe_free_ilt_mem(struct bxe_softc *sc)
5978 {
5979 if (sc->ilt != NULL) {
5980 free(sc->ilt, M_BXE_ILT);
5981 sc->ilt = NULL;
5982 }
5983 }
5984
5985 static void
5986 bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5987 {
5988 if (sc->ilt->lines != NULL) {
5989 free(sc->ilt->lines, M_BXE_ILT);
5990 sc->ilt->lines = NULL;
5991 }
5992 }
5993
5994 static void
5995 bxe_free_mem(struct bxe_softc *sc)
5996 {
5997 int i;
5998
5999 for (i = 0; i < L2_ILT_LINES(sc); i++) {
6000 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
6001 sc->context[i].vcxt = NULL;
6002 sc->context[i].size = 0;
6003 }
6004
6005 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6006
6007 bxe_free_ilt_lines_mem(sc);
6008
6009 }
6010
6011 static int
6012 bxe_alloc_mem(struct bxe_softc *sc)
6013 {
6014
6015 int context_size;
6016 int allocated;
6017 int i;
6018
6019 /*
6020 * Allocate memory for CDU context:
6021 * This memory is allocated separately and not in the generic ILT
6022      * functions because CDU differs in a few aspects:
6023 * 1. There can be multiple entities allocating memory for context -
6024 * regular L2, CNIC, and SRIOV drivers. Each separately controls
6025 * its own ILT lines.
6026 * 2. Since CDU page-size is not a single 4KB page (which is the case
6027 * for the other ILT clients), to be efficient we want to support
6028      *    allocation of a sub-page-size chunk in the last entry.
6029 * 3. Context pointers are used by the driver to pass to FW / update
6030 * the context (for the other ILT clients the pointers are used just to
6031 * free the memory during unload).
6032 */
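    /*
     * Worked example (hypothetical sizes): if context_size were
     * 2.5 * CDU_ILT_PAGE_SZ, the loop below would allocate chunks of
     * CDU_ILT_PAGE_SZ, CDU_ILT_PAGE_SZ, and 0.5 * CDU_ILT_PAGE_SZ, the
     * last chunk being the sub-page-size allocation described in
     * point 2 above.
     */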
6033 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6034 for (i = 0, allocated = 0; allocated < context_size; i++) {
6035 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6036 (context_size - allocated));
6037
6038 if (bxe_dma_alloc(sc, sc->context[i].size,
6039 &sc->context[i].vcxt_dma,
6040 "cdu context") != 0) {
6041 bxe_free_mem(sc);
6042 return (-1);
6043 }
6044
6045 sc->context[i].vcxt =
6046 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6047
6048 allocated += sc->context[i].size;
6049 }
6050
6051     if (bxe_alloc_ilt_lines_mem(sc) != 0) {
             BLOGE(sc, "ecore ilt lines memory allocation failed\n");
             bxe_free_mem(sc);
             return (-1);
         }
6052
6053 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6054 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6055 {
6056 for (i = 0; i < 4; i++) {
6057 BLOGD(sc, DBG_LOAD,
6058 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6059 i,
6060 sc->ilt->clients[i].page_size,
6061 sc->ilt->clients[i].start,
6062 sc->ilt->clients[i].end,
6063 sc->ilt->clients[i].client_num,
6064 sc->ilt->clients[i].flags);
6065 }
6066 }
6067 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6068 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6069 bxe_free_mem(sc);
6070 return (-1);
6071 }
6072
6073 return (0);
6074 }
6075
6076 static void
6077 bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6078 {
6079 int i;
6080
6081 if (fp->rx_mbuf_tag == NULL) {
6082 return;
6083 }
6084
6085 /* free all mbufs and unload all maps */
6086 for (i = 0; i < RX_BD_TOTAL; i++) {
6087 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6088 bus_dmamap_sync(fp->rx_mbuf_tag,
6089 fp->rx_mbuf_chain[i].m_map,
6090 BUS_DMASYNC_POSTREAD);
6091 bus_dmamap_unload(fp->rx_mbuf_tag,
6092 fp->rx_mbuf_chain[i].m_map);
6093 }
6094
6095 if (fp->rx_mbuf_chain[i].m != NULL) {
6096 m_freem(fp->rx_mbuf_chain[i].m);
6097 fp->rx_mbuf_chain[i].m = NULL;
6098 fp->eth_q_stats.mbuf_alloc_rx--;
6099 }
6100 }
6101 }
6102
6103 static void
6104 bxe_free_tpa_pool(struct bxe_fastpath *fp)
6105 {
6106 struct bxe_softc *sc;
6107 int i, max_agg_queues;
6108
6109 sc = fp->sc;
6110
6111 if (fp->rx_mbuf_tag == NULL) {
6112 return;
6113 }
6114
6115 max_agg_queues = MAX_AGG_QS(sc);
6116
6117 /* release all mbufs and unload all DMA maps in the TPA pool */
6118 for (i = 0; i < max_agg_queues; i++) {
6119 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6120 bus_dmamap_sync(fp->rx_mbuf_tag,
6121 fp->rx_tpa_info[i].bd.m_map,
6122 BUS_DMASYNC_POSTREAD);
6123 bus_dmamap_unload(fp->rx_mbuf_tag,
6124 fp->rx_tpa_info[i].bd.m_map);
6125 }
6126
6127 if (fp->rx_tpa_info[i].bd.m != NULL) {
6128 m_freem(fp->rx_tpa_info[i].bd.m);
6129 fp->rx_tpa_info[i].bd.m = NULL;
6130 fp->eth_q_stats.mbuf_alloc_tpa--;
6131 }
6132 }
6133 }
6134
6135 static void
6136 bxe_free_sge_chain(struct bxe_fastpath *fp)
6137 {
6138 int i;
6139
6140 if (fp->rx_sge_mbuf_tag == NULL) {
6141 return;
6142 }
6143
6144     /* free all mbufs and unload all maps */
6145 for (i = 0; i < RX_SGE_TOTAL; i++) {
6146 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6147 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6148 fp->rx_sge_mbuf_chain[i].m_map,
6149 BUS_DMASYNC_POSTREAD);
6150 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6151 fp->rx_sge_mbuf_chain[i].m_map);
6152 }
6153
6154 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6155 m_freem(fp->rx_sge_mbuf_chain[i].m);
6156 fp->rx_sge_mbuf_chain[i].m = NULL;
6157 fp->eth_q_stats.mbuf_alloc_sge--;
6158 }
6159 }
6160 }
6161
6162 static void
6163 bxe_free_fp_buffers(struct bxe_softc *sc)
6164 {
6165 struct bxe_fastpath *fp;
6166 int i;
6167
6168 for (i = 0; i < sc->num_queues; i++) {
6169 fp = &sc->fp[i];
6170
6171 if (fp->tx_br != NULL) {
6172 /* just in case bxe_mq_flush() wasn't called */
6173 if (mtx_initialized(&fp->tx_mtx)) {
6174 struct mbuf *m;
6175
6176 BXE_FP_TX_LOCK(fp);
6177 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6178 m_freem(m);
6179 BXE_FP_TX_UNLOCK(fp);
6180 }
6181 }
6182
6183 /* free all RX buffers */
6184 bxe_free_rx_bd_chain(fp);
6185 bxe_free_tpa_pool(fp);
6186 bxe_free_sge_chain(fp);
6187
6188 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6189 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6190 fp->eth_q_stats.mbuf_alloc_rx);
6191 }
6192
6193 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6194 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6195 fp->eth_q_stats.mbuf_alloc_sge);
6196 }
6197
6198 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6199             BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6200 fp->eth_q_stats.mbuf_alloc_tpa);
6201 }
6202
6203 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6204 BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6205 fp->eth_q_stats.mbuf_alloc_tx);
6206 }
6207
6208 /* XXX verify all mbufs were reclaimed */
6209 }
6210 }
6211
6212 static int
6213 bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6214 uint16_t prev_index,
6215 uint16_t index)
6216 {
6217 struct bxe_sw_rx_bd *rx_buf;
6218 struct eth_rx_bd *rx_bd;
6219 bus_dma_segment_t segs[1];
6220 bus_dmamap_t map;
6221 struct mbuf *m;
6222 int nsegs, rc;
6223
6224 rc = 0;
6225
6226 /* allocate the new RX BD mbuf */
6227 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6228 if (__predict_false(m == NULL)) {
6229 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6230 return (ENOBUFS);
6231 }
6232
6233 fp->eth_q_stats.mbuf_alloc_rx++;
6234
6235 /* initialize the mbuf buffer length */
6236 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6237
6238 /* map the mbuf into non-paged pool */
6239 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6240 fp->rx_mbuf_spare_map,
6241 m, segs, &nsegs, BUS_DMA_NOWAIT);
6242 if (__predict_false(rc != 0)) {
6243 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6244 m_freem(m);
6245 fp->eth_q_stats.mbuf_alloc_rx--;
6246 return (rc);
6247 }
6248
6249 /* all mbufs must map to a single segment */
6250 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6251
6252 /* release any existing RX BD mbuf mappings */
6253
6254 if (prev_index != index) {
6255 rx_buf = &fp->rx_mbuf_chain[prev_index];
6256
6257 if (rx_buf->m_map != NULL) {
6258 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6259 BUS_DMASYNC_POSTREAD);
6260 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6261 }
6262
6263 /*
6264 * We only get here from bxe_rxeof() when the maximum number
6265 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6266 * holds the mbuf in the prev_index so it's OK to NULL it out
6267 * here without concern of a memory leak.
6268          * holds the mbuf from prev_index, so it's OK to NULL it out
6269          * here without risk of a memory leak.
6270 }
6271
6272 rx_buf = &fp->rx_mbuf_chain[index];
6273
6274 if (rx_buf->m_map != NULL) {
6275 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6276 BUS_DMASYNC_POSTREAD);
6277 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6278 }
6279
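    /*
     * Map-swap pattern: the new mbuf was loaded into the spare map
     * above, so that map is handed to the ring slot while the slot's
     * old (now unloaded) map is recycled as the next spare. This avoids
     * creating and destroying DMA maps in the fast path; the TPA and
     * SGE allocators below use the same trick.
     */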
6280 /* save the mbuf and mapping info for a future packet */
6281 map = (prev_index != index) ?
6282 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6283 rx_buf->m_map = fp->rx_mbuf_spare_map;
6284 fp->rx_mbuf_spare_map = map;
6285 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6286 BUS_DMASYNC_PREREAD);
6287 rx_buf->m = m;
6288
6289 rx_bd = &fp->rx_chain[index];
6290 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6291 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6292
6293 return (rc);
6294 }
6295
6296 static int
6297 bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6298 int queue)
6299 {
6300 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6301 bus_dma_segment_t segs[1];
6302 bus_dmamap_t map;
6303 struct mbuf *m;
6304 int nsegs;
6305 int rc = 0;
6306
6307 /* allocate the new TPA mbuf */
6308 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6309 if (__predict_false(m == NULL)) {
6310 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6311 return (ENOBUFS);
6312 }
6313
6314 fp->eth_q_stats.mbuf_alloc_tpa++;
6315
6316 /* initialize the mbuf buffer length */
6317 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6318
6319 /* map the mbuf into non-paged pool */
6320 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6321 fp->rx_tpa_info_mbuf_spare_map,
6322 m, segs, &nsegs, BUS_DMA_NOWAIT);
6323 if (__predict_false(rc != 0)) {
6324 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6325 m_free(m);
6326 fp->eth_q_stats.mbuf_alloc_tpa--;
6327 return (rc);
6328 }
6329
6330 /* all mbufs must map to a single segment */
6331 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6332
6333 /* release any existing TPA mbuf mapping */
6334 if (tpa_info->bd.m_map != NULL) {
6335 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6336 BUS_DMASYNC_POSTREAD);
6337 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6338 }
6339
6340 /* save the mbuf and mapping info for the TPA mbuf */
6341 map = tpa_info->bd.m_map;
6342 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6343 fp->rx_tpa_info_mbuf_spare_map = map;
6344 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6345 BUS_DMASYNC_PREREAD);
6346 tpa_info->bd.m = m;
6347 tpa_info->seg = segs[0];
6348
6349 return (rc);
6350 }
6351
6352 /*
6353 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6354 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6355 * chain.
6356 */
6357 static int
6358 bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6359 uint16_t index)
6360 {
6361 struct bxe_sw_rx_bd *sge_buf;
6362 struct eth_rx_sge *sge;
6363 bus_dma_segment_t segs[1];
6364 bus_dmamap_t map;
6365 struct mbuf *m;
6366 int nsegs;
6367 int rc = 0;
6368
6369 /* allocate a new SGE mbuf */
6370 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6371 if (__predict_false(m == NULL)) {
6372 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6373 return (ENOMEM);
6374 }
6375
6376 fp->eth_q_stats.mbuf_alloc_sge++;
6377
6378 /* initialize the mbuf buffer length */
6379 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6380
6381 /* map the SGE mbuf into non-paged pool */
6382 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6383 fp->rx_sge_mbuf_spare_map,
6384 m, segs, &nsegs, BUS_DMA_NOWAIT);
6385 if (__predict_false(rc != 0)) {
6386 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6387 m_freem(m);
6388 fp->eth_q_stats.mbuf_alloc_sge--;
6389 return (rc);
6390 }
6391
6392 /* all mbufs must map to a single segment */
6393 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6394
6395 sge_buf = &fp->rx_sge_mbuf_chain[index];
6396
6397 /* release any existing SGE mbuf mapping */
6398 if (sge_buf->m_map != NULL) {
6399 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6400 BUS_DMASYNC_POSTREAD);
6401 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6402 }
6403
6404 /* save the mbuf and mapping info for a future packet */
6405 map = sge_buf->m_map;
6406 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6407 fp->rx_sge_mbuf_spare_map = map;
6408 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6409 BUS_DMASYNC_PREREAD);
6410 sge_buf->m = m;
6411
6412 sge = &fp->rx_sge_chain[index];
6413 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6414 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6415
6416 return (rc);
6417 }
6418
6419 static __noinline int
6420 bxe_alloc_fp_buffers(struct bxe_softc *sc)
6421 {
6422 struct bxe_fastpath *fp;
6423 int i, j, rc = 0;
6424 int ring_prod, cqe_ring_prod;
6425 int max_agg_queues;
6426
6427 for (i = 0; i < sc->num_queues; i++) {
6428 fp = &sc->fp[i];
6429
6430 ring_prod = cqe_ring_prod = 0;
6431 fp->rx_bd_cons = 0;
6432 fp->rx_cq_cons = 0;
6433
6434 /* allocate buffers for the RX BDs in RX BD chain */
6435 for (j = 0; j < sc->max_rx_bufs; j++) {
6436 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6437 if (rc != 0) {
6438 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6439 i, rc);
6440 goto bxe_alloc_fp_buffers_error;
6441 }
6442
6443 ring_prod = RX_BD_NEXT(ring_prod);
6444 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6445 }
6446
6447 fp->rx_bd_prod = ring_prod;
6448 fp->rx_cq_prod = cqe_ring_prod;
6449 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6450
6451 max_agg_queues = MAX_AGG_QS(sc);
6452
6453 fp->tpa_enable = TRUE;
6454
6455 /* fill the TPA pool */
6456 for (j = 0; j < max_agg_queues; j++) {
6457 rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6458 if (rc != 0) {
6459 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6460 i, j);
6461 fp->tpa_enable = FALSE;
6462 goto bxe_alloc_fp_buffers_error;
6463 }
6464
6465 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6466 }
6467
6468 if (fp->tpa_enable) {
6469 /* fill the RX SGE chain */
6470 ring_prod = 0;
6471 for (j = 0; j < RX_SGE_USABLE; j++) {
6472 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6473 if (rc != 0) {
6474 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6475 i, ring_prod);
6476 fp->tpa_enable = FALSE;
6477 ring_prod = 0;
6478 goto bxe_alloc_fp_buffers_error;
6479 }
6480
6481 ring_prod = RX_SGE_NEXT(ring_prod);
6482 }
6483
6484 fp->rx_sge_prod = ring_prod;
6485 }
6486 }
6487
6488 return (0);
6489
6490 bxe_alloc_fp_buffers_error:
6491
6492 /* unwind what was already allocated */
6493 bxe_free_rx_bd_chain(fp);
6494 bxe_free_tpa_pool(fp);
6495 bxe_free_sge_chain(fp);
6496
6497 return (ENOBUFS);
6498 }
6499
6500 static void
6501 bxe_free_fw_stats_mem(struct bxe_softc *sc)
6502 {
6503 bxe_dma_free(sc, &sc->fw_stats_dma);
6504
6505 sc->fw_stats_num = 0;
6506
6507 sc->fw_stats_req_size = 0;
6508 sc->fw_stats_req = NULL;
6509 sc->fw_stats_req_mapping = 0;
6510
6511 sc->fw_stats_data_size = 0;
6512 sc->fw_stats_data = NULL;
6513 sc->fw_stats_data_mapping = 0;
6514 }
6515
6516 static int
6517 bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6518 {
6519 uint8_t num_queue_stats;
6520 int num_groups;
6521
6522 /* number of queues for statistics is number of eth queues */
6523 num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6524
6525 /*
6526 * Total number of FW statistics requests =
6527 * 1 for port stats + 1 for PF stats + num of queues
6528 */
6529 sc->fw_stats_num = (2 + num_queue_stats);
6530
6531 /*
6532 * Request is built from stats_query_header and an array of
6533 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6534      * rules. The real number of requests is configured in the
6535 * stats_query_header.
6536 */
6537 num_groups =
6538 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6539 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
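    /*
     * Worked example (queue count chosen for illustration): with 4 ETH
     * queues, fw_stats_num is 2 + 4 = 6 and the ceiling division above
     * yields one group when STATS_QUERY_CMD_COUNT is at least 6, or two
     * groups if STATS_QUERY_CMD_COUNT were, say, 4.
     */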
6540
6541 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6542 sc->fw_stats_num, num_groups);
6543
6544 sc->fw_stats_req_size =
6545 (sizeof(struct stats_query_header) +
6546 (num_groups * sizeof(struct stats_query_cmd_group)));
6547
6548 /*
6549 * Data for statistics requests + stats_counter.
6550 * stats_counter holds per-STORM counters that are incremented when
6551 * STORM has finished with the current request. Memory for FCoE
6552      * offloaded statistics is counted anyway, even if it will not be sent.
6553 * VF stats are not accounted for here as the data of VF stats is stored
6554 * in memory allocated by the VF, not here.
6555 */
6556 sc->fw_stats_data_size =
6557 (sizeof(struct stats_counter) +
6558 sizeof(struct per_port_stats) +
6559 sizeof(struct per_pf_stats) +
6560 /* sizeof(struct fcoe_statistics_params) + */
6561 (sizeof(struct per_queue_stats) * num_queue_stats));
6562
6563 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6564 &sc->fw_stats_dma, "fw stats") != 0) {
6565 bxe_free_fw_stats_mem(sc);
6566 return (-1);
6567 }
6568
6569 /* set up the shortcuts */
6570
6571 sc->fw_stats_req =
6572 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6573 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6574
6575 sc->fw_stats_data =
6576 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6577 sc->fw_stats_req_size);
6578 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6579 sc->fw_stats_req_size);
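    /*
     * The single DMA block is thus carved into two adjacent regions
     * (sketch):
     *
     *   fw_stats_dma.vaddr / .paddr
     *     +0                  -> fw_stats_req  (fw_stats_req_size bytes)
     *     +fw_stats_req_size  -> fw_stats_data (fw_stats_data_size bytes)
     */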
6580
6581 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6582 (uintmax_t)sc->fw_stats_req_mapping);
6583
6584 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6585 (uintmax_t)sc->fw_stats_data_mapping);
6586
6587 return (0);
6588 }
6589
6590 /*
6591 * Bits map:
6592 * 0-7 - Engine0 load counter.
6593 * 8-15 - Engine1 load counter.
6594 * 16 - Engine0 RESET_IN_PROGRESS bit.
6595 * 17 - Engine1 RESET_IN_PROGRESS bit.
6596 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6597 * function on the engine
6598 * 19 - Engine1 ONE_IS_LOADED.
6599  * 20 - Chip reset flow bit. When set, a non-leader must wait for both
6600  *      engines' leaders to complete (check both RESET_IN_PROGRESS bits,
6601  *      not just the one belonging to its engine).
6602 */
6603 #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
6604 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
6605 #define BXE_PATH0_LOAD_CNT_SHIFT 0
6606 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
6607 #define BXE_PATH1_LOAD_CNT_SHIFT 8
6608 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6609 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6610 #define BXE_GLOBAL_RESET_BIT 0x00040000
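/*
 * A minimal decoding sketch (illustrative only, not driver code):
 *
 *     uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
 *     uint8_t engine0_load_cnt = (val & BXE_PATH0_LOAD_CNT_MASK) >>
 *                                BXE_PATH0_LOAD_CNT_SHIFT;
 *     uint8_t global_reset = (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
 *
 * The write-side helpers below perform such read-modify-write updates
 * while holding HW_LOCK_RESOURCE_RECOVERY_REG so that concurrent
 * functions cannot corrupt the shared register.
 */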
6611
6612 /* set the GLOBAL_RESET bit, should be run under rtnl lock */
6613 static void
6614 bxe_set_reset_global(struct bxe_softc *sc)
6615 {
6616 uint32_t val;
6617 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6618 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6619 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6620 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6621 }
6622
6623 /* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6624 static void
6625 bxe_clear_reset_global(struct bxe_softc *sc)
6626 {
6627 uint32_t val;
6628 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6629 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6630 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6631 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6632 }
6633
6634 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6635 static uint8_t
6636 bxe_reset_is_global(struct bxe_softc *sc)
6637 {
6638 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6639 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6640 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6641 }
6642
6643 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6644 static void
6645 bxe_set_reset_done(struct bxe_softc *sc)
6646 {
6647 uint32_t val;
6648 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6649 BXE_PATH0_RST_IN_PROG_BIT;
6650
6651 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6652
6653 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6654 /* Clear the bit */
6655 val &= ~bit;
6656 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6657
6658 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6659 }
6660
6661 /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6662 static void
6663 bxe_set_reset_in_progress(struct bxe_softc *sc)
6664 {
6665 uint32_t val;
6666 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6667 BXE_PATH0_RST_IN_PROG_BIT;
6668
6669 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6670
6671 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6672 /* Set the bit */
6673 val |= bit;
6674 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6675
6676 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6677 }
6678
6679 /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6680 static uint8_t
6681 bxe_reset_is_done(struct bxe_softc *sc,
6682 int engine)
6683 {
6684 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6685 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6686 BXE_PATH0_RST_IN_PROG_BIT;
6687
6688 /* return false if bit is set */
6689 return (val & bit) ? FALSE : TRUE;
6690 }
6691
6692 /* get the load status for an engine, should be run under rtnl lock */
6693 static uint8_t
6694 bxe_get_load_status(struct bxe_softc *sc,
6695 int engine)
6696 {
6697 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6698 BXE_PATH0_LOAD_CNT_MASK;
6699 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6700 BXE_PATH0_LOAD_CNT_SHIFT;
6701 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6702
6703 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6704
6705 val = ((val & mask) >> shift);
6706
6707 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6708
6709 return (val != 0);
6710 }
6711
6712 /* set pf load mark */
6713 /* XXX needs to be under rtnl lock */
6714 static void
6715 bxe_set_pf_load(struct bxe_softc *sc)
6716 {
6717 uint32_t val;
6718 uint32_t val1;
6719 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6720 BXE_PATH0_LOAD_CNT_MASK;
6721 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6722 BXE_PATH0_LOAD_CNT_SHIFT;
6723
6724 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6725
6726 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6727 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6728
6729 /* get the current counter value */
6730 val1 = ((val & mask) >> shift);
6731
6732 /* set bit of this PF */
6733 val1 |= (1 << SC_ABS_FUNC(sc));
6734
6735 /* clear the old value */
6736 val &= ~mask;
6737
6738 /* set the new one */
6739 val |= ((val1 << shift) & mask);
6740
6741 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6742
6743 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6744 }
6745
6746 /* clear pf load mark */
6747 /* XXX needs to be under rtnl lock */
6748 static uint8_t
6749 bxe_clear_pf_load(struct bxe_softc *sc)
6750 {
6751 uint32_t val1, val;
6752 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6753 BXE_PATH0_LOAD_CNT_MASK;
6754 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6755 BXE_PATH0_LOAD_CNT_SHIFT;
6756
6757 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6758 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6759 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6760
6761 /* get the current counter value */
6762 val1 = (val & mask) >> shift;
6763
6764 /* clear bit of that PF */
6765 val1 &= ~(1 << SC_ABS_FUNC(sc));
6766
6767 /* clear the old value */
6768 val &= ~mask;
6769
6770 /* set the new one */
6771 val |= ((val1 << shift) & mask);
6772
6773 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6774 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6775 return (val1 != 0);
6776 }
6777
6778 /* send load request to the MCP and analyze the response */
6779 static int
6780 bxe_nic_load_request(struct bxe_softc *sc,
6781 uint32_t *load_code)
6782 {
6783 /* init fw_seq */
6784 sc->fw_seq =
6785 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6786 DRV_MSG_SEQ_NUMBER_MASK);
6787
6788 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6789
6790 /* get the current FW pulse sequence */
6791 sc->fw_drv_pulse_wr_seq =
6792 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6793 DRV_PULSE_SEQ_MASK);
6794
6795 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6796 sc->fw_drv_pulse_wr_seq);
6797
6798 /* load request */
6799 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6800 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6801
6802 /* if the MCP fails to respond we must abort */
6803 if (!(*load_code)) {
6804 BLOGE(sc, "MCP response failure!\n");
6805 return (-1);
6806 }
6807
6808     /* if the MCP refused the load request, we must abort */
6809 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6810 BLOGE(sc, "MCP refused load request\n");
6811 return (-1);
6812 }
6813
6814 return (0);
6815 }
6816
6817 /*
6818 * Check whether another PF has already loaded FW to chip. In virtualized
6819  * environments a PF from another VM may have already initialized the device,
6820  * including loading the FW.
6821 */
6822 static int
6823 bxe_nic_load_analyze_req(struct bxe_softc *sc,
6824 uint32_t load_code)
6825 {
6826 uint32_t my_fw, loaded_fw;
6827
6828 /* is another pf loaded on this engine? */
6829 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6830 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6831 /* build my FW version dword */
6832 my_fw = (BCM_5710_FW_MAJOR_VERSION +
6833 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6834 (BCM_5710_FW_REVISION_VERSION << 16) +
6835 (BCM_5710_FW_ENGINEERING_VERSION << 24));
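        /*
         * For example (hypothetical version numbers): FW 7.13.11.0
         * packs to 0x000b0d07 -- major version in the low byte, then
         * minor, revision, and engineering version in successively
         * higher bytes.
         */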
6836
6837 /* read loaded FW from chip */
6838 loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6839 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6840 loaded_fw, my_fw);
6841
6842 /* abort nic load if version mismatch */
6843 if (my_fw != loaded_fw) {
6844             BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n",
6845 loaded_fw, my_fw);
6846 return (-1);
6847 }
6848 }
6849
6850 return (0);
6851 }
6852
6853 /* mark PMF if applicable */
6854 static void
6855 bxe_nic_load_pmf(struct bxe_softc *sc,
6856 uint32_t load_code)
6857 {
6858 uint32_t ncsi_oem_data_addr;
6859
6860 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6861 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6862 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6863 /*
6864          * The barrier orders the write to sc->port.pmf here against
6865          * reads of it from the periodic task.
6866 */
6867 sc->port.pmf = 1;
6868 mb();
6869 } else {
6870 sc->port.pmf = 0;
6871 }
6872
6873 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6874
6875 /* XXX needed? */
6876 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6877 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6878 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6879 if (ncsi_oem_data_addr) {
6880 REG_WR(sc,
6881 (ncsi_oem_data_addr +
6882 offsetof(struct glob_ncsi_oem_data, driver_version)),
6883 0);
6884 }
6885 }
6886 }
6887 }
6888
6889 static void
6890 bxe_read_mf_cfg(struct bxe_softc *sc)
6891 {
6892 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6893 int abs_func;
6894 int vn;
6895
6896 if (BXE_NOMCP(sc)) {
6897         return; /* what should the default value be in this case? */
6898 }
6899
6900 /*
6901 * The formula for computing the absolute function number is...
6902 * For 2 port configuration (4 functions per port):
6903 * abs_func = 2 * vn + SC_PORT + SC_PATH
6904 * For 4 port configuration (2 functions per port):
6905 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6906 */
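    /*
     * For instance (illustrative values): in a 2 port configuration
     * n is 1 below, so vn = 1, SC_PORT = 1, SC_PATH = 0 gives
     * abs_func = 1 * (2 * 1 + 1) + 0 = 3.
     */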
6907 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6908 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6909 if (abs_func >= E1H_FUNC_MAX) {
6910 break;
6911 }
6912 sc->devinfo.mf_info.mf_config[vn] =
6913 MFCFG_RD(sc, func_mf_config[abs_func].config);
6914 }
6915
6916 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6917 FUNC_MF_CFG_FUNC_DISABLED) {
6918 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6919 sc->flags |= BXE_MF_FUNC_DIS;
6920 } else {
6921 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6922 sc->flags &= ~BXE_MF_FUNC_DIS;
6923 }
6924 }
6925
6926 /* acquire split MCP access lock register */
6927 static int bxe_acquire_alr(struct bxe_softc *sc)
6928 {
6929 uint32_t j, val;
6930
6931 for (j = 0; j < 1000; j++) {
6932 val = (1UL << 31);
6933 REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6934 val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6935         if (val & (1UL << 31))
6936 break;
6937
6938 DELAY(5000);
6939 }
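    /* the poll above waits for up to ~5 seconds (1000 * 5000 usec) */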
6940
6941     if (!(val & (1UL << 31))) {
6942 BLOGE(sc, "Cannot acquire MCP access lock register\n");
6943 return (-1);
6944 }
6945
6946 return (0);
6947 }
6948
6949 /* release split MCP access lock register */
6950 static void bxe_release_alr(struct bxe_softc *sc)
6951 {
6952 REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6953 }
6954
6955 static void
6956 bxe_fan_failure(struct bxe_softc *sc)
6957 {
6958 int port = SC_PORT(sc);
6959 uint32_t ext_phy_config;
6960
6961 /* mark the failure */
6962 ext_phy_config =
6963 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6964
6965 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6966 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6967 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6968 ext_phy_config);
6969
6970 /* log the failure */
6971     BLOGW(sc, "Fan Failure has caused the driver to shut down "
6972 "the card to prevent permanent damage. "
6973 "Please contact OEM Support for assistance\n");
6974
6975 /* XXX */
6976 #if 1
6977 bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6978 #else
6979 /*
6980 * Schedule device reset (unload)
6981      * Some boards consume enough power while the driver is up to
6982      * overheat if the fan fails.
6983 */
6984 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6985 schedule_delayed_work(&sc->sp_rtnl_task, 0);
6986 #endif
6987 }
6988
6989 /* this function is called upon a link interrupt */
6990 static void
6991 bxe_link_attn(struct bxe_softc *sc)
6992 {
6993 uint32_t pause_enabled = 0;
6994 struct host_port_stats *pstats;
6995 int cmng_fns;
6996 struct bxe_fastpath *fp;
6997 int i;
6998
6999 /* Make sure that we are synced with the current statistics */
7000 bxe_stats_handle(sc, STATS_EVENT_STOP);
7001 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7002 elink_link_update(&sc->link_params, &sc->link_vars);
7003
7004 if (sc->link_vars.link_up) {
7005
7006 /* dropless flow control */
7007 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7008 pause_enabled = 0;
7009
7010 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7011 pause_enabled = 1;
7012 }
7013
7014 REG_WR(sc,
7015 (BAR_USTRORM_INTMEM +
7016 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7017 pause_enabled);
7018 }
7019
7020 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7021 pstats = BXE_SP(sc, port_stats);
7022 /* reset old mac stats */
7023 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7024 }
7025
7026 if (sc->state == BXE_STATE_OPEN) {
7027 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7028 /* Restart tx when the link comes back. */
7029 FOR_EACH_ETH_QUEUE(sc, i) {
7030 fp = &sc->fp[i];
7031 taskqueue_enqueue(fp->tq, &fp->tx_task);
7032 }
7033 }
7034
7035 }
7036
7037 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7038 cmng_fns = bxe_get_cmng_fns_mode(sc);
7039
7040 if (cmng_fns != CMNG_FNS_NONE) {
7041 bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7042 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7043 } else {
7044 /* rate shaping and fairness are disabled */
7045 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7046 }
7047 }
7048
7049 bxe_link_report_locked(sc);
7050
7051 if (IS_MF(sc)) {
7052 ; // XXX bxe_link_sync_notify(sc);
7053 }
7054 }
7055
7056 static void
7057 bxe_attn_int_asserted(struct bxe_softc *sc,
7058 uint32_t asserted)
7059 {
7060 int port = SC_PORT(sc);
7061 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7062 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7063 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7064 NIG_REG_MASK_INTERRUPT_PORT0;
7065 uint32_t aeu_mask;
7066 uint32_t nig_mask = 0;
7067 uint32_t reg_addr;
7068 uint32_t igu_acked;
7069 uint32_t cnt;
7070
7071 if (sc->attn_state & asserted) {
7072 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7073 }
7074
7075 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7076
7077 aeu_mask = REG_RD(sc, aeu_addr);
7078
7079 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7080 aeu_mask, asserted);
7081
7082 aeu_mask &= ~(asserted & 0x3ff);
7083
7084 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7085
7086 REG_WR(sc, aeu_addr, aeu_mask);
7087
7088 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7089
7090 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7091 sc->attn_state |= asserted;
7092 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7093
7094 if (asserted & ATTN_HARD_WIRED_MASK) {
7095 if (asserted & ATTN_NIG_FOR_FUNC) {
7096
7097 bxe_acquire_phy_lock(sc);
7098 /* save nig interrupt mask */
7099 nig_mask = REG_RD(sc, nig_int_mask_addr);
7100
7101 /* If nig_mask is not set, no need to call the update function */
7102 if (nig_mask) {
7103 REG_WR(sc, nig_int_mask_addr, 0);
7104
7105 bxe_link_attn(sc);
7106 }
7107
7108 /* handle unicore attn? */
7109 }
7110
7111 if (asserted & ATTN_SW_TIMER_4_FUNC) {
7112 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7113 }
7114
7115 if (asserted & GPIO_2_FUNC) {
7116 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7117 }
7118
7119 if (asserted & GPIO_3_FUNC) {
7120 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7121 }
7122
7123 if (asserted & GPIO_4_FUNC) {
7124 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7125 }
7126
7127 if (port == 0) {
7128 if (asserted & ATTN_GENERAL_ATTN_1) {
7129 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7130 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7131 }
7132 if (asserted & ATTN_GENERAL_ATTN_2) {
7133 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7134 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7135 }
7136 if (asserted & ATTN_GENERAL_ATTN_3) {
7137 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7138 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7139 }
7140 } else {
7141 if (asserted & ATTN_GENERAL_ATTN_4) {
7142 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7143 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7144 }
7145 if (asserted & ATTN_GENERAL_ATTN_5) {
7146 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7147 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7148 }
7149 if (asserted & ATTN_GENERAL_ATTN_6) {
7150 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7151 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7152 }
7153 }
7154 } /* hardwired */
7155
7156 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7157 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7158 } else {
7159 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7160 }
7161
7162 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7163 asserted,
7164 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7165 REG_WR(sc, reg_addr, asserted);
7166
7167 /* now set back the mask */
7168 if (asserted & ATTN_NIG_FOR_FUNC) {
7169 /*
7170 * Verify that IGU ack through BAR was written before restoring
7171 * NIG mask. This loop should exit after 2-3 iterations max.
7172 */
7173 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7174 cnt = 0;
7175
7176 do {
7177 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7178 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7179 (++cnt < MAX_IGU_ATTN_ACK_TO));
7180
7181 if (!igu_acked) {
7182 BLOGE(sc, "Failed to verify IGU ack on time\n");
7183 }
7184
7185 mb();
7186 }
7187
7188 REG_WR(sc, nig_int_mask_addr, nig_mask);
7189
7190 bxe_release_phy_lock(sc);
7191 }
7192 }
7193
7194 static void
7195 bxe_print_next_block(struct bxe_softc *sc,
7196 int idx,
7197 const char *blk)
7198 {
7199 BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7200 }
7201
7202 static int
7203 bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7204 uint32_t sig,
7205 int par_num,
7206 uint8_t print)
7207 {
7208 uint32_t cur_bit = 0;
7209 int i = 0;
7210
7211 for (i = 0; sig; i++) {
7212 cur_bit = ((uint32_t)0x1 << i);
7213 if (sig & cur_bit) {
7214 switch (cur_bit) {
7215 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7216 if (print)
7217 bxe_print_next_block(sc, par_num++, "BRB");
7218 break;
7219 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7220 if (print)
7221 bxe_print_next_block(sc, par_num++, "PARSER");
7222 break;
7223 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7224 if (print)
7225 bxe_print_next_block(sc, par_num++, "TSDM");
7226 break;
7227 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7228 if (print)
7229 bxe_print_next_block(sc, par_num++, "SEARCHER");
7230 break;
7231 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7232 if (print)
7233 bxe_print_next_block(sc, par_num++, "TCM");
7234 break;
7235 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7236 if (print)
7237 bxe_print_next_block(sc, par_num++, "TSEMI");
7238 break;
7239 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7240 if (print)
7241 bxe_print_next_block(sc, par_num++, "XPB");
7242 break;
7243 }
7244
7245 /* Clear the bit */
7246 sig &= ~cur_bit;
7247 }
7248 }
7249
7250 return (par_num);
7251 }
7252
7253 static int
7254 bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7255 uint32_t sig,
7256 int par_num,
7257 uint8_t *global,
7258 uint8_t print)
7259 {
7260 int i = 0;
7261 uint32_t cur_bit = 0;
7262 for (i = 0; sig; i++) {
7263 cur_bit = ((uint32_t)0x1 << i);
7264 if (sig & cur_bit) {
7265 switch (cur_bit) {
7266 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7267 if (print)
7268 bxe_print_next_block(sc, par_num++, "PBF");
7269 break;
7270 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7271 if (print)
7272 bxe_print_next_block(sc, par_num++, "QM");
7273 break;
7274 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7275 if (print)
7276 bxe_print_next_block(sc, par_num++, "TM");
7277 break;
7278 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7279 if (print)
7280 bxe_print_next_block(sc, par_num++, "XSDM");
7281 break;
7282 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7283 if (print)
7284 bxe_print_next_block(sc, par_num++, "XCM");
7285 break;
7286 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7287 if (print)
7288 bxe_print_next_block(sc, par_num++, "XSEMI");
7289 break;
7290 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7291 if (print)
7292 bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7293 break;
7294 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7295 if (print)
7296 bxe_print_next_block(sc, par_num++, "NIG");
7297 break;
7298 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7299 if (print)
7300 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7301 *global = TRUE;
7302 break;
7303 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7304 if (print)
7305 bxe_print_next_block(sc, par_num++, "DEBUG");
7306 break;
7307 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7308 if (print)
7309 bxe_print_next_block(sc, par_num++, "USDM");
7310 break;
7311 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7312 if (print)
7313 bxe_print_next_block(sc, par_num++, "UCM");
7314 break;
7315 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7316 if (print)
7317 bxe_print_next_block(sc, par_num++, "USEMI");
7318 break;
7319 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7320 if (print)
7321 bxe_print_next_block(sc, par_num++, "UPB");
7322 break;
7323 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7324 if (print)
7325 bxe_print_next_block(sc, par_num++, "CSDM");
7326 break;
7327 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7328 if (print)
7329 bxe_print_next_block(sc, par_num++, "CCM");
7330 break;
7331 }
7332
7333 /* Clear the bit */
7334 sig &= ~cur_bit;
7335 }
7336 }
7337
7338 return (par_num);
7339 }
7340
7341 static int
7342 bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7343 uint32_t sig,
7344 int par_num,
7345 uint8_t print)
7346 {
7347 uint32_t cur_bit = 0;
7348 int i = 0;
7349
7350 for (i = 0; sig; i++) {
7351 cur_bit = ((uint32_t)0x1 << i);
7352 if (sig & cur_bit) {
7353 switch (cur_bit) {
7354 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7355 if (print)
7356 bxe_print_next_block(sc, par_num++, "CSEMI");
7357 break;
7358 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7359 if (print)
7360 bxe_print_next_block(sc, par_num++, "PXP");
7361 break;
7362 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7363 if (print)
7364 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7365 break;
7366 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7367 if (print)
7368 bxe_print_next_block(sc, par_num++, "CFC");
7369 break;
7370 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7371 if (print)
7372 bxe_print_next_block(sc, par_num++, "CDU");
7373 break;
7374 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7375 if (print)
7376 bxe_print_next_block(sc, par_num++, "DMAE");
7377 break;
7378 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7379 if (print)
7380 bxe_print_next_block(sc, par_num++, "IGU");
7381 break;
7382 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7383 if (print)
7384 bxe_print_next_block(sc, par_num++, "MISC");
7385 break;
7386 }
7387
7388 /* Clear the bit */
7389 sig &= ~cur_bit;
7390 }
7391 }
7392
7393 return (par_num);
7394 }
7395
7396 static int
7397 bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7398 uint32_t sig,
7399 int par_num,
7400 uint8_t *global,
7401 uint8_t print)
7402 {
7403 uint32_t cur_bit = 0;
7404 int i = 0;
7405
7406 for (i = 0; sig; i++) {
7407 cur_bit = ((uint32_t)0x1 << i);
7408 if (sig & cur_bit) {
7409 switch (cur_bit) {
7410 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7411 if (print)
7412 bxe_print_next_block(sc, par_num++, "MCP ROM");
7413 *global = TRUE;
7414 break;
7415 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7416 if (print)
7417 bxe_print_next_block(sc, par_num++,
7418 "MCP UMP RX");
7419 *global = TRUE;
7420 break;
7421 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7422 if (print)
7423 bxe_print_next_block(sc, par_num++,
7424 "MCP UMP TX");
7425 *global = TRUE;
7426 break;
7427 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7428 if (print)
7429 bxe_print_next_block(sc, par_num++,
7430 "MCP SCPAD");
7431 *global = TRUE;
7432 break;
7433 }
7434
7435 /* Clear the bit */
7436 sig &= ~cur_bit;
7437 }
7438 }
7439
7440 return (par_num);
7441 }
7442
7443 static int
7444 bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7445 uint32_t sig,
7446 int par_num,
7447 uint8_t print)
7448 {
7449 uint32_t cur_bit = 0;
7450 int i = 0;
7451
7452 for (i = 0; sig; i++) {
7453 cur_bit = ((uint32_t)0x1 << i);
7454 if (sig & cur_bit) {
7455 switch (cur_bit) {
7456 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7457 if (print)
7458 bxe_print_next_block(sc, par_num++, "PGLUE_B");
7459 break;
7460 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7461 if (print)
7462 bxe_print_next_block(sc, par_num++, "ATC");
7463 break;
7464 }
7465
7466 /* Clear the bit */
7467 sig &= ~cur_bit;
7468 }
7469 }
7470
7471 return (par_num);
7472 }
7473
7474 static uint8_t
7475 bxe_parity_attn(struct bxe_softc *sc,
7476 uint8_t *global,
7477 uint8_t print,
7478 uint32_t *sig)
7479 {
7480 int par_num = 0;
7481
7482 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7483 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7484 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7485 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7486 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7487 BLOGE(sc, "Parity error: HW block parity attention:\n"
7488 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7489 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7490 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7491 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7492 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7493 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7494
7495 if (print)
7496 BLOGI(sc, "Parity errors detected in blocks: ");
7497
7498 par_num =
7499 bxe_check_blocks_with_parity0(sc, sig[0] &
7500 HW_PRTY_ASSERT_SET_0,
7501 par_num, print);
7502 par_num =
7503 bxe_check_blocks_with_parity1(sc, sig[1] &
7504 HW_PRTY_ASSERT_SET_1,
7505 par_num, global, print);
7506 par_num =
7507 bxe_check_blocks_with_parity2(sc, sig[2] &
7508 HW_PRTY_ASSERT_SET_2,
7509 par_num, print);
7510 par_num =
7511 bxe_check_blocks_with_parity3(sc, sig[3] &
7512 HW_PRTY_ASSERT_SET_3,
7513 par_num, global, print);
7514 par_num =
7515 bxe_check_blocks_with_parity4(sc, sig[4] &
7516 HW_PRTY_ASSERT_SET_4,
7517 par_num, print);
7518
7519 if (print)
7520 BLOGI(sc, "\n");
7521
7522         if (*global == TRUE) {
7523 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7524 }
7525
7526 return (TRUE);
7527 }
7528
7529 return (FALSE);
7530 }
7531
7532 static uint8_t
7533 bxe_chk_parity_attn(struct bxe_softc *sc,
7534 uint8_t *global,
7535 uint8_t print)
7536 {
7537 struct attn_route attn = { {0} };
7538 int port = SC_PORT(sc);
7539
7540     if (sc->state != BXE_STATE_OPEN)
7541         return (FALSE);
7542
7543 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7544 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7545 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7546 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7547
7548 /*
7549 * Since MCP attentions can't be disabled inside the block, we need to
7550 * read AEU registers to see whether they're currently disabled
7551 */
7552 attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7553 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7554 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7555 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7556
7557
7558 if (!CHIP_IS_E1x(sc))
7559 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7560
7561 return (bxe_parity_attn(sc, global, print, attn.sig));
7562 }
7563
7564 static void
7565 bxe_attn_int_deasserted4(struct bxe_softc *sc,
7566 uint32_t attn)
7567 {
7568 uint32_t val;
7569 boolean_t err_flg = FALSE;
7570
7571 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7572 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7573 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7574 err_flg = TRUE;
7575 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7576 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7577 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7578 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7579 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7580 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7581 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7582 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7583 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7584 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7585 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7586 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7587 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7588 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7589 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7590 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7591 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7592 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7593 }
7594
7595 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7596 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7597 BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7598 err_flg = TRUE;
7599 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7600 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7601 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7602 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7603 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7604 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7605 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7606 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7607 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7608 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7609 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7610 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7611 }
7612
7613 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7614 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7615 BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7616 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7617 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7618 err_flg = TRUE;
7619 }
7620 if (err_flg) {
7621 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7622 taskqueue_enqueue_timeout(taskqueue_thread,
7623 &sc->sp_err_timeout_task, hz/10);
7624 }
7625
7626 }
7627
7628 static void
7629 bxe_e1h_disable(struct bxe_softc *sc)
7630 {
7631 int port = SC_PORT(sc);
7632
7633 bxe_tx_disable(sc);
7634
7635 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7636 }
7637
7638 static void
7639 bxe_e1h_enable(struct bxe_softc *sc)
7640 {
7641 int port = SC_PORT(sc);
7642
7643 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7644
7645 // XXX bxe_tx_enable(sc);
7646 }
7647
7648 /*
7649 * called due to MCP event (on pmf):
7650 * reread new bandwidth configuration
7651 * configure FW
7652  *     notify other functions about the change
7653 */
7654 static void
7655 bxe_config_mf_bw(struct bxe_softc *sc)
7656 {
7657 if (sc->link_vars.link_up) {
7658 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7659 // XXX bxe_link_sync_notify(sc);
7660 }
7661
7662 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7663 }
7664
7665 static void
7666 bxe_set_mf_bw(struct bxe_softc *sc)
7667 {
7668 bxe_config_mf_bw(sc);
7669 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7670 }
7671
7672 static void
7673 bxe_handle_eee_event(struct bxe_softc *sc)
7674 {
7675 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7676 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7677 }
7678
7679 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7680
7681 static void
7682 bxe_drv_info_ether_stat(struct bxe_softc *sc)
7683 {
7684 struct eth_stats_info *ether_stat =
7685 &sc->sp->drv_info_to_mcp.ether_stat;
7686
7687 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7688 ETH_STAT_INFO_VERSION_LEN);
7689
7690 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7691 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7692 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7693 ether_stat->mac_local + MAC_PAD,
7694 MAC_PAD, ETH_ALEN);
7695
7696 ether_stat->mtu_size = sc->mtu;
7697
7698 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7699 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7700 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7701 }
7702
7703 // XXX ether_stat->feature_flags |= ???;
7704
7705 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7706
7707 ether_stat->txq_size = sc->tx_ring_size;
7708 ether_stat->rxq_size = sc->rx_ring_size;
7709 }
7710
7711 static void
7712 bxe_handle_drv_info_req(struct bxe_softc *sc)
7713 {
7714 enum drv_info_opcode op_code;
7715 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7716
7717 /* if drv_info version supported by MFW doesn't match - send NACK */
7718 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7719 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7720 return;
7721 }
7722
7723 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7724 DRV_INFO_CONTROL_OP_CODE_SHIFT);
7725
7726 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7727
7728 switch (op_code) {
7729 case ETH_STATS_OPCODE:
7730 bxe_drv_info_ether_stat(sc);
7731 break;
7732 case FCOE_STATS_OPCODE:
7733 case ISCSI_STATS_OPCODE:
7734 default:
7735 /* if op code isn't supported - send NACK */
7736 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7737 return;
7738 }
7739
7740 /*
7741      * If we got a drv_info attention from the MFW then these fields are
7742      * guaranteed to be defined in shmem2.
7743 */
7744 SHMEM2_WR(sc, drv_info_host_addr_lo,
7745 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7746 SHMEM2_WR(sc, drv_info_host_addr_hi,
7747 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7748
7749 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7750 }
7751
7752 static void
7753 bxe_dcc_event(struct bxe_softc *sc,
7754 uint32_t dcc_event)
7755 {
7756 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7757
7758 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7759 /*
7760 * This is the only place besides the function initialization
7761 * where the sc->flags can change so it is done without any
7762 * locks
7763 */
7764 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7765 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7766 sc->flags |= BXE_MF_FUNC_DIS;
7767 bxe_e1h_disable(sc);
7768 } else {
7769 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7770 sc->flags &= ~BXE_MF_FUNC_DIS;
7771 bxe_e1h_enable(sc);
7772 }
7773 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7774 }
7775
7776 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7777 bxe_config_mf_bw(sc);
7778 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7779 }
7780
7781 /* Report results to MCP */
7782 if (dcc_event)
7783 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7784 else
7785 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7786 }
7787
7788 static void
7789 bxe_pmf_update(struct bxe_softc *sc)
7790 {
7791 int port = SC_PORT(sc);
7792 uint32_t val;
7793
7794 sc->port.pmf = 1;
7795 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7796
7797 /*
7798 * We need the mb() to ensure the ordering between the writing to
7799 * sc->port.pmf here and reading it from the bxe_periodic_task().
7800 */
7801 mb();
7802
7803 /* queue a periodic task */
7804 // XXX schedule task...
7805
7806 // XXX bxe_dcbx_pmf_update(sc);
7807
7808 /* enable nig attention */
7809 val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7810 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7811 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7812 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7813 } else if (!CHIP_IS_E1x(sc)) {
7814 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7815 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7816 }
7817
7818 bxe_stats_handle(sc, STATS_EVENT_PMF);
7819 }
7820
7821 static int
7822 bxe_mc_assert(struct bxe_softc *sc)
7823 {
7824 char last_idx;
7825 int i, rc = 0;
7826 uint32_t row0, row1, row2, row3;
7827
7828 /* XSTORM */
7829 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7830 if (last_idx)
7831 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7832
7833 /* print the asserts */
7834 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7835
7836 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7837 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7838 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7839 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7840
7841 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7842 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7843 i, row3, row2, row1, row0);
7844 rc++;
7845 } else {
7846 break;
7847 }
7848 }
7849
7850 /* TSTORM */
7851 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7852 if (last_idx) {
7853 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7854 }
7855
7856 /* print the asserts */
7857 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7858
7859 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7860 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7861 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7862 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7863
7864 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7865 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7866 i, row3, row2, row1, row0);
7867 rc++;
7868 } else {
7869 break;
7870 }
7871 }
7872
7873 /* CSTORM */
7874 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7875 if (last_idx) {
7876 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7877 }
7878
7879 /* print the asserts */
7880 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7881
7882 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7883 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7884 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7885 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7886
7887 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7888 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7889 i, row3, row2, row1, row0);
7890 rc++;
7891 } else {
7892 break;
7893 }
7894 }
7895
7896 /* USTORM */
7897 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7898 if (last_idx) {
7899 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7900 }
7901
7902 /* print the asserts */
7903 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7904
7905 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7906 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7907 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7908 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7909
7910 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7911 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7912 i, row3, row2, row1, row0);
7913 rc++;
7914 } else {
7915 break;
7916 }
7917 }
7918
7919 return (rc);
7920 }
7921
7922 static void
7923 bxe_attn_int_deasserted3(struct bxe_softc *sc,
7924 uint32_t attn)
7925 {
7926 int func = SC_FUNC(sc);
7927 uint32_t val;
7928
7929 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7930
7931 if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7932
7933 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7934 bxe_read_mf_cfg(sc);
7935 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7936 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7937 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7938
7939 if (val & DRV_STATUS_DCC_EVENT_MASK)
7940 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7941
7942 if (val & DRV_STATUS_SET_MF_BW)
7943 bxe_set_mf_bw(sc);
7944
7945 if (val & DRV_STATUS_DRV_INFO_REQ)
7946 bxe_handle_drv_info_req(sc);
7947
7948 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7949 bxe_pmf_update(sc);
7950
7951 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7952 bxe_handle_eee_event(sc);
7953
7954 if (sc->link_vars.periodic_flags &
7955 ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7956 /* sync with link */
7957 bxe_acquire_phy_lock(sc);
7958 sc->link_vars.periodic_flags &=
7959 ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7960 bxe_release_phy_lock(sc);
7961 if (IS_MF(sc))
7962 ; // XXX bxe_link_sync_notify(sc);
7963 bxe_link_report(sc);
7964 }
7965
7966 /*
7967 * Always call it here: bxe_link_report() will
7968 * prevent the link indication duplication.
7969 */
7970 bxe_link_status_update(sc);
7971
7972 } else if (attn & BXE_MC_ASSERT_BITS) {
7973
7974 BLOGE(sc, "MC assert!\n");
7975 bxe_mc_assert(sc);
7976 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7977 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7978 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7979 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7980 bxe_int_disable(sc);
7981 BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7982 taskqueue_enqueue_timeout(taskqueue_thread,
7983 &sc->sp_err_timeout_task, hz/10);
7984
7985 } else if (attn & BXE_MCP_ASSERT) {
7986
7987 BLOGE(sc, "MCP assert!\n");
7988 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7989 BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7990 taskqueue_enqueue_timeout(taskqueue_thread,
7991 &sc->sp_err_timeout_task, hz/10);
7992             bxe_int_disable(sc); /* avoid repetitive assert alerts */
7993
7994
7995 } else {
7996 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7997 }
7998 }
7999
8000 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
8001 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8002 if (attn & BXE_GRC_TIMEOUT) {
8003 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8004 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8005 }
8006 if (attn & BXE_GRC_RSV) {
8007 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8008 BLOGE(sc, "GRC reserved 0x%08x\n", val);
8009 }
8010 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8011 }
8012 }
8013
8014 static void
8015 bxe_attn_int_deasserted2(struct bxe_softc *sc,
8016 uint32_t attn)
8017 {
8018 int port = SC_PORT(sc);
8019 int reg_offset;
8020 uint32_t val0, mask0, val1, mask1;
8021 uint32_t val;
8022 boolean_t err_flg = FALSE;
8023
8024 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8025 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8026 BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8027 /* CFC error attention */
8028 if (val & 0x2) {
8029 BLOGE(sc, "FATAL error from CFC\n");
8030 err_flg = TRUE;
8031 }
8032 }
8033
8034 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8035 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8036 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8037 /* RQ_USDMDP_FIFO_OVERFLOW */
8038 if (val & 0x18000) {
8039 BLOGE(sc, "FATAL error from PXP\n");
8040 err_flg = TRUE;
8041 }
8042
8043 if (!CHIP_IS_E1x(sc)) {
8044 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8045 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8046 err_flg = TRUE;
8047 }
8048 }
8049
8050 #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8051 #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8052
8053 if (attn & AEU_PXP2_HW_INT_BIT) {
8054 /* CQ47854 workaround do not panic on
8055 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8056 */
8057 if (!CHIP_IS_E1x(sc)) {
8058 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8059 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8060 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8061 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
8062 /*
8063              * If PXP2_EOP_ERROR_BIT is the only bit set in STS0 and
8064              * nothing is set in STS1, clear it by reading STS_CLR0.
8065              *
8066              * We may lose additional attentions raised between the reads
8067              * of STS0 and STS_CLR0; in that case the user will not be
8068              * notified about them.
8069 */
8070 if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8071 !(val1 & mask1))
8072 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8073
8074 /* print the register, since no one can restore it */
8075 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8076
8077 /*
8078 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8079 * then notify
8080 */
8081 if (val0 & PXP2_EOP_ERROR_BIT) {
8082 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8083 err_flg = TRUE;
8084
8085 /*
8086 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8087 * set then clear attention from PXP2 block without panic
8088 */
8089 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8090 ((val1 & mask1) == 0))
8091 attn &= ~AEU_PXP2_HW_INT_BIT;
8092 }
8093 }
8094 }
8095
8096 if (attn & HW_INTERRUT_ASSERT_SET_2) {
8097 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8098 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8099
8100 val = REG_RD(sc, reg_offset);
8101 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8102 REG_WR(sc, reg_offset, val);
8103
8104 BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8105 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8106 err_flg = TRUE;
8107 bxe_panic(sc, ("HW block attention set2\n"));
8108 }
8109 if(err_flg) {
8110 BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
8111 taskqueue_enqueue_timeout(taskqueue_thread,
8112 &sc->sp_err_timeout_task, hz/10);
8113 }
8114
8115 }
8116
8117 static void
8118 bxe_attn_int_deasserted1(struct bxe_softc *sc,
8119 uint32_t attn)
8120 {
8121 int port = SC_PORT(sc);
8122 int reg_offset;
8123 uint32_t val;
8124 boolean_t err_flg = FALSE;
8125
8126 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8127 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8128 BLOGE(sc, "DB hw attention 0x%08x\n", val);
8129 /* DORQ discard attention */
8130 if (val & 0x2) {
8131 BLOGE(sc, "FATAL error from DORQ\n");
8132 err_flg = TRUE;
8133 }
8134 }
8135
8136 if (attn & HW_INTERRUT_ASSERT_SET_1) {
8137 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8138 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8139
8140 val = REG_RD(sc, reg_offset);
8141 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8142 REG_WR(sc, reg_offset, val);
8143
8144 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8145 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8146 err_flg = TRUE;
8147 bxe_panic(sc, ("HW block attention set1\n"));
8148 }
8149 if(err_flg) {
8150 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8151 taskqueue_enqueue_timeout(taskqueue_thread,
8152 &sc->sp_err_timeout_task, hz/10);
8153 }
8154
8155 }
8156
8157 static void
8158 bxe_attn_int_deasserted0(struct bxe_softc *sc,
8159 uint32_t attn)
8160 {
8161 int port = SC_PORT(sc);
8162 int reg_offset;
8163 uint32_t val;
8164
8165 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8166 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8167
8168 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8169 val = REG_RD(sc, reg_offset);
8170 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8171 REG_WR(sc, reg_offset, val);
8172
8173 BLOGW(sc, "SPIO5 hw attention\n");
8174
8175 /* Fan failure attention */
8176 elink_hw_reset_phy(&sc->link_params);
8177 bxe_fan_failure(sc);
8178 }
8179
8180 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8181 bxe_acquire_phy_lock(sc);
8182 elink_handle_module_detect_int(&sc->link_params);
8183 bxe_release_phy_lock(sc);
8184 }
8185
8186 if (attn & HW_INTERRUT_ASSERT_SET_0) {
8187 val = REG_RD(sc, reg_offset);
8188 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8189 REG_WR(sc, reg_offset, val);
8190
8191
8192 BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8193 taskqueue_enqueue_timeout(taskqueue_thread,
8194 &sc->sp_err_timeout_task, hz/10);
8195
8196         bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
8197                        (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8198 }
8199 }
8200
8201 static void
8202 bxe_attn_int_deasserted(struct bxe_softc *sc,
8203 uint32_t deasserted)
8204 {
8205 struct attn_route attn;
8206 struct attn_route *group_mask;
8207 int port = SC_PORT(sc);
8208 int index;
8209 uint32_t reg_addr;
8210 uint32_t val;
8211 uint32_t aeu_mask;
8212 uint8_t global = FALSE;
8213
8214 /*
8215 * Need to take HW lock because MCP or other port might also
8216 * try to handle this event.
8217 */
8218 bxe_acquire_alr(sc);
8219
8220 if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8221 /* XXX
8222 * In case of parity errors don't handle attentions so that
8223          * other functions would "see" the parity errors.
8224 */
8225 // XXX schedule a recovery task...
8226 /* disable HW interrupts */
8227 bxe_int_disable(sc);
8228 BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8229 taskqueue_enqueue_timeout(taskqueue_thread,
8230 &sc->sp_err_timeout_task, hz/10);
8231 bxe_release_alr(sc);
8232 return;
8233 }
8234
8235 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8236 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8237 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8238 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8239 if (!CHIP_IS_E1x(sc)) {
8240 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8241 } else {
8242 attn.sig[4] = 0;
8243 }
8244
8245 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8246 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8247
8248 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8249 if (deasserted & (1 << index)) {
8250 group_mask = &sc->attn_group[index];
8251
8252 BLOGD(sc, DBG_INTR,
8253 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8254 group_mask->sig[0], group_mask->sig[1],
8255 group_mask->sig[2], group_mask->sig[3],
8256 group_mask->sig[4]);
8257
8258 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8259 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8260 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8261 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8262 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8263 }
8264 }
8265
8266 bxe_release_alr(sc);
8267
8268 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8269 reg_addr = (HC_REG_COMMAND_REG + port*32 +
8270 COMMAND_REG_ATTN_BITS_CLR);
8271 } else {
8272 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8273 }
8274
8275 val = ~deasserted;
8276 BLOGD(sc, DBG_INTR,
8277 "about to mask 0x%08x at %s addr 0x%08x\n", val,
8278 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8279 REG_WR(sc, reg_addr, val);
8280
8281 if (~sc->attn_state & deasserted) {
8282 BLOGE(sc, "IGU error\n");
8283 }
8284
8285 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8286 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8287
8288 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8289
8290 aeu_mask = REG_RD(sc, reg_addr);
8291
8292 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8293 aeu_mask, deasserted);
8294 aeu_mask |= (deasserted & 0x3ff);
8295 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8296
8297 REG_WR(sc, reg_addr, aeu_mask);
8298 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8299
8300 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8301 sc->attn_state &= ~deasserted;
8302 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8303 }
8304
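/*
 * Dispatch attention interrupts: compare the attention bits reported by
 * the default status block against the acked bits and our cached state
 * to find newly asserted and deasserted attentions, then handle each.
 */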
8305 static void
8306 bxe_attn_int(struct bxe_softc *sc)
8307 {
8308 /* read local copy of bits */
8309 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8310 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8311 uint32_t attn_state = sc->attn_state;
8312
8313 /* look for changed bits */
8314 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
8315 uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
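    /*
     * e.g. a bit set in attn_bits but clear in both attn_ack and the
     * cached attn_state is a newly asserted attention, while a bit clear
     * in attn_bits but still set in attn_ack and attn_state has just
     * deasserted.
     */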
8316
8317 BLOGD(sc, DBG_INTR,
8318 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8319 attn_bits, attn_ack, asserted, deasserted);
8320
8321 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8322 BLOGE(sc, "BAD attention state\n");
8323 }
8324
8325 /* handle bits that were raised */
8326 if (asserted) {
8327 bxe_attn_int_asserted(sc, asserted);
8328 }
8329
8330 if (deasserted) {
8331 bxe_attn_int_deasserted(sc, deasserted);
8332 }
8333 }
8334
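/*
 * Check the default status block for updated attention and slowpath
 * indices. Returns a mask of BXE_DEF_SB_ATT_IDX and/or BXE_DEF_SB_IDX
 * for each index that advanced since the last check.
 */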
8335 static uint16_t
8336 bxe_update_dsb_idx(struct bxe_softc *sc)
8337 {
8338 struct host_sp_status_block *def_sb = sc->def_sb;
8339 uint16_t rc = 0;
8340
8341 mb(); /* status block is written to by the chip */
8342
8343 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8344 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8345 rc |= BXE_DEF_SB_ATT_IDX;
8346 }
8347
8348 if (sc->def_idx != def_sb->sp_sb.running_index) {
8349 sc->def_idx = def_sb->sp_sb.running_index;
8350 rc |= BXE_DEF_SB_IDX;
8351 }
8352
8353 mb();
8354
8355 return (rc);
8356 }
8357
8358 static inline struct ecore_queue_sp_obj *
8359 bxe_cid_to_q_obj(struct bxe_softc *sc,
8360 uint32_t cid)
8361 {
8362 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8363 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8364 }
8365
8366 static void
8367 bxe_handle_mcast_eqe(struct bxe_softc *sc)
8368 {
8369 struct ecore_mcast_ramrod_params rparam;
8370 int rc;
8371
8372 memset(&rparam, 0, sizeof(rparam));
8373
8374 rparam.mcast_obj = &sc->mcast_obj;
8375
8376 BXE_MCAST_LOCK(sc);
8377
8378 /* clear pending state for the last command */
8379 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8380
8381 /* if there are pending mcast commands - send them */
8382 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8383 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8384 if (rc < 0) {
8385 BLOGD(sc, DBG_SP,
8386 "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8387 }
8388 }
8389
8390 BXE_MCAST_UNLOCK(sc);
8391 }
8392
8393 static void
8394 bxe_handle_classification_eqe(struct bxe_softc *sc,
8395 union event_ring_elem *elem)
8396 {
8397 unsigned long ramrod_flags = 0;
8398 int rc = 0;
8399 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8400 struct ecore_vlan_mac_obj *vlan_mac_obj;
8401
8402 /* always push next commands out, don't wait here */
8403 bit_set(&ramrod_flags, RAMROD_CONT);
8404
8405 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8406 case ECORE_FILTER_MAC_PENDING:
8407 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8408 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8409 break;
8410
8411 case ECORE_FILTER_MCAST_PENDING:
8412 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8413 /*
8414 * This is only relevant for 57710 where multicast MACs are
8415 * configured as unicast MACs using the same ramrod.
8416 */
8417 bxe_handle_mcast_eqe(sc);
8418 return;
8419
8420 default:
8421 BLOGE(sc, "Unsupported classification command: %d\n",
8422 elem->message.data.eth_event.echo);
8423 return;
8424 }
8425
8426 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8427
8428 if (rc < 0) {
8429 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8430 } else if (rc > 0) {
8431 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8432 }
8433 }
8434
8435 static void
8436 bxe_handle_rx_mode_eqe(struct bxe_softc *sc,
8437 union event_ring_elem *elem)
8438 {
8439 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8440
8441 /* send rx_mode command again if was requested */
8442 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8443 &sc->sp_state)) {
8444 bxe_set_storm_rx_mode(sc);
8445 }
8446 }
8447
8448 static void
8449 bxe_update_eq_prod(struct bxe_softc *sc,
8450 uint16_t prod)
8451 {
8452 storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8453 wmb(); /* keep prod updates ordered */
8454 }
8455
8456 static void
8457 bxe_eq_int(struct bxe_softc *sc)
8458 {
8459 uint16_t hw_cons, sw_cons, sw_prod;
8460 union event_ring_elem *elem;
8461 uint8_t echo;
8462 uint32_t cid;
8463 uint8_t opcode;
8464 int spqe_cnt = 0;
8465 struct ecore_queue_sp_obj *q_obj;
8466 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8467 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8468
8469 hw_cons = le16toh(*sc->eq_cons_sb);
8470
8471 /*
8472      * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
8473      * When we get to the next-page element we need to adjust so the loop
8474      * condition below will be met. The next element is the size of a
8475      * regular element, hence incrementing by 1.
8476 */
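    /*
     * For example, assuming 256-entry pages whose last descriptor serves
     * as the next-page pointer: a raw hw_cons of 255 lands on that
     * pointer slot, so bumping it by one skips the slot and keeps
     * hw_cons comparable with sw_cons in the loop below.
     */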
8477 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8478 hw_cons++;
8479 }
8480
8481 /*
8482      * This function never runs in parallel with itself for a specific
8483      * sc, so no read memory barrier is needed here.
8484 */
8485 sw_cons = sc->eq_cons;
8486 sw_prod = sc->eq_prod;
8487
8488     BLOGD(sc, DBG_SP, "EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8489 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8490
8491 for (;
8492 sw_cons != hw_cons;
8493 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8494
8495 elem = &sc->eq[EQ_DESC(sw_cons)];
8496
8497 /* elem CID originates from FW, actually LE */
8498 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8499 opcode = elem->message.opcode;
8500
8501 /* handle eq element */
8502 switch (opcode) {
8503
8504 case EVENT_RING_OPCODE_STAT_QUERY:
8505 BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8506 sc->stats_comp++);
8507 /* nothing to do with stats comp */
8508 goto next_spqe;
8509
8510 case EVENT_RING_OPCODE_CFC_DEL:
8511 /* handle according to cid range */
8512 /* we may want to verify here that the sc state is HALTING */
8513 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8514 q_obj = bxe_cid_to_q_obj(sc, cid);
8515 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8516 break;
8517 }
8518 goto next_spqe;
8519
8520 case EVENT_RING_OPCODE_STOP_TRAFFIC:
8521 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8522 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8523 break;
8524 }
8525 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8526 goto next_spqe;
8527
8528 case EVENT_RING_OPCODE_START_TRAFFIC:
8529 BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8530 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8531 break;
8532 }
8533 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8534 goto next_spqe;
8535
8536 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8537 echo = elem->message.data.function_update_event.echo;
8538 if (echo == SWITCH_UPDATE) {
8539 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8540 if (f_obj->complete_cmd(sc, f_obj,
8541 ECORE_F_CMD_SWITCH_UPDATE)) {
8542 break;
8543 }
8544 }
8545 else {
8546 BLOGD(sc, DBG_SP,
8547 "AFEX: ramrod completed FUNCTION_UPDATE\n");
8548 }
8549 goto next_spqe;
8550
8551 case EVENT_RING_OPCODE_FORWARD_SETUP:
8552 q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8553 if (q_obj->complete_cmd(sc, q_obj,
8554 ECORE_Q_CMD_SETUP_TX_ONLY)) {
8555 break;
8556 }
8557 goto next_spqe;
8558
8559 case EVENT_RING_OPCODE_FUNCTION_START:
8560 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8561 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8562 break;
8563 }
8564 goto next_spqe;
8565
8566 case EVENT_RING_OPCODE_FUNCTION_STOP:
8567 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8568 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8569 break;
8570 }
8571 goto next_spqe;
8572 }
8573
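        /*
         * The BXE_STATE_* values occupy high bits that do not overlap the
         * EVENT_RING_OPCODE_* values (as the case labels below assume),
         * so OR'ing opcode and state yields a distinct label for each
         * (opcode, state) pair.
         */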
8574 switch (opcode | sc->state) {
8575 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8576 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8577 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8578 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8579 rss_raw->clear_pending(rss_raw);
8580 break;
8581
8582 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8583 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8584 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8585 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8586 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8587 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8588 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8589 bxe_handle_classification_eqe(sc, elem);
8590 break;
8591
8592 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8593 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8594 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8595 BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8596 bxe_handle_mcast_eqe(sc);
8597 break;
8598
8599 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8600 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8601 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8602 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8603 bxe_handle_rx_mode_eqe(sc, elem);
8604 break;
8605
8606 default:
8607             /* unknown event; log an error and continue */
8608 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8609 elem->message.opcode, sc->state);
8610 }
8611
8612 next_spqe:
8613 spqe_cnt++;
8614 } /* for */
8615
8616 mb();
8617 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8618
8619 sc->eq_cons = sw_cons;
8620 sc->eq_prod = sw_prod;
8621
8622     /* make sure the above memory writes were issued before updating the producer */
8623 wmb();
8624
8625 /* update producer */
8626 bxe_update_eq_prod(sc, sc->eq_prod);
8627 }
8628
8629 static void
8630 bxe_handle_sp_tq(void *context,
8631 int pending)
8632 {
8633 struct bxe_softc *sc = (struct bxe_softc *)context;
8634 uint16_t status;
8635
8636 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8637
8638 /* what work needs to be performed? */
8639 status = bxe_update_dsb_idx(sc);
8640
8641 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8642
8643 /* HW attentions */
8644 if (status & BXE_DEF_SB_ATT_IDX) {
8645 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8646 bxe_attn_int(sc);
8647 status &= ~BXE_DEF_SB_ATT_IDX;
8648 }
8649
8650 /* SP events: STAT_QUERY and others */
8651 if (status & BXE_DEF_SB_IDX) {
8652 /* handle EQ completions */
8653 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8654 bxe_eq_int(sc);
8655 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8656 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8657 status &= ~BXE_DEF_SB_IDX;
8658 }
8659
8660     /* if status is non-zero then something went wrong */
8661 if (__predict_false(status)) {
8662 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8663 }
8664
8665 /* ack status block only if something was actually handled */
8666 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8667 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8668
8669 /*
8670 * Must be called after the EQ processing (since eq leads to sriov
8671 * ramrod completion flows).
8672 * This flow may have been scheduled by the arrival of a ramrod
8673 * completion, or by the sriov code rescheduling itself.
8674 */
8675 // XXX bxe_iov_sp_task(sc);
8676
8677 }
8678
8679 static void
8680 bxe_handle_fp_tq(void *context,
8681 int pending)
8682 {
8683 struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8684 struct bxe_softc *sc = fp->sc;
8685 /* uint8_t more_tx = FALSE; */
8686 uint8_t more_rx = FALSE;
8687
8688 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8689
8690 /* XXX
8691 * IFF_DRV_RUNNING state can't be checked here since we process
8692 * slowpath events on a client queue during setup. Instead
8693 * we need to add a "process/continue" flag here that the driver
8694      * can use to tell the task not to do anything.
8695 */
8696 #if 0
8697 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8698 return;
8699 }
8700 #endif
8701
8702 /* update the fastpath index */
8703 bxe_update_fp_sb_idx(fp);
8704
8705 /* XXX add loop here if ever support multiple tx CoS */
8706 /* fp->txdata[cos] */
8707 if (bxe_has_tx_work(fp)) {
8708 BXE_FP_TX_LOCK(fp);
8709 /* more_tx = */ bxe_txeof(sc, fp);
8710 BXE_FP_TX_UNLOCK(fp);
8711 }
8712
8713 if (bxe_has_rx_work(fp)) {
8714 more_rx = bxe_rxeof(sc, fp);
8715 }
8716
8717 if (more_rx /*|| more_tx*/) {
8718 /* still more work to do */
8719 taskqueue_enqueue(fp->tq, &fp->tq_task);
8720 return;
8721 }
8722
8723 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8724 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8725 }
8726
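/*
 * Fastpath work invoked directly from interrupt context; the taskqueue
 * handler above (bxe_handle_fp_tq) performs the same tx/rx completion
 * processing when work is deferred or requeued.
 */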
8727 static void
8728 bxe_task_fp(struct bxe_fastpath *fp)
8729 {
8730 struct bxe_softc *sc = fp->sc;
8731 /* uint8_t more_tx = FALSE; */
8732 uint8_t more_rx = FALSE;
8733
8734 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8735
8736 /* update the fastpath index */
8737 bxe_update_fp_sb_idx(fp);
8738
8739 /* XXX add loop here if ever support multiple tx CoS */
8740 /* fp->txdata[cos] */
8741 if (bxe_has_tx_work(fp)) {
8742 BXE_FP_TX_LOCK(fp);
8743 /* more_tx = */ bxe_txeof(sc, fp);
8744 BXE_FP_TX_UNLOCK(fp);
8745 }
8746
8747 if (bxe_has_rx_work(fp)) {
8748 more_rx = bxe_rxeof(sc, fp);
8749 }
8750
8751 if (more_rx /*|| more_tx*/) {
8752         /* still more work to do, bail out of this ISR and process later */
8753 taskqueue_enqueue(fp->tq, &fp->tq_task);
8754 return;
8755 }
8756
8757 /*
8758 * Here we write the fastpath index taken before doing any tx or rx work.
8759      * It is quite possible that other hw events occurred up to this point
8760      * and were already processed above. Since we are about to write an
8761      * older fastpath index, another interrupt will arrive in which we may
8762      * find no work to do.
8763 */
8764 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8765 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8766 }
8767
8768 /*
8769 * Legacy interrupt entry point.
8770 *
8771 * Verifies that the controller generated the interrupt and
8772 * then calls a separate routine to handle the various
8773 * interrupt causes: link, RX, and TX.
8774 */
8775 static void
8776 bxe_intr_legacy(void *xsc)
8777 {
8778 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8779 struct bxe_fastpath *fp;
8780 uint16_t status, mask;
8781 int i;
8782
8783 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8784
8785 /*
8786 * 0 for ustorm, 1 for cstorm
8787 * the bits returned from ack_int() are 0-15
8788 * bit 0 = attention status block
8789 * bit 1 = fast path status block
8790 * a mask of 0x2 or more = tx/rx event
8791 * a mask of 1 = slow path event
8792 */
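    /*
     * e.g. with CNIC_SUPPORT(sc) == 0, fastpath queue 0 maps to bit 1
     * (mask 0x2) and queue 1 to bit 2 (mask 0x4), matching the per-queue
     * mask computed in the loop below; bit 0 always indicates slowpath.
     */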
8793
8794 status = bxe_ack_int(sc);
8795
8796 /* the interrupt is not for us */
8797 if (__predict_false(status == 0)) {
8798 BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8799 return;
8800 }
8801
8802 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8803
8804 FOR_EACH_ETH_QUEUE(sc, i) {
8805 fp = &sc->fp[i];
8806 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8807 if (status & mask) {
8808 /* acknowledge and disable further fastpath interrupts */
8809 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8810 bxe_task_fp(fp);
8811 status &= ~mask;
8812 }
8813 }
8814
8815 if (__predict_false(status & 0x1)) {
8816 /* acknowledge and disable further slowpath interrupts */
8817 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8818
8819 /* schedule slowpath handler */
8820 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8821
8822 status &= ~0x1;
8823 }
8824
8825 if (__predict_false(status)) {
8826 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8827 }
8828 }
8829
8830 /* slowpath interrupt entry point */
8831 static void
8832 bxe_intr_sp(void *xsc)
8833 {
8834 struct bxe_softc *sc = (struct bxe_softc *)xsc;
8835
8836 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8837
8838 /* acknowledge and disable further slowpath interrupts */
8839 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8840
8841 /* schedule slowpath handler */
8842 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8843 }
8844
8845 /* fastpath interrupt entry point */
8846 static void
8847 bxe_intr_fp(void *xfp)
8848 {
8849 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8850 struct bxe_softc *sc = fp->sc;
8851
8852 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8853
8854 BLOGD(sc, DBG_INTR,
8855 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8856 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8857
8858 /* acknowledge and disable further fastpath interrupts */
8859 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8860
8861 bxe_task_fp(fp);
8862 }
8863
8864 /* Release all interrupts allocated by the driver. */
8865 static void
8866 bxe_interrupt_free(struct bxe_softc *sc)
8867 {
8868 int i;
8869
8870 switch (sc->interrupt_mode) {
8871 case INTR_MODE_INTX:
8872 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8873 if (sc->intr[0].resource != NULL) {
8874 bus_release_resource(sc->dev,
8875 SYS_RES_IRQ,
8876 sc->intr[0].rid,
8877 sc->intr[0].resource);
8878 }
8879 break;
8880 case INTR_MODE_MSI:
8881 for (i = 0; i < sc->intr_count; i++) {
8882 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8883 if (sc->intr[i].resource && sc->intr[i].rid) {
8884 bus_release_resource(sc->dev,
8885 SYS_RES_IRQ,
8886 sc->intr[i].rid,
8887 sc->intr[i].resource);
8888 }
8889 }
8890 pci_release_msi(sc->dev);
8891 break;
8892 case INTR_MODE_MSIX:
8893 for (i = 0; i < sc->intr_count; i++) {
8894 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8895 if (sc->intr[i].resource && sc->intr[i].rid) {
8896 bus_release_resource(sc->dev,
8897 SYS_RES_IRQ,
8898 sc->intr[i].rid,
8899 sc->intr[i].resource);
8900 }
8901 }
8902 pci_release_msi(sc->dev);
8903 break;
8904 default:
8905 /* nothing to do as initial allocation failed */
8906 break;
8907 }
8908 }
8909
8910 /*
8911 * This function determines and allocates the appropriate
8912  * interrupt based on system capabilities and user request.
8913  *
8914  * The user may force a particular interrupt mode, specify
8915  * the number of receive queues, specify the method for
8916  * distributing received frames to receive queues, or use
8917  * the default settings which will automatically select the
8918  * best supported combination. In addition, the OS may or
8919  * may not support certain combinations of these settings.
8920  * This routine attempts to reconcile the settings requested
8921  * by the user with the capabilities available from the system
8922 * to select the optimal combination of features.
8923 *
8924 * Returns:
8925 * 0 = Success, !0 = Failure.
8926 */
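/*
 * The allocation below degrades gracefully: MSI-X (one slowpath vector
 * plus one per fastpath queue) falls back to a single MSI vector, which
 * falls back to a shared legacy INTx line; interrupt_mode is left at -1
 * only if even INTx could not be allocated.
 */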
8927 static int
8928 bxe_interrupt_alloc(struct bxe_softc *sc)
8929 {
8930 int msix_count = 0;
8931 int msi_count = 0;
8932 int num_requested = 0;
8933 int num_allocated = 0;
8934 int rid, i, j;
8935 int rc;
8936
8937 /* get the number of available MSI/MSI-X interrupts from the OS */
8938 if (sc->interrupt_mode > 0) {
8939 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8940 msix_count = pci_msix_count(sc->dev);
8941 }
8942
8943 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8944 msi_count = pci_msi_count(sc->dev);
8945 }
8946
8947 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8948 msi_count, msix_count);
8949 }
8950
8951 do { /* try allocating MSI-X interrupt resources (at least 2) */
8952 if (sc->interrupt_mode != INTR_MODE_MSIX) {
8953 break;
8954 }
8955
8956 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8957 (msix_count < 2)) {
8958 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8959 break;
8960 }
8961
8962 /* ask for the necessary number of MSI-X vectors */
8963 num_requested = min((sc->num_queues + 1), msix_count);
8964
8965 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8966
8967 num_allocated = num_requested;
8968 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8969 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8970 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8971 break;
8972 }
8973
8974 if (num_allocated < 2) { /* possible? */
8975 BLOGE(sc, "MSI-X allocation less than 2!\n");
8976 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8977 pci_release_msi(sc->dev);
8978 break;
8979 }
8980
8981 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8982 num_requested, num_allocated);
8983
8984 /* best effort so use the number of vectors allocated to us */
8985 sc->intr_count = num_allocated;
8986 sc->num_queues = num_allocated - 1;
8987
8988 rid = 1; /* initial resource identifier */
8989
8990 /* allocate the MSI-X vectors */
8991 for (i = 0; i < num_allocated; i++) {
8992 sc->intr[i].rid = (rid + i);
8993
8994 if ((sc->intr[i].resource =
8995 bus_alloc_resource_any(sc->dev,
8996 SYS_RES_IRQ,
8997 &sc->intr[i].rid,
8998 RF_ACTIVE)) == NULL) {
8999 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
9000 i, (rid + i));
9001
9002 for (j = (i - 1); j >= 0; j--) {
9003 bus_release_resource(sc->dev,
9004 SYS_RES_IRQ,
9005 sc->intr[j].rid,
9006 sc->intr[j].resource);
9007 }
9008
9009 sc->intr_count = 0;
9010 sc->num_queues = 0;
9011 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9012 pci_release_msi(sc->dev);
9013 break;
9014 }
9015
9016 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9017 }
9018 } while (0);
9019
9020 do { /* try allocating MSI vector resources (at least 2) */
9021 if (sc->interrupt_mode != INTR_MODE_MSI) {
9022 break;
9023 }
9024
9025 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9026 (msi_count < 1)) {
9027 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9028 break;
9029 }
9030
9031 /* ask for a single MSI vector */
9032 num_requested = 1;
9033
9034 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9035
9036 num_allocated = num_requested;
9037 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9038 BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9039 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9040 break;
9041 }
9042
9043 if (num_allocated != 1) { /* possible? */
9044 BLOGE(sc, "MSI allocation is not 1!\n");
9045 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9046 pci_release_msi(sc->dev);
9047 break;
9048 }
9049
9050 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9051 num_requested, num_allocated);
9052
9053 /* best effort so use the number of vectors allocated to us */
9054 sc->intr_count = num_allocated;
9055 sc->num_queues = num_allocated;
9056
9057 rid = 1; /* initial resource identifier */
9058
9059 sc->intr[0].rid = rid;
9060
9061 if ((sc->intr[0].resource =
9062 bus_alloc_resource_any(sc->dev,
9063 SYS_RES_IRQ,
9064 &sc->intr[0].rid,
9065 RF_ACTIVE)) == NULL) {
9066 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9067 sc->intr_count = 0;
9068 sc->num_queues = 0;
9069 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9070 pci_release_msi(sc->dev);
9071 break;
9072 }
9073
9074 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9075 } while (0);
9076
9077 do { /* try allocating INTx vector resources */
9078 if (sc->interrupt_mode != INTR_MODE_INTX) {
9079 break;
9080 }
9081
9082 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9083
9084 /* only one vector for INTx */
9085 sc->intr_count = 1;
9086 sc->num_queues = 1;
9087
9088 rid = 0; /* initial resource identifier */
9089
9090 sc->intr[0].rid = rid;
9091
9092 if ((sc->intr[0].resource =
9093 bus_alloc_resource_any(sc->dev,
9094 SYS_RES_IRQ,
9095 &sc->intr[0].rid,
9096 (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9097 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9098 sc->intr_count = 0;
9099 sc->num_queues = 0;
9100 sc->interrupt_mode = -1; /* Failed! */
9101 break;
9102 }
9103
9104 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9105 } while (0);
9106
9107 if (sc->interrupt_mode == -1) {
9108 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9109 rc = 1;
9110 } else {
9111 BLOGD(sc, DBG_LOAD,
9112 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9113 sc->interrupt_mode, sc->num_queues);
9114 rc = 0;
9115 }
9116
9117 return (rc);
9118 }
9119
9120 static void
9121 bxe_interrupt_detach(struct bxe_softc *sc)
9122 {
9123 struct bxe_fastpath *fp;
9124 int i;
9125
9126 /* release interrupt resources */
9127 for (i = 0; i < sc->intr_count; i++) {
9128 if (sc->intr[i].resource && sc->intr[i].tag) {
9129 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9130 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9131 }
9132 }
9133
9134 for (i = 0; i < sc->num_queues; i++) {
9135 fp = &sc->fp[i];
9136 if (fp->tq) {
9137 taskqueue_drain(fp->tq, &fp->tq_task);
9138 taskqueue_drain(fp->tq, &fp->tx_task);
9139 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9140 NULL))
9141 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9142         }
9143     }
9144 
9145     for (i = 0; i < sc->num_queues; i++) {
9146         fp = &sc->fp[i];
9147         if (fp->tq != NULL) {
9148             taskqueue_free(fp->tq);
9149             fp->tq = NULL;
9150         }
9151     }
9152
9153 if (sc->sp_tq) {
9154 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9155 taskqueue_free(sc->sp_tq);
9156 sc->sp_tq = NULL;
9157 }
9158 }
9159
9160 /*
9161  * Enables interrupts and attaches the ISRs.
9162 *
9163 * When using multiple MSI/MSI-X vectors the first vector
9164 * is used for slowpath operations while all remaining
9165 * vectors are used for fastpath operations. If only a
9166 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9167 * ISR must look for both slowpath and fastpath completions.
9168 */
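/*
 * For example, with 4 MSI-X vectors allocated: vector 0 hosts
 * bxe_intr_sp() for slowpath work, and vectors 1-3 host bxe_intr_fp()
 * for fastpath queues fp[0]-fp[2] (num_queues == 3).
 */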
9169 static int
9170 bxe_interrupt_attach(struct bxe_softc *sc)
9171 {
9172 struct bxe_fastpath *fp;
9173 int rc = 0;
9174 int i;
9175
9176 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9177 "bxe%d_sp_tq", sc->unit);
9178 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9179 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9180 taskqueue_thread_enqueue,
9181 &sc->sp_tq);
9182 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9183 "%s", sc->sp_tq_name);
9184
9185
9186 for (i = 0; i < sc->num_queues; i++) {
9187 fp = &sc->fp[i];
9188 snprintf(fp->tq_name, sizeof(fp->tq_name),
9189 "bxe%d_fp%d_tq", sc->unit, i);
9190 NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9191 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9192 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9193 taskqueue_thread_enqueue,
9194 &fp->tq);
9195 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9196 bxe_tx_mq_start_deferred, fp);
9197 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9198 "%s", fp->tq_name);
9199 }
9200
9201 /* setup interrupt handlers */
9202 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9203 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9204
9205 /*
9206 * Setup the interrupt handler. Note that we pass the driver instance
9207 * to the interrupt handler for the slowpath.
9208 */
9209 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9210 (INTR_TYPE_NET | INTR_MPSAFE),
9211 NULL, bxe_intr_sp, sc,
9212 &sc->intr[0].tag)) != 0) {
9213 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9214 goto bxe_interrupt_attach_exit;
9215 }
9216
9217 bus_describe_intr(sc->dev, sc->intr[0].resource,
9218 sc->intr[0].tag, "sp");
9219
9220 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9221
9222 /* initialize the fastpath vectors (note the first was used for sp) */
9223 for (i = 0; i < sc->num_queues; i++) {
9224 fp = &sc->fp[i];
9225 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9226
9227 /*
9228 * Setup the interrupt handler. Note that we pass the
9229 * fastpath context to the interrupt handler in this
9230 * case.
9231 */
9232 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9233 (INTR_TYPE_NET | INTR_MPSAFE),
9234 NULL, bxe_intr_fp, fp,
9235 &sc->intr[i + 1].tag)) != 0) {
9236 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9237 (i + 1), rc);
9238 goto bxe_interrupt_attach_exit;
9239 }
9240
9241 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9242 sc->intr[i + 1].tag, "fp%02d", i);
9243
9244 /* bind the fastpath instance to a cpu */
9245 if (sc->num_queues > 1) {
9246 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9247 }
9248
9249 fp->state = BXE_FP_STATE_IRQ;
9250 }
9251 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9252 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9253
9254 /*
9255 * Setup the interrupt handler. Note that we pass the
9256 * driver instance to the interrupt handler which
9257 * will handle both the slowpath and fastpath.
9258 */
9259 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9260 (INTR_TYPE_NET | INTR_MPSAFE),
9261 NULL, bxe_intr_legacy, sc,
9262 &sc->intr[0].tag)) != 0) {
9263 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9264 goto bxe_interrupt_attach_exit;
9265 }
9266
9267 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9268 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9269
9270 /*
9271 * Setup the interrupt handler. Note that we pass the
9272 * driver instance to the interrupt handler which
9273 * will handle both the slowpath and fastpath.
9274 */
9275 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9276 (INTR_TYPE_NET | INTR_MPSAFE),
9277 NULL, bxe_intr_legacy, sc,
9278 &sc->intr[0].tag)) != 0) {
9279 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9280 goto bxe_interrupt_attach_exit;
9281 }
9282 }
9283
9284 bxe_interrupt_attach_exit:
9285
9286 return (rc);
9287 }
9288
9289 static int bxe_init_hw_common_chip(struct bxe_softc *sc);
9290 static int bxe_init_hw_common(struct bxe_softc *sc);
9291 static int bxe_init_hw_port(struct bxe_softc *sc);
9292 static int bxe_init_hw_func(struct bxe_softc *sc);
9293 static void bxe_reset_common(struct bxe_softc *sc);
9294 static void bxe_reset_port(struct bxe_softc *sc);
9295 static void bxe_reset_func(struct bxe_softc *sc);
9296 static int bxe_gunzip_init(struct bxe_softc *sc);
9297 static void bxe_gunzip_end(struct bxe_softc *sc);
9298 static int bxe_init_firmware(struct bxe_softc *sc);
9299 static void bxe_release_firmware(struct bxe_softc *sc);
9300
9301 static struct
9302 ecore_func_sp_drv_ops bxe_func_sp_drv = {
9303 .init_hw_cmn_chip = bxe_init_hw_common_chip,
9304 .init_hw_cmn = bxe_init_hw_common,
9305 .init_hw_port = bxe_init_hw_port,
9306 .init_hw_func = bxe_init_hw_func,
9307
9308 .reset_hw_cmn = bxe_reset_common,
9309 .reset_hw_port = bxe_reset_port,
9310 .reset_hw_func = bxe_reset_func,
9311
9312 .gunzip_init = bxe_gunzip_init,
9313 .gunzip_end = bxe_gunzip_end,
9314
9315 .init_fw = bxe_init_firmware,
9316 .release_fw = bxe_release_firmware,
9317 };
9318
9319 static void
9320 bxe_init_func_obj(struct bxe_softc *sc)
9321 {
9322 sc->dmae_ready = 0;
9323
9324 ecore_init_func_obj(sc,
9325 &sc->func_obj,
9326 BXE_SP(sc, func_rdata),
9327 BXE_SP_MAPPING(sc, func_rdata),
9328 BXE_SP(sc, func_afex_rdata),
9329 BXE_SP_MAPPING(sc, func_afex_rdata),
9330 &bxe_func_sp_drv);
9331 }
9332
9333 static int
9334 bxe_init_hw(struct bxe_softc *sc,
9335 uint32_t load_code)
9336 {
9337 struct ecore_func_state_params func_params = { NULL };
9338 int rc;
9339
9340 /* prepare the parameters for function state transitions */
9341 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9342
9343 func_params.f_obj = &sc->func_obj;
9344 func_params.cmd = ECORE_F_CMD_HW_INIT;
9345
9346 func_params.params.hw_init.load_phase = load_code;
9347
9348 /*
9349 * Via a plethora of function pointers, we will eventually reach
9350 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9351 */
9352 rc = ecore_func_state_change(sc, &func_params);
9353
9354 return (rc);
9355 }
9356
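/*
 * Fill a region of device memory with 'fill', using dword writes when
 * both the address and length are dword aligned and byte writes
 * otherwise. The two paths only agree for values such as 0 whose
 * replicated byte equals the dword pattern; callers here pass 0.
 */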
9357 static void
9358 bxe_fill(struct bxe_softc *sc,
9359 uint32_t addr,
9360 int fill,
9361 uint32_t len)
9362 {
9363 uint32_t i;
9364
9365 if (!(len % 4) && !(addr % 4)) {
9366 for (i = 0; i < len; i += 4) {
9367 REG_WR(sc, (addr + i), fill);
9368 }
9369 } else {
9370 for (i = 0; i < len; i++) {
9371 REG_WR8(sc, (addr + i), fill);
9372 }
9373 }
9374 }
9375
9376 /* writes FP SP data to FW - data_size in dwords */
9377 static void
9378 bxe_wr_fp_sb_data(struct bxe_softc *sc,
9379 int fw_sb_id,
9380 uint32_t *sb_data_p,
9381 uint32_t data_size)
9382 {
9383 int index;
9384
9385 for (index = 0; index < data_size; index++) {
9386 REG_WR(sc,
9387 (BAR_CSTRORM_INTMEM +
9388 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9389 (sizeof(uint32_t) * index)),
9390 *(sb_data_p + index));
9391 }
9392 }
9393
9394 static void
9395 bxe_zero_fp_sb(struct bxe_softc *sc,
9396 int fw_sb_id)
9397 {
9398 struct hc_status_block_data_e2 sb_data_e2;
9399 struct hc_status_block_data_e1x sb_data_e1x;
9400 uint32_t *sb_data_p;
9401 uint32_t data_size = 0;
9402
9403 if (!CHIP_IS_E1x(sc)) {
9404 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9405 sb_data_e2.common.state = SB_DISABLED;
9406 sb_data_e2.common.p_func.vf_valid = FALSE;
9407 sb_data_p = (uint32_t *)&sb_data_e2;
9408 data_size = (sizeof(struct hc_status_block_data_e2) /
9409 sizeof(uint32_t));
9410 } else {
9411 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9412 sb_data_e1x.common.state = SB_DISABLED;
9413 sb_data_e1x.common.p_func.vf_valid = FALSE;
9414 sb_data_p = (uint32_t *)&sb_data_e1x;
9415 data_size = (sizeof(struct hc_status_block_data_e1x) /
9416 sizeof(uint32_t));
9417 }
9418
9419 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9420
9421 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9422 0, CSTORM_STATUS_BLOCK_SIZE);
9423 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9424 0, CSTORM_SYNC_BLOCK_SIZE);
9425 }
9426
9427 static void
9428 bxe_wr_sp_sb_data(struct bxe_softc *sc,
9429 struct hc_sp_status_block_data *sp_sb_data)
9430 {
9431 int i;
9432
9433 for (i = 0;
9434 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9435 i++) {
9436 REG_WR(sc,
9437 (BAR_CSTRORM_INTMEM +
9438 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9439 (i * sizeof(uint32_t))),
9440 *((uint32_t *)sp_sb_data + i));
9441 }
9442 }
9443
9444 static void
9445 bxe_zero_sp_sb(struct bxe_softc *sc)
9446 {
9447 struct hc_sp_status_block_data sp_sb_data;
9448
9449 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9450
9451 sp_sb_data.state = SB_DISABLED;
9452 sp_sb_data.p_func.vf_valid = FALSE;
9453
9454 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9455
9456 bxe_fill(sc,
9457 (BAR_CSTRORM_INTMEM +
9458 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9459 0, CSTORM_SP_STATUS_BLOCK_SIZE);
9460 bxe_fill(sc,
9461 (BAR_CSTRORM_INTMEM +
9462 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9463 0, CSTORM_SP_SYNC_BLOCK_SIZE);
9464 }
9465
9466 static void
9467 bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9468 int igu_sb_id,
9469 int igu_seg_id)
9470 {
9471 hc_sm->igu_sb_id = igu_sb_id;
9472 hc_sm->igu_seg_id = igu_seg_id;
9473 hc_sm->timer_value = 0xFF;
9474 hc_sm->time_to_expire = 0xFFFFFFFF;
9475 }
9476
9477 static void
9478 bxe_map_sb_state_machines(struct hc_index_data *index_data)
9479 {
9480 /* zero out state machine indices */
9481
9482 /* rx indices */
9483 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9484
9485 /* tx indices */
9486 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9487 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9488 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9489 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9490
9491 /* map indices */
9492
9493 /* rx indices */
9494 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9495 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9496
9497 /* tx indices */
9498 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9499 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9500 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9501 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9502 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9503 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9504 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9505 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9506 }
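/*
 * [Editor's note] The function above uses the standard read-modify-write
 * idiom for a multi-bit field: clear the field with its mask, then OR in
 * the new value shifted into position. In isolation:
 *
 *     flags &= ~HC_INDEX_DATA_SM_ID;                      clear the field
 *     flags |= (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);   select the RX SM
 */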
9507
9508 static void
9509 bxe_init_sb(struct bxe_softc *sc,
9510 bus_addr_t busaddr,
9511 int vfid,
9512 uint8_t vf_valid,
9513 int fw_sb_id,
9514 int igu_sb_id)
9515 {
9516 struct hc_status_block_data_e2 sb_data_e2;
9517 struct hc_status_block_data_e1x sb_data_e1x;
9518 struct hc_status_block_sm *hc_sm_p;
9519 uint32_t *sb_data_p;
9520 int igu_seg_id;
9521 int data_size;
9522
9523 if (CHIP_INT_MODE_IS_BC(sc)) {
9524 igu_seg_id = HC_SEG_ACCESS_NORM;
9525 } else {
9526 igu_seg_id = IGU_SEG_ACCESS_NORM;
9527 }
9528
9529 bxe_zero_fp_sb(sc, fw_sb_id);
9530
9531 if (!CHIP_IS_E1x(sc)) {
9532 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9533 sb_data_e2.common.state = SB_ENABLED;
9534 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9535 sb_data_e2.common.p_func.vf_id = vfid;
9536 sb_data_e2.common.p_func.vf_valid = vf_valid;
9537 sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9538 sb_data_e2.common.same_igu_sb_1b = TRUE;
9539 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9540 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9541 hc_sm_p = sb_data_e2.common.state_machine;
9542 sb_data_p = (uint32_t *)&sb_data_e2;
9543 data_size = (sizeof(struct hc_status_block_data_e2) /
9544 sizeof(uint32_t));
9545 bxe_map_sb_state_machines(sb_data_e2.index_data);
9546 } else {
9547 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9548 sb_data_e1x.common.state = SB_ENABLED;
9549 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9550 sb_data_e1x.common.p_func.vf_id = 0xff;
9551 sb_data_e1x.common.p_func.vf_valid = FALSE;
9552 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9553 sb_data_e1x.common.same_igu_sb_1b = TRUE;
9554 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9555 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9556 hc_sm_p = sb_data_e1x.common.state_machine;
9557 sb_data_p = (uint32_t *)&sb_data_e1x;
9558 data_size = (sizeof(struct hc_status_block_data_e1x) /
9559 sizeof(uint32_t));
9560 bxe_map_sb_state_machines(sb_data_e1x.index_data);
9561 }
9562
9563 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9564 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9565
9566 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9567
9568      /* write indices to HW - PCI guarantees endianness of regpairs */
9569 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9570 }
9571
9572 static inline uint8_t
9573 bxe_fp_qzone_id(struct bxe_fastpath *fp)
9574 {
9575 if (CHIP_IS_E1x(fp->sc)) {
9576 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9577 } else {
9578 return (fp->cl_id);
9579 }
9580 }
9581
9582 static inline uint32_t
9583 bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
9584 struct bxe_fastpath *fp)
9585 {
9586 uint32_t offset = BAR_USTRORM_INTMEM;
9587
9588 if (!CHIP_IS_E1x(sc)) {
9589 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9590 } else {
9591 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9592 }
9593
9594 return (offset);
9595 }
9596
9597 static void
9598 bxe_init_eth_fp(struct bxe_softc *sc,
9599 int idx)
9600 {
9601 struct bxe_fastpath *fp = &sc->fp[idx];
9602 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9603 unsigned long q_type = 0;
9604 int cos;
9605
9606 fp->sc = sc;
9607 fp->index = idx;
9608
9609 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9610 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9611
9612 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9613 (SC_L_ID(sc) + idx) :
9614 /* want client ID same as IGU SB ID for non-E1 */
9615 fp->igu_sb_id;
9616 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9617
9618 /* setup sb indices */
9619 if (!CHIP_IS_E1x(sc)) {
9620 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9621 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9622 } else {
9623 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9624 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9625 }
9626
9627 /* init shortcut */
9628 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9629
9630 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9631
9632 /*
9633 * XXX If multiple CoS is ever supported then each fastpath structure
9634 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9635 */
9636 for (cos = 0; cos < sc->max_cos; cos++) {
9637 cids[cos] = idx;
9638 }
9639 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9640
9641 /* nothing more for a VF to do */
9642 if (IS_VF(sc)) {
9643 return;
9644 }
9645
9646 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9647 fp->fw_sb_id, fp->igu_sb_id);
9648
9649 bxe_update_fp_sb_idx(fp);
9650
9651 /* Configure Queue State object */
9652 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9653 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9654
9655 ecore_init_queue_obj(sc,
9656 &sc->sp_objs[idx].q_obj,
9657 fp->cl_id,
9658 cids,
9659 sc->max_cos,
9660 SC_FUNC(sc),
9661 BXE_SP(sc, q_rdata),
9662 BXE_SP_MAPPING(sc, q_rdata),
9663 q_type);
9664
9665 /* configure classification DBs */
9666 ecore_init_mac_obj(sc,
9667 &sc->sp_objs[idx].mac_obj,
9668 fp->cl_id,
9669 idx,
9670 SC_FUNC(sc),
9671 BXE_SP(sc, mac_rdata),
9672 BXE_SP_MAPPING(sc, mac_rdata),
9673 ECORE_FILTER_MAC_PENDING,
9674 &sc->sp_state,
9675 ECORE_OBJ_TYPE_RX_TX,
9676 &sc->macs_pool);
9677
9678 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9679 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9680 }
9681
9682 static inline void
9683 bxe_update_rx_prod(struct bxe_softc *sc,
9684 struct bxe_fastpath *fp,
9685 uint16_t rx_bd_prod,
9686 uint16_t rx_cq_prod,
9687 uint16_t rx_sge_prod)
9688 {
9689 struct ustorm_eth_rx_producers rx_prods = { 0 };
9690 uint32_t i;
9691
9692 /* update producers */
9693 rx_prods.bd_prod = rx_bd_prod;
9694 rx_prods.cqe_prod = rx_cq_prod;
9695 rx_prods.sge_prod = rx_sge_prod;
9696
9697 /*
9698 * Make sure that the BD and SGE data is updated before updating the
9699 * producers since FW might read the BD/SGE right after the producer
9700 * is updated.
9701 * This is only applicable for weak-ordered memory model archs such
9702      * as IA-64. The following barrier is also mandatory since the FW
9703      * assumes BDs must have buffers.
9704 */
9705 wmb();
9706
9707 for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9708 REG_WR(sc,
9709 (fp->ustorm_rx_prods_offset + (i * 4)),
9710 ((uint32_t *)&rx_prods)[i]);
9711 }
9712
9713 wmb(); /* keep prod updates ordered */
9714
9715 BLOGD(sc, DBG_RX,
9716 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9717 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9718 }
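/*
 * [Editor's note] The barrier protocol above, in outline (for
 * illustration only): fill the BD/SGE entries; wmb() so those stores are
 * visible before the producers; write the producer values; wmb() so any
 * later doorbell cannot pass the producer stores. Dropping the first
 * barrier would let the FW fetch a BD whose buffer pointer is stale.
 */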
9719
9720 static void
9721 bxe_init_rx_rings(struct bxe_softc *sc)
9722 {
9723 struct bxe_fastpath *fp;
9724 int i;
9725
9726 for (i = 0; i < sc->num_queues; i++) {
9727 fp = &sc->fp[i];
9728
9729 fp->rx_bd_cons = 0;
9730
9731 /*
9732 * Activate the BD ring...
9733 * Warning, this will generate an interrupt (to the TSTORM)
9734 * so this can only be done after the chip is initialized
9735 */
9736 bxe_update_rx_prod(sc, fp,
9737 fp->rx_bd_prod,
9738 fp->rx_cq_prod,
9739 fp->rx_sge_prod);
9740
9741 if (i != 0) {
9742 continue;
9743 }
9744
9745 if (CHIP_IS_E1(sc)) {
9746 REG_WR(sc,
9747 (BAR_USTRORM_INTMEM +
9748 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9749 U64_LO(fp->rcq_dma.paddr));
9750 REG_WR(sc,
9751 (BAR_USTRORM_INTMEM +
9752 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9753 U64_HI(fp->rcq_dma.paddr));
9754 }
9755 }
9756 }
9757
9758 static void
9759 bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9760 {
9761 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9762 fp->tx_db.data.zero_fill1 = 0;
9763 fp->tx_db.data.prod = 0;
9764
9765 fp->tx_pkt_prod = 0;
9766 fp->tx_pkt_cons = 0;
9767 fp->tx_bd_prod = 0;
9768 fp->tx_bd_cons = 0;
9769 fp->eth_q_stats.tx_pkts = 0;
9770 }
9771
9772 static inline void
9773 bxe_init_tx_rings(struct bxe_softc *sc)
9774 {
9775 int i;
9776
9777 for (i = 0; i < sc->num_queues; i++) {
9778 bxe_init_tx_ring_one(&sc->fp[i]);
9779 }
9780 }
9781
9782 static void
9783 bxe_init_def_sb(struct bxe_softc *sc)
9784 {
9785 struct host_sp_status_block *def_sb = sc->def_sb;
9786 bus_addr_t mapping = sc->def_sb_dma.paddr;
9787 int igu_sp_sb_index;
9788 int igu_seg_id;
9789 int port = SC_PORT(sc);
9790 int func = SC_FUNC(sc);
9791 int reg_offset, reg_offset_en5;
9792 uint64_t section;
9793 int index, sindex;
9794 struct hc_sp_status_block_data sp_sb_data;
9795
9796 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9797
9798 if (CHIP_INT_MODE_IS_BC(sc)) {
9799 igu_sp_sb_index = DEF_SB_IGU_ID;
9800 igu_seg_id = HC_SEG_ACCESS_DEF;
9801 } else {
9802 igu_sp_sb_index = sc->igu_dsb_id;
9803 igu_seg_id = IGU_SEG_ACCESS_DEF;
9804 }
9805
9806 /* attentions */
9807 section = ((uint64_t)mapping +
9808 offsetof(struct host_sp_status_block, atten_status_block));
9809 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9810 sc->attn_state = 0;
9811
9812 reg_offset = (port) ?
9813 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9814 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9815 reg_offset_en5 = (port) ?
9816 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9817 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9818
9819 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9820 /* take care of sig[0]..sig[4] */
9821 for (sindex = 0; sindex < 4; sindex++) {
9822 sc->attn_group[index].sig[sindex] =
9823 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9824 }
9825
9826 if (!CHIP_IS_E1x(sc)) {
9827 /*
9828 * enable5 is separate from the rest of the registers,
9829 * and the address skip is 4 and not 16 between the
9830 * different groups
9831 */
9832 sc->attn_group[index].sig[4] =
9833 REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9834 } else {
9835 sc->attn_group[index].sig[4] = 0;
9836 }
9837 }
9838
9839 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9840 reg_offset = (port) ?
9841 HC_REG_ATTN_MSG1_ADDR_L :
9842 HC_REG_ATTN_MSG0_ADDR_L;
9843 REG_WR(sc, reg_offset, U64_LO(section));
9844 REG_WR(sc, (reg_offset + 4), U64_HI(section));
9845 } else if (!CHIP_IS_E1x(sc)) {
9846 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9847 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9848 }
9849
9850 section = ((uint64_t)mapping +
9851 offsetof(struct host_sp_status_block, sp_sb));
9852
9853 bxe_zero_sp_sb(sc);
9854
9855     /* PCI guarantees endianness of regpairs */
9856 sp_sb_data.state = SB_ENABLED;
9857 sp_sb_data.host_sb_addr.lo = U64_LO(section);
9858 sp_sb_data.host_sb_addr.hi = U64_HI(section);
9859 sp_sb_data.igu_sb_id = igu_sp_sb_index;
9860 sp_sb_data.igu_seg_id = igu_seg_id;
9861 sp_sb_data.p_func.pf_id = func;
9862 sp_sb_data.p_func.vnic_id = SC_VN(sc);
9863 sp_sb_data.p_func.vf_id = 0xff;
9864
9865 bxe_wr_sp_sb_data(sc, &sp_sb_data);
9866
9867 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9868 }
9869
9870 static void
9871 bxe_init_sp_ring(struct bxe_softc *sc)
9872 {
9873 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9874 sc->spq_prod_idx = 0;
9875 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9876 sc->spq_prod_bd = sc->spq;
9877 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9878 }
9879
9880 static void
9881 bxe_init_eq_ring(struct bxe_softc *sc)
9882 {
9883 union event_ring_elem *elem;
9884 int i;
9885
9886 for (i = 1; i <= NUM_EQ_PAGES; i++) {
9887 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9888
9889 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9890 BCM_PAGE_SIZE *
9891 (i % NUM_EQ_PAGES)));
9892 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9893 BCM_PAGE_SIZE *
9894 (i % NUM_EQ_PAGES)));
9895 }
9896
9897 sc->eq_cons = 0;
9898 sc->eq_prod = NUM_EQ_DESC;
9899 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9900
9901 atomic_store_rel_long(&sc->eq_spq_left,
9902 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9903 NUM_EQ_DESC) - 1));
9904 }
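/*
 * [Editor's note] The loop above chains the event ring pages into a
 * circle: the last descriptor of each page carries the DMA address of
 * the following page, wrapping via the modulo. E.g. with NUM_EQ_PAGES
 * == 2 (value illustrative):
 *
 *   i = 1: last elem of page 0 -> paddr + BCM_PAGE_SIZE * (1 % 2) = page 1
 *   i = 2: last elem of page 1 -> paddr + BCM_PAGE_SIZE * (2 % 2) = page 0
 */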
9905
9906 static void
9907 bxe_init_internal_common(struct bxe_softc *sc)
9908 {
9909 int i;
9910
9911 /*
9912 * Zero this manually as its initialization is currently missing
9913 * in the initTool.
9914 */
9915 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9916 REG_WR(sc,
9917 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9918 0);
9919 }
9920
9921 if (!CHIP_IS_E1x(sc)) {
9922 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9923 CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9924 }
9925 }
9926
9927 static void
9928 bxe_init_internal(struct bxe_softc *sc,
9929 uint32_t load_code)
9930 {
9931 switch (load_code) {
9932 case FW_MSG_CODE_DRV_LOAD_COMMON:
9933 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9934 bxe_init_internal_common(sc);
9935 /* no break */
9936
9937 case FW_MSG_CODE_DRV_LOAD_PORT:
9938 /* nothing to do */
9939 /* no break */
9940
9941 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9942 /* internal memory per function is initialized inside bxe_pf_init */
9943 break;
9944
9945 default:
9946 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9947 break;
9948 }
9949 }
9950
9951 static void
9952 storm_memset_func_cfg(struct bxe_softc *sc,
9953 struct tstorm_eth_function_common_config *tcfg,
9954 uint16_t abs_fid)
9955 {
9956 uint32_t addr;
9957 size_t size;
9958
9959 addr = (BAR_TSTRORM_INTMEM +
9960 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9961 size = sizeof(struct tstorm_eth_function_common_config);
9962 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9963 }
9964
9965 static void
9966 bxe_func_init(struct bxe_softc *sc,
9967 struct bxe_func_init_params *p)
9968 {
9969 struct tstorm_eth_function_common_config tcfg = { 0 };
9970
9971 if (CHIP_IS_E1x(sc)) {
9972 storm_memset_func_cfg(sc, &tcfg, p->func_id);
9973 }
9974
9975 /* Enable the function in the FW */
9976 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9977 storm_memset_func_en(sc, p->func_id, 1);
9978
9979 /* spq */
9980 if (p->func_flgs & FUNC_FLG_SPQ) {
9981 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9982 REG_WR(sc,
9983 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9984 p->spq_prod);
9985 }
9986 }
9987
9988 /*
9989  * Calculates the per-VN min rates, which are needed for further
9990  * normalizing of the min rates (results are stored in
9991  * input->vnic_min_rate[]).
9992  * If all the min rates are 0, the fairness algorithm should be
9993  * deactivated.
9994  * If not all min rates are zero, then those that are zero will be
9995  * set to DEF_MIN_RATE.
9996  * Hidden VNs are skipped (their min rate is forced to 0).
9997 */
9998 static void
9999 bxe_calc_vn_min(struct bxe_softc *sc,
10000 struct cmng_init_input *input)
10001 {
10002 uint32_t vn_cfg;
10003 uint32_t vn_min_rate;
10004 int all_zero = 1;
10005 int vn;
10006
10007 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10008 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10009 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10010 FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10011
10012 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10013 /* skip hidden VNs */
10014 vn_min_rate = 0;
10015 } else if (!vn_min_rate) {
10016 /* If min rate is zero - set it to 100 */
10017 vn_min_rate = DEF_MIN_RATE;
10018 } else {
10019 all_zero = 0;
10020 }
10021
10022 input->vnic_min_rate[vn] = vn_min_rate;
10023 }
10024
10025 /* if ETS or all min rates are zeros - disable fairness */
10026 if (BXE_IS_ETS_ENABLED(sc)) {
10027 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10028 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10029 } else if (all_zero) {
10030 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10031 BLOGD(sc, DBG_LOAD,
10032 "Fariness disabled (all MIN values are zeroes)\n");
10033 } else {
10034 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10035 }
10036 }
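/*
 * [Editor's note] A worked example of the MIN_BW decode above, with an
 * illustrative mf_config value: if the FUNC_MF_CFG_MIN_BW field decodes
 * to 25, the VN min rate becomes 25 * 100 = 2500, in the same 100x
 * scale as DEF_MIN_RATE; a field of 0 is bumped to DEF_MIN_RATE so the
 * fairness weights never contain a zero, and fairness is disabled only
 * when every VN is configured to zero.
 */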
10037
10038 static inline uint16_t
10039 bxe_extract_max_cfg(struct bxe_softc *sc,
10040 uint32_t mf_cfg)
10041 {
10042 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10043 FUNC_MF_CFG_MAX_BW_SHIFT);
10044
10045 if (!max_cfg) {
10046 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10047 max_cfg = 100;
10048 }
10049
10050 return (max_cfg);
10051 }
10052
10053 static void
10054 bxe_calc_vn_max(struct bxe_softc *sc,
10055 int vn,
10056 struct cmng_init_input *input)
10057 {
10058 uint16_t vn_max_rate;
10059 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10060 uint32_t max_cfg;
10061
10062 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10063 vn_max_rate = 0;
10064 } else {
10065 max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10066
10067 if (IS_MF_SI(sc)) {
10068             /* max_cfg is a percentage of the link speed */
10069 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10070 } else { /* SD modes */
10071 /* max_cfg is absolute in 100Mb units */
10072 vn_max_rate = (max_cfg * 100);
10073 }
10074 }
10075
10076 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10077
10078 input->vnic_max_rate[vn] = vn_max_rate;
10079 }
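/*
 * [Editor's note] A worked example of the two MAX_BW interpretations
 * above (numbers illustrative), with max_cfg = 30:
 *
 *   MF SI: percentage of link speed,
 *          line_speed 10000 -> (10000 * 30) / 100 = 3000
 *   MF SD: absolute value in 100Mb units,
 *          30 * 100 = 3000
 */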
10080
10081 static void
10082 bxe_cmng_fns_init(struct bxe_softc *sc,
10083 uint8_t read_cfg,
10084 uint8_t cmng_type)
10085 {
10086 struct cmng_init_input input;
10087 int vn;
10088
10089 memset(&input, 0, sizeof(struct cmng_init_input));
10090
10091 input.port_rate = sc->link_vars.line_speed;
10092
10093 if (cmng_type == CMNG_FNS_MINMAX) {
10094 /* read mf conf from shmem */
10095 if (read_cfg) {
10096 bxe_read_mf_cfg(sc);
10097 }
10098
10099 /* get VN min rate and enable fairness if not 0 */
10100 bxe_calc_vn_min(sc, &input);
10101
10102 /* get VN max rate */
10103 if (sc->port.pmf) {
10104 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10105 bxe_calc_vn_max(sc, vn, &input);
10106 }
10107 }
10108
10109 /* always enable rate shaping and fairness */
10110 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10111
10112 ecore_init_cmng(&input, &sc->cmng);
10113 return;
10114 }
10115
10116 /* rate shaping and fairness are disabled */
10117 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10118 }
10119
10120 static int
10121 bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10122 {
10123 if (CHIP_REV_IS_SLOW(sc)) {
10124 return (CMNG_FNS_NONE);
10125 }
10126
10127 if (IS_MF(sc)) {
10128 return (CMNG_FNS_MINMAX);
10129 }
10130
10131 return (CMNG_FNS_NONE);
10132 }
10133
10134 static void
10135 storm_memset_cmng(struct bxe_softc *sc,
10136 struct cmng_init *cmng,
10137 uint8_t port)
10138 {
10139 int vn;
10140 int func;
10141 uint32_t addr;
10142 size_t size;
10143
10144 addr = (BAR_XSTRORM_INTMEM +
10145 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10146 size = sizeof(struct cmng_struct_per_port);
10147 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10148
10149 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10150 func = func_by_vn(sc, vn);
10151
10152 addr = (BAR_XSTRORM_INTMEM +
10153 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10154 size = sizeof(struct rate_shaping_vars_per_vn);
10155 ecore_storm_memset_struct(sc, addr, size,
10156 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10157
10158 addr = (BAR_XSTRORM_INTMEM +
10159 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10160 size = sizeof(struct fairness_vars_per_vn);
10161 ecore_storm_memset_struct(sc, addr, size,
10162 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10163 }
10164 }
10165
10166 static void
10167 bxe_pf_init(struct bxe_softc *sc)
10168 {
10169 struct bxe_func_init_params func_init = { 0 };
10170 struct event_ring_data eq_data = { { 0 } };
10171 uint16_t flags;
10172
10173 if (!CHIP_IS_E1x(sc)) {
10174 /* reset IGU PF statistics: MSIX + ATTN */
10175 /* PF */
10176 REG_WR(sc,
10177 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10178 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10179 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10180 0);
10181 /* ATTN */
10182 REG_WR(sc,
10183 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10184 (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10185 (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10186 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10187 0);
10188 }
10189
10190 /* function setup flags */
10191 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10192
10193 /*
10194 * This flag is relevant for E1x only.
10195 * E2 doesn't have a TPA configuration in a function level.
10196 */
10197 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10198
10199 func_init.func_flgs = flags;
10200 func_init.pf_id = SC_FUNC(sc);
10201 func_init.func_id = SC_FUNC(sc);
10202 func_init.spq_map = sc->spq_dma.paddr;
10203 func_init.spq_prod = sc->spq_prod_idx;
10204
10205 bxe_func_init(sc, &func_init);
10206
10207 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10208
10209 /*
10210 * Congestion management values depend on the link rate.
10211 * There is no active link so initial link rate is set to 10Gbps.
10212 * When the link comes up the congestion management values are
10213 * re-calculated according to the actual link rate.
10214 */
10215 sc->link_vars.line_speed = SPEED_10000;
10216 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10217
10218 /* Only the PMF sets the HW */
10219 if (sc->port.pmf) {
10220 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10221 }
10222
10223     /* init Event Queue - PCI bus guarantees correct endianness */
10224 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10225 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10226 eq_data.producer = sc->eq_prod;
10227 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
10228 eq_data.sb_id = DEF_SB_ID;
10229 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10230 }
10231
10232 static void
10233 bxe_hc_int_enable(struct bxe_softc *sc)
10234 {
10235 int port = SC_PORT(sc);
10236 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10237 uint32_t val = REG_RD(sc, addr);
10238 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10239 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10240 (sc->intr_count == 1)) ? TRUE : FALSE;
10241 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10242
10243 if (msix) {
10244 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10245 HC_CONFIG_0_REG_INT_LINE_EN_0);
10246 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10247 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10248 if (single_msix) {
10249 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10250 }
10251 } else if (msi) {
10252 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10253 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10254 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10255 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10256 } else {
10257 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10258 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10259 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10260 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10261
10262 if (!CHIP_IS_E1(sc)) {
10263 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10264 val, port, addr);
10265
10266 REG_WR(sc, addr, val);
10267
10268 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10269 }
10270 }
10271
10272 if (CHIP_IS_E1(sc)) {
10273 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10274 }
10275
10276 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10277 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10278
10279 REG_WR(sc, addr, val);
10280
10281 /* ensure that HC_CONFIG is written before leading/trailing edge config */
10282 mb();
10283
10284 if (!CHIP_IS_E1(sc)) {
10285 /* init leading/trailing edge */
10286 if (IS_MF(sc)) {
10287 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10288 if (sc->port.pmf) {
10289 /* enable nig and gpio3 attention */
10290 val |= 0x1100;
10291 }
10292 } else {
10293 val = 0xffff;
10294 }
10295
10296 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10297 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10298 }
10299
10300 /* make sure that interrupts are indeed enabled from here on */
10301 mb();
10302 }
10303
10304 static void
10305 bxe_igu_int_enable(struct bxe_softc *sc)
10306 {
10307 uint32_t val;
10308 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10309 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10310 (sc->intr_count == 1)) ? TRUE : FALSE;
10311 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10312
10313 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10314
10315 if (msix) {
10316 val &= ~(IGU_PF_CONF_INT_LINE_EN |
10317 IGU_PF_CONF_SINGLE_ISR_EN);
10318 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10319 IGU_PF_CONF_ATTN_BIT_EN);
10320 if (single_msix) {
10321 val |= IGU_PF_CONF_SINGLE_ISR_EN;
10322 }
10323 } else if (msi) {
10324 val &= ~IGU_PF_CONF_INT_LINE_EN;
10325 val |= (IGU_PF_CONF_MSI_MSIX_EN |
10326 IGU_PF_CONF_ATTN_BIT_EN |
10327 IGU_PF_CONF_SINGLE_ISR_EN);
10328 } else {
10329 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10330 val |= (IGU_PF_CONF_INT_LINE_EN |
10331 IGU_PF_CONF_ATTN_BIT_EN |
10332 IGU_PF_CONF_SINGLE_ISR_EN);
10333 }
10334
10335     /* clean previous status - need to configure igu prior to ack */
10336 if ((!msix) || single_msix) {
10337 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10338 bxe_ack_int(sc);
10339 }
10340
10341 val |= IGU_PF_CONF_FUNC_EN;
10342
10343 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10344 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10345
10346 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10347
10348 mb();
10349
10350 /* init leading/trailing edge */
10351 if (IS_MF(sc)) {
10352 val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10353 if (sc->port.pmf) {
10354 /* enable nig and gpio3 attention */
10355 val |= 0x1100;
10356 }
10357 } else {
10358 val = 0xffff;
10359 }
10360
10361 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10362 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10363
10364 /* make sure that interrupts are indeed enabled from here on */
10365 mb();
10366 }
10367
10368 static void
10369 bxe_int_enable(struct bxe_softc *sc)
10370 {
10371 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10372 bxe_hc_int_enable(sc);
10373 } else {
10374 bxe_igu_int_enable(sc);
10375 }
10376 }
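/*
 * [Editor's summary] Final enable bits chosen by the two paths above
 * (HC names shown; the IGU path sets the equivalent IGU_PF_CONF_* bits):
 *
 *   mode            SINGLE_ISR  MSI_MSIX  INT_LINE  ATTN_BIT
 *   MSI-X (multi)       off        on        off       on
 *   MSI-X (single)      on         on        off       on
 *   MSI                 on         on        off       on
 *   INTx                on         on(*)     on        on
 *
 * (*) the HC INTx path on non-E1 chips, and the IGU INTx path always,
 * end with MSI_MSIX cleared.
 */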
10377
10378 static void
10379 bxe_hc_int_disable(struct bxe_softc *sc)
10380 {
10381 int port = SC_PORT(sc);
10382 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10383 uint32_t val = REG_RD(sc, addr);
10384
10385 /*
10386 * In E1 we must use only PCI configuration space to disable MSI/MSIX
10387      * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the
10388      * HC block.
10389 */
10390 if (CHIP_IS_E1(sc)) {
10391 /*
10392          * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register
10393          * to prevent the HC from sending interrupts after we exit the function.
10394 */
10395 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10396
10397 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10398 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10399 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10400 } else {
10401 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10402 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10403 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10404 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10405 }
10406
10407 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10408
10409 /* flush all outstanding writes */
10410 mb();
10411
10412 REG_WR(sc, addr, val);
10413 if (REG_RD(sc, addr) != val) {
10414 BLOGE(sc, "proper val not read from HC IGU!\n");
10415 }
10416 }
10417
10418 static void
10419 bxe_igu_int_disable(struct bxe_softc *sc)
10420 {
10421 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10422
10423 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10424 IGU_PF_CONF_INT_LINE_EN |
10425 IGU_PF_CONF_ATTN_BIT_EN);
10426
10427 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10428
10429 /* flush all outstanding writes */
10430 mb();
10431
10432 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10433 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10434 BLOGE(sc, "proper val not read from IGU!\n");
10435 }
10436 }
10437
10438 static void
10439 bxe_int_disable(struct bxe_softc *sc)
10440 {
10441 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10442 bxe_hc_int_disable(sc);
10443 } else {
10444 bxe_igu_int_disable(sc);
10445 }
10446 }
10447
10448 static void
10449 bxe_nic_init(struct bxe_softc *sc,
10450 int load_code)
10451 {
10452 int i;
10453
10454 for (i = 0; i < sc->num_queues; i++) {
10455 bxe_init_eth_fp(sc, i);
10456 }
10457
10458 rmb(); /* ensure status block indices were read */
10459
10460 bxe_init_rx_rings(sc);
10461 bxe_init_tx_rings(sc);
10462
10463 if (IS_VF(sc)) {
10464 return;
10465 }
10466
10467 /* initialize MOD_ABS interrupts */
10468 elink_init_mod_abs_int(sc, &sc->link_vars,
10469 sc->devinfo.chip_id,
10470 sc->devinfo.shmem_base,
10471 sc->devinfo.shmem2_base,
10472 SC_PORT(sc));
10473
10474 bxe_init_def_sb(sc);
10475 bxe_update_dsb_idx(sc);
10476 bxe_init_sp_ring(sc);
10477 bxe_init_eq_ring(sc);
10478 bxe_init_internal(sc, load_code);
10479 bxe_pf_init(sc);
10480 bxe_stats_init(sc);
10481
10482 /* flush all before enabling interrupts */
10483 mb();
10484
10485 bxe_int_enable(sc);
10486
10487 /* check for SPIO5 */
10488 bxe_attn_int_deasserted0(sc,
10489 REG_RD(sc,
10490 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10491 SC_PORT(sc)*4)) &
10492 AEU_INPUTS_ATTN_BITS_SPIO5);
10493 }
10494
10495 static inline void
10496 bxe_init_objs(struct bxe_softc *sc)
10497 {
10498 /* mcast rules must be added to tx if tx switching is enabled */
10499 ecore_obj_type o_type =
10500 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10501 ECORE_OBJ_TYPE_RX;
10502
10503 /* RX_MODE controlling object */
10504 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10505
10506 /* multicast configuration controlling object */
10507 ecore_init_mcast_obj(sc,
10508 &sc->mcast_obj,
10509 sc->fp[0].cl_id,
10510 sc->fp[0].index,
10511 SC_FUNC(sc),
10512 SC_FUNC(sc),
10513 BXE_SP(sc, mcast_rdata),
10514 BXE_SP_MAPPING(sc, mcast_rdata),
10515 ECORE_FILTER_MCAST_PENDING,
10516 &sc->sp_state,
10517 o_type);
10518
10519 /* Setup CAM credit pools */
10520 ecore_init_mac_credit_pool(sc,
10521 &sc->macs_pool,
10522 SC_FUNC(sc),
10523 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10524 VNICS_PER_PATH(sc));
10525
10526 ecore_init_vlan_credit_pool(sc,
10527 &sc->vlans_pool,
10528 SC_ABS_FUNC(sc) >> 1,
10529 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10530 VNICS_PER_PATH(sc));
10531
10532 /* RSS configuration object */
10533 ecore_init_rss_config_obj(sc,
10534 &sc->rss_conf_obj,
10535 sc->fp[0].cl_id,
10536 sc->fp[0].index,
10537 SC_FUNC(sc),
10538 SC_FUNC(sc),
10539 BXE_SP(sc, rss_rdata),
10540 BXE_SP_MAPPING(sc, rss_rdata),
10541 ECORE_FILTER_RSS_CONF_PENDING,
10542 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10543 }
10544
10545 /*
10546 * Initialize the function. This must be called before sending CLIENT_SETUP
10547 * for the first client.
10548 */
10549 static inline int
10550 bxe_func_start(struct bxe_softc *sc)
10551 {
10552 struct ecore_func_state_params func_params = { NULL };
10553 struct ecore_func_start_params *start_params = &func_params.params.start;
10554
10555 /* Prepare parameters for function state transitions */
10556 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10557
10558 func_params.f_obj = &sc->func_obj;
10559 func_params.cmd = ECORE_F_CMD_START;
10560
10561 /* Function parameters */
10562 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10563 start_params->sd_vlan_tag = OVLAN(sc);
10564
10565 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10566 start_params->network_cos_mode = STATIC_COS;
10567 } else { /* CHIP_IS_E1X */
10568 start_params->network_cos_mode = FW_WRR;
10569 }
10570
10571 //start_params->gre_tunnel_mode = 0;
10572 //start_params->gre_tunnel_rss = 0;
10573
10574 return (ecore_func_state_change(sc, &func_params));
10575 }
10576
10577 static int
10578 bxe_set_power_state(struct bxe_softc *sc,
10579 uint8_t state)
10580 {
10581 uint16_t pmcsr;
10582
10583     /* If there is no power capability, warn and succeed */
10584 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10585 BLOGW(sc, "No power capability\n");
10586 return (0);
10587 }
10588
10589 pmcsr = pci_read_config(sc->dev,
10590 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10591 2);
10592
10593 switch (state) {
10594 case PCI_PM_D0:
10595 pci_write_config(sc->dev,
10596 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10597 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10598
10599 if (pmcsr & PCIM_PSTAT_DMASK) {
10600 /* delay required during transition out of D3hot */
10601 DELAY(20000);
10602 }
10603
10604 break;
10605
10606 case PCI_PM_D3hot:
10607 /* XXX if there are other clients above don't shut down the power */
10608
10609 /* don't shut down the power for emulation and FPGA */
10610 if (CHIP_REV_IS_SLOW(sc)) {
10611 return (0);
10612 }
10613
10614 pmcsr &= ~PCIM_PSTAT_DMASK;
10615 pmcsr |= PCIM_PSTAT_D3;
10616
10617 if (sc->wol) {
10618 pmcsr |= PCIM_PSTAT_PMEENABLE;
10619 }
10620
10621 pci_write_config(sc->dev,
10622 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10623 pmcsr, 4);
10624
10625 /*
10626 * No more memory access after this point until device is brought back
10627 * to D0 state.
10628 */
10629 break;
10630
10631 default:
10632 BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10633 state, pmcsr);
10634 return (-1);
10635 }
10636
10637 return (0);
10638 }
10639
10640
10641 /* return true if succeeded to acquire the lock */
10642 static uint8_t
10643 bxe_trylock_hw_lock(struct bxe_softc *sc,
10644 uint32_t resource)
10645 {
10646 uint32_t lock_status;
10647 uint32_t resource_bit = (1 << resource);
10648 int func = SC_FUNC(sc);
10649 uint32_t hw_lock_control_reg;
10650
10651 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10652
10653 /* Validating that the resource is within range */
10654 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10655 BLOGD(sc, DBG_LOAD,
10656 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10657 resource, HW_LOCK_MAX_RESOURCE_VALUE);
10658 return (FALSE);
10659 }
10660
10661 if (func <= 5) {
10662 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10663 } else {
10664 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10665 }
10666
10667 /* try to acquire the lock */
10668 REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10669 lock_status = REG_RD(sc, hw_lock_control_reg);
10670 if (lock_status & resource_bit) {
10671 return (TRUE);
10672 }
10673
10674 BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10675 "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10676 lock_status, resource_bit);
10677
10678 return (FALSE);
10679 }
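/*
 * [Editor's note] A minimal sketch of how a caller could spin on the
 * try-lock above with a bounded number of attempts; the wrapper below is
 * hypothetical, not part of the driver:
 */
#if 0
static uint8_t
bxe_trylock_hw_lock_timed(struct bxe_softc *sc,
                          uint32_t          resource,
                          int               attempts)
{
    while (attempts-- > 0) {
        if (bxe_trylock_hw_lock(sc, resource)) {
            return (TRUE);
        }
        DELAY(5000); /* back off 5ms between attempts */
    }
    return (FALSE);
}
#endif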
10680
10681 /*
10682 * Get the recovery leader resource id according to the engine this function
10683  * belongs to. Currently only 2 engines are supported.
10684 */
10685 static int
10686 bxe_get_leader_lock_resource(struct bxe_softc *sc)
10687 {
10688 if (SC_PATH(sc)) {
10689 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10690 } else {
10691 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10692 }
10693 }
10694
10695 /* try to acquire a leader lock for current engine */
10696 static uint8_t
10697 bxe_trylock_leader_lock(struct bxe_softc *sc)
10698 {
10699 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10700 }
10701
10702 static int
10703 bxe_release_leader_lock(struct bxe_softc *sc)
10704 {
10705 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10706 }
10707
10708 /* close gates #2, #3 and #4 */
10709 static void
10710 bxe_set_234_gates(struct bxe_softc *sc,
10711 uint8_t close)
10712 {
10713 uint32_t val;
10714
10715 /* gates #2 and #4a are closed/opened for "not E1" only */
10716 if (!CHIP_IS_E1(sc)) {
10717 /* #4 */
10718 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10719 /* #2 */
10720 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10721 }
10722
10723 /* #3 */
10724 if (CHIP_IS_E1x(sc)) {
10725 /* prevent interrupts from HC on both ports */
10726 val = REG_RD(sc, HC_REG_CONFIG_1);
10727 REG_WR(sc, HC_REG_CONFIG_1,
10728 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10729 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10730
10731 val = REG_RD(sc, HC_REG_CONFIG_0);
10732 REG_WR(sc, HC_REG_CONFIG_0,
10733 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10734 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10735 } else {
10736 /* Prevent incoming interrupts in IGU */
10737 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10738
10739 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10740 (!close) ?
10741 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10742 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10743 }
10744
10745 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10746 close ? "closing" : "opening");
10747
10748 wmb();
10749 }
10750
10751 /* poll for the pending writes bit; it should clear in no more than 1s */
10752 static int
10753 bxe_er_poll_igu_vq(struct bxe_softc *sc)
10754 {
10755 uint32_t cnt = 1000;
10756 uint32_t pend_bits = 0;
10757
10758 do {
10759 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10760
10761 if (pend_bits == 0) {
10762 break;
10763 }
10764
10765 DELAY(1000);
10766 } while (--cnt > 0);
10767
10768 if (cnt == 0) {
10769 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10770 return (-1);
10771 }
10772
10773 return (0);
10774 }
10775
10776 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
10777
10778 static void
10779 bxe_clp_reset_prep(struct bxe_softc *sc,
10780 uint32_t *magic_val)
10781 {
10782 /* Do some magic... */
10783 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10784 *magic_val = val & SHARED_MF_CLP_MAGIC;
10785 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10786 }
10787
10788 /* restore the value of the 'magic' bit */
10789 static void
10790 bxe_clp_reset_done(struct bxe_softc *sc,
10791 uint32_t magic_val)
10792 {
10793 /* Restore the 'magic' bit value... */
10794 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10795 MFCFG_WR(sc, shared_mf_config.clp_mb,
10796 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10797 }
10798
10799 /* prepare for MCP reset, takes care of CLP configurations */
10800 static void
10801 bxe_reset_mcp_prep(struct bxe_softc *sc,
10802 uint32_t *magic_val)
10803 {
10804 uint32_t shmem;
10805 uint32_t validity_offset;
10806
10807 /* set `magic' bit in order to save MF config */
10808 if (!CHIP_IS_E1(sc)) {
10809 bxe_clp_reset_prep(sc, magic_val);
10810 }
10811
10812 /* get shmem offset */
10813 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10814 validity_offset =
10815 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10816
10817 /* Clear validity map flags */
10818 if (shmem > 0) {
10819 REG_WR(sc, shmem + validity_offset, 0);
10820 }
10821 }
10822
10823 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
10824 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
10825
10826 static void
10827 bxe_mcp_wait_one(struct bxe_softc *sc)
10828 {
10829 /* special handling for emulation and FPGA (10 times longer) */
10830 if (CHIP_REV_IS_SLOW(sc)) {
10831 DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10832 } else {
10833 DELAY((MCP_ONE_TIMEOUT) * 1000);
10834 }
10835 }
10836
10837 /* initialize shmem_base and wait for the validity signature to appear */
10838 static int
10839 bxe_init_shmem(struct bxe_softc *sc)
10840 {
10841 int cnt = 0;
10842 uint32_t val = 0;
10843
10844 do {
10845 sc->devinfo.shmem_base =
10846 sc->link_params.shmem_base =
10847 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10848
10849 if (sc->devinfo.shmem_base) {
10850 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10851 if (val & SHR_MEM_VALIDITY_MB)
10852 return (0);
10853 }
10854
10855 bxe_mcp_wait_one(sc);
10856
10857 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10858
10859 BLOGE(sc, "BAD MCP validity signature\n");
10860
10861 return (-1);
10862 }
10863
10864 static int
10865 bxe_reset_mcp_comp(struct bxe_softc *sc,
10866 uint32_t magic_val)
10867 {
10868 int rc = bxe_init_shmem(sc);
10869
10870 /* Restore the `magic' bit value */
10871 if (!CHIP_IS_E1(sc)) {
10872 bxe_clp_reset_done(sc, magic_val);
10873 }
10874
10875 return (rc);
10876 }
10877
10878 static void
10879 bxe_pxp_prep(struct bxe_softc *sc)
10880 {
10881 if (!CHIP_IS_E1(sc)) {
10882 REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10883 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10884 wmb();
10885 }
10886 }
10887
10888 /*
10889 * Reset the whole chip except for:
10890 * - PCIE core
10891 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10892 * - IGU
10893 * - MISC (including AEU)
10894 * - GRC
10895 * - RBCN, RBCP
10896 */
10897 static void
10898 bxe_process_kill_chip_reset(struct bxe_softc *sc,
10899 uint8_t global)
10900 {
10901 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10902 uint32_t global_bits2, stay_reset2;
10903
10904 /*
10905 * Bits that have to be set in reset_mask2 if we want to reset 'global'
10906 * (per chip) blocks.
10907 */
10908 global_bits2 =
10909 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10910 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10911
10912 /*
10913 * Don't reset the following blocks.
10914 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10915      *            reset, as in a 4-port device they might still be owned
10916 * by the MCP (there is only one leader per path).
10917 */
10918 not_reset_mask1 =
10919 MISC_REGISTERS_RESET_REG_1_RST_HC |
10920 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10921 MISC_REGISTERS_RESET_REG_1_RST_PXP;
10922
10923 not_reset_mask2 =
10924 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10925 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10926 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10927 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10928 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10929 MISC_REGISTERS_RESET_REG_2_RST_GRC |
10930 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10931 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10932 MISC_REGISTERS_RESET_REG_2_RST_ATC |
10933 MISC_REGISTERS_RESET_REG_2_PGLC |
10934 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10935 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10936 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10937 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10938 MISC_REGISTERS_RESET_REG_2_UMAC0 |
10939 MISC_REGISTERS_RESET_REG_2_UMAC1;
10940
10941 /*
10942 * Keep the following blocks in reset:
10943 * - all xxMACs are handled by the elink code.
10944 */
10945 stay_reset2 =
10946 MISC_REGISTERS_RESET_REG_2_XMAC |
10947 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10948
10949 /* Full reset masks according to the chip */
10950 reset_mask1 = 0xffffffff;
10951
10952 if (CHIP_IS_E1(sc))
10953 reset_mask2 = 0xffff;
10954 else if (CHIP_IS_E1H(sc))
10955 reset_mask2 = 0x1ffff;
10956 else if (CHIP_IS_E2(sc))
10957 reset_mask2 = 0xfffff;
10958 else /* CHIP_IS_E3 */
10959 reset_mask2 = 0x3ffffff;
10960
10961 /* Don't reset global blocks unless we need to */
10962 if (!global)
10963 reset_mask2 &= ~global_bits2;
10964
10965 /*
10966 * In case of attention in the QM, we need to reset PXP
10967 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10968 * because otherwise QM reset would release 'close the gates' shortly
10969 * before resetting the PXP, then the PSWRQ would send a write
10970 * request to PGLUE. Then when PXP is reset, PGLUE would try to
10971 * read the payload data from PSWWR, but PSWWR would not
10972      * respond. The write queue in PGLUE would get stuck, DMAE commands
10973 * would not return. Therefore it's important to reset the second
10974 * reset register (containing the
10975 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10976 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10977 * bit).
10978 */
10979 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10980 reset_mask2 & (~not_reset_mask2));
10981
10982 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10983 reset_mask1 & (~not_reset_mask1));
10984
10985 mb();
10986 wmb();
10987
10988 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10989 reset_mask2 & (~stay_reset2));
10990
10991 mb();
10992 wmb();
10993
10994 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10995 wmb();
10996 }
10997
10998 static int
10999 bxe_process_kill(struct bxe_softc *sc,
11000 uint8_t global)
11001 {
11002 int cnt = 1000;
11003 uint32_t val = 0;
11004 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11005 uint32_t tags_63_32 = 0;
11006
11007 /* Empty the Tetris buffer, wait for 1s */
11008 do {
11009 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11010 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11011 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11012 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11013 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11014 if (CHIP_IS_E3(sc)) {
11015 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11016 }
11017
11018 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11019 ((port_is_idle_0 & 0x1) == 0x1) &&
11020 ((port_is_idle_1 & 0x1) == 0x1) &&
11021 (pgl_exp_rom2 == 0xffffffff) &&
11022 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11023 break;
11024 DELAY(1000);
11025 } while (cnt-- > 0);
11026
11027 if (cnt <= 0) {
11028 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11029 "are still outstanding read requests after 1s! "
11030 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11031 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11032 sr_cnt, blk_cnt, port_is_idle_0,
11033 port_is_idle_1, pgl_exp_rom2);
11034 return (-1);
11035 }
11036
11037 mb();
11038
11039 /* Close gates #2, #3 and #4 */
11040 bxe_set_234_gates(sc, TRUE);
11041
11042 /* Poll for IGU VQs for 57712 and newer chips */
11043 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11044 return (-1);
11045 }
11046
11047 /* XXX indicate that "process kill" is in progress to MCP */
11048
11049 /* clear "unprepared" bit */
11050 REG_WR(sc, MISC_REG_UNPREPARED, 0);
11051 mb();
11052
11053 /* Make sure all is written to the chip before the reset */
11054 wmb();
11055
11056 /*
11057 * Wait for 1ms to empty GLUE and PCI-E core queues,
11058 * PSWHST, GRC and PSWRD Tetris buffer.
11059 */
11060 DELAY(1000);
11061
11062     /* Prepare for chip reset: */
11063 /* MCP */
11064 if (global) {
11065 bxe_reset_mcp_prep(sc, &val);
11066 }
11067
11068 /* PXP */
11069 bxe_pxp_prep(sc);
11070 mb();
11071
11072 /* reset the chip */
11073 bxe_process_kill_chip_reset(sc, global);
11074 mb();
11075
11076 /* clear errors in PGB */
11077 if (!CHIP_IS_E1(sc))
11078 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11079
11080 /* Recover after reset: */
11081 /* MCP */
11082 if (global && bxe_reset_mcp_comp(sc, val)) {
11083 return (-1);
11084 }
11085
11086 /* XXX add resetting the NO_MCP mode DB here */
11087
11088 /* Open the gates #2, #3 and #4 */
11089 bxe_set_234_gates(sc, FALSE);
11090
11091 /* XXX
11092 * IGU/AEU preparation bring back the AEU/IGU to a reset state
11093 * re-enable attentions
11094 */
11095
11096 return (0);
11097 }
11098
11099 static int
11100 bxe_leader_reset(struct bxe_softc *sc)
11101 {
11102 int rc = 0;
11103 uint8_t global = bxe_reset_is_global(sc);
11104 uint32_t load_code;
11105
11106 /*
11107 * If not going to reset MCP, load "fake" driver to reset HW while
11108 * driver is owner of the HW.
11109 */
11110 if (!global && !BXE_NOMCP(sc)) {
11111 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11112 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11113 if (!load_code) {
11114 BLOGE(sc, "MCP response failure, aborting\n");
11115 rc = -1;
11116 goto exit_leader_reset;
11117 }
11118
11119 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11120 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11121 BLOGE(sc, "MCP unexpected response, aborting\n");
11122 rc = -1;
11123 goto exit_leader_reset2;
11124 }
11125
11126 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11127 if (!load_code) {
11128 BLOGE(sc, "MCP response failure, aborting\n");
11129 rc = -1;
11130 goto exit_leader_reset2;
11131 }
11132 }
11133
11134 /* try to recover after the failure */
11135 if (bxe_process_kill(sc, global)) {
11136 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11137 rc = -1;
11138 goto exit_leader_reset2;
11139 }
11140
11141 /*
11142 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11143 * state.
11144 */
11145 bxe_set_reset_done(sc);
11146 if (global) {
11147 bxe_clear_reset_global(sc);
11148 }
11149
11150 exit_leader_reset2:
11151
11152 /* unload "fake driver" if it was loaded */
11153 if (!global && !BXE_NOMCP(sc)) {
11154 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11155 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11156 }
11157
11158 exit_leader_reset:
11159
11160 sc->is_leader = 0;
11161 bxe_release_leader_lock(sc);
11162
11163 mb();
11164 return (rc);
11165 }
11166
11167 /*
11168 * prepare INIT transition, parameters configured:
11169 * - HC configuration
11170 * - Queue's CDU context
11171 */
11172 static void
11173 bxe_pf_q_prep_init(struct bxe_softc *sc,
11174 struct bxe_fastpath *fp,
11175 struct ecore_queue_init_params *init_params)
11176 {
11177 uint8_t cos;
11178 int cxt_index, cxt_offset;
11179
11180 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11181 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11182
11183 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11184 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11185
11186 /* HC rate */
11187 init_params->rx.hc_rate =
11188 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11189 init_params->tx.hc_rate =
11190 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11191
11192 /* FW SB ID */
11193 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11194
11195 /* CQ index among the SB indices */
11196 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11197 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11198
11199 /* set maximum number of COSs supported by this queue */
11200 init_params->max_cos = sc->max_cos;
11201
11202 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11203 fp->index, init_params->max_cos);
11204
11205 /* set the context pointers queue object */
11206 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11207 /* XXX change index/cid here if ever support multiple tx CoS */
11208 /* fp->txdata[cos]->cid */
11209 cxt_index = fp->index / ILT_PAGE_CIDS;
11210 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11211 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11212 }
11213 }
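/*
 * [Editor's note] The hc_rate fields above convert a coalescing period
 * in microseconds into an interrupt rate in events/second. E.g. with an
 * illustrative hc_rx_ticks of 25: 1000000 / 25 = 40000 interrupts/sec;
 * a tick value of 0 yields hc_rate = 0, i.e. rate limiting disabled.
 */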
11214
11215 /* set flags that are common for the Tx-only and not normal connections */
11216 static unsigned long
11217 bxe_get_common_flags(struct bxe_softc *sc,
11218 struct bxe_fastpath *fp,
11219 uint8_t zero_stats)
11220 {
11221 unsigned long flags = 0;
11222
11223 /* PF driver will always initialize the Queue to an ACTIVE state */
11224 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11225
11226 /*
11227 * tx only connections collect statistics (on the same index as the
11228 * parent connection). The statistics are zeroed when the parent
11229 * connection is initialized.
11230 */
11231
11232 bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11233 if (zero_stats) {
11234 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11235 }
11236
11237 /*
11238 * tx only connections can support tx-switching, though their
11239 * CoS-ness doesn't survive the loopback
11240 */
11241 if (sc->flags & BXE_TX_SWITCHING) {
11242 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11243 }
11244
11245 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11246
11247 return (flags);
11248 }
11249
11250 static unsigned long
11251 bxe_get_q_flags(struct bxe_softc *sc,
11252 struct bxe_fastpath *fp,
11253 uint8_t leading)
11254 {
11255 unsigned long flags = 0;
11256
11257 if (IS_MF_SD(sc)) {
11258 bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11259 }
11260
11261 if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11262 bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11263 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11264 }
11265
11266 if (leading) {
11267 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11268 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11269 }
11270
11271 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11272
11273 /* merge with common flags */
11274 return (flags | bxe_get_common_flags(sc, fp, TRUE));
11275 }
11276
11277 static void
11278 bxe_pf_q_prep_general(struct bxe_softc *sc,
11279 struct bxe_fastpath *fp,
11280 struct ecore_general_setup_params *gen_init,
11281 uint8_t cos)
11282 {
11283 gen_init->stat_id = bxe_stats_id(fp);
11284 gen_init->spcl_id = fp->cl_id;
11285 gen_init->mtu = sc->mtu;
11286 gen_init->cos = cos;
11287 }
11288
11289 static void
11290 bxe_pf_rx_q_prep(struct bxe_softc *sc,
11291 struct bxe_fastpath *fp,
11292 struct rxq_pause_params *pause,
11293 struct ecore_rxq_setup_params *rxq_init)
11294 {
11295 uint8_t max_sge = 0;
11296 uint16_t sge_sz = 0;
11297 uint16_t tpa_agg_size = 0;
11298
11299 pause->sge_th_lo = SGE_TH_LO(sc);
11300 pause->sge_th_hi = SGE_TH_HI(sc);
11301
11302 /* validate SGE ring has enough to cross high threshold */
11303 if (sc->dropless_fc &&
11304 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11305 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11306 BLOGW(sc, "sge ring threshold limit\n");
11307 }
11308
11309 /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11310 tpa_agg_size = (2 * sc->mtu);
11311 if (tpa_agg_size < sc->max_aggregation_size) {
11312 tpa_agg_size = sc->max_aggregation_size;
11313 }
11314
11315 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11316 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11317 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11318 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11319
11320 /* pause - not for e1 */
11321 if (!CHIP_IS_E1(sc)) {
11322 pause->bd_th_lo = BD_TH_LO(sc);
11323 pause->bd_th_hi = BD_TH_HI(sc);
11324
11325 pause->rcq_th_lo = RCQ_TH_LO(sc);
11326 pause->rcq_th_hi = RCQ_TH_HI(sc);
11327
11328 /* validate rings have enough entries to cross high thresholds */
11329 if (sc->dropless_fc &&
11330 pause->bd_th_hi + FW_PREFETCH_CNT >
11331 sc->rx_ring_size) {
11332 BLOGW(sc, "rx bd ring threshold limit\n");
11333 }
11334
11335 if (sc->dropless_fc &&
11336 pause->rcq_th_hi + FW_PREFETCH_CNT >
11337 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11338 BLOGW(sc, "rcq ring threshold limit\n");
11339 }
11340
11341 pause->pri_map = 1;
11342 }
11343
11344 /* rxq setup */
11345 rxq_init->dscr_map = fp->rx_dma.paddr;
11346 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11347 rxq_init->rcq_map = fp->rcq_dma.paddr;
11348 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11349
11350 /*
11351      * This should be the maximum number of data bytes that may be
11352      * placed on the BD (not including padding).
11353 */
11354 rxq_init->buf_sz = (fp->rx_buf_size -
11355 IP_HEADER_ALIGNMENT_PADDING);
11356
11357 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11358 rxq_init->tpa_agg_sz = tpa_agg_size;
11359 rxq_init->sge_buf_sz = sge_sz;
11360 rxq_init->max_sges_pkt = max_sge;
11361 rxq_init->rss_engine_id = SC_FUNC(sc);
11362 rxq_init->mcast_engine_id = SC_FUNC(sc);
11363
11364 /*
11365      * Maximum number of simultaneous TPA aggregations for this queue.
11366 * For PF Clients it should be the maximum available number.
11367 * VF driver(s) may want to define it to a smaller value.
11368 */
11369 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11370
11371 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11372 rxq_init->fw_sb_id = fp->fw_sb_id;
11373
11374 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11375
11376 /*
11377 * configure silent vlan removal
11378 * if multi function mode is afex, then mask default vlan
11379 */
11380 if (IS_MF_AFEX(sc)) {
11381 rxq_init->silent_removal_value =
11382 sc->devinfo.mf_info.afex_def_vlan_tag;
11383 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11384 }
11385 }
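/*
 * [Editor's note] A worked example of the TPA sizing above (numbers
 * illustrative): with an MTU of 1500 the 2*MTU floor is 3000, so a
 * max_aggregation_size of 32768 wins and becomes tpa_agg_sz; with an
 * MTU of 9000 and a max_aggregation_size of 16384 the floor 18000 wins.
 */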
11386
11387 static void
11388 bxe_pf_tx_q_prep(struct bxe_softc *sc,
11389 struct bxe_fastpath *fp,
11390 struct ecore_txq_setup_params *txq_init,
11391 uint8_t cos)
11392 {
11393 /*
11394 * XXX If multiple CoS is ever supported then each fastpath structure
11395 * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11396 * fp->txdata[cos]->tx_dma.paddr;
11397 */
11398 txq_init->dscr_map = fp->tx_dma.paddr;
11399 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11400 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11401 txq_init->fw_sb_id = fp->fw_sb_id;
11402
11403 /*
11404      * set the TSS leading client id for TX classification to the
11405 * leading RSS client id
11406 */
11407 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11408 }
11409
11410 /*
11411 * This function performs 2 steps in a queue state machine:
11412 * 1) RESET->INIT
11413 * 2) INIT->SETUP
11414 */
11415 static int
11416 bxe_setup_queue(struct bxe_softc *sc,
11417 struct bxe_fastpath *fp,
11418 uint8_t leading)
11419 {
11420 struct ecore_queue_state_params q_params = { NULL };
11421 struct ecore_queue_setup_params *setup_params =
11422 &q_params.params.setup;
11423 int rc;
11424
11425 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11426
11427 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11428
11429 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11430
11431 /* we want to wait for completion in this context */
11432 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11433
11434 /* prepare the INIT parameters */
11435 bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11436
11437 /* Set the command */
11438 q_params.cmd = ECORE_Q_CMD_INIT;
11439
11440 /* Change the state to INIT */
11441 rc = ecore_queue_state_change(sc, &q_params);
11442 if (rc) {
11443 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11444 return (rc);
11445 }
11446
11447 BLOGD(sc, DBG_LOAD, "init complete\n");
11448
11449 /* now move the Queue to the SETUP state */
11450 memset(setup_params, 0, sizeof(*setup_params));
11451
11452 /* set Queue flags */
11453 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11454
11455 /* set general SETUP parameters */
11456 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11457 FIRST_TX_COS_INDEX);
11458
11459 bxe_pf_rx_q_prep(sc, fp,
11460 &setup_params->pause_params,
11461 &setup_params->rxq_params);
11462
11463 bxe_pf_tx_q_prep(sc, fp,
11464 &setup_params->txq_params,
11465 FIRST_TX_COS_INDEX);
11466
11467 /* Set the command */
11468 q_params.cmd = ECORE_Q_CMD_SETUP;
11469
11470 /* change the state to SETUP */
11471 rc = ecore_queue_state_change(sc, &q_params);
11472 if (rc) {
11473 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11474 return (rc);
11475 }
11476
11477 return (rc);
11478 }
11479
11480 static int
11481 bxe_setup_leading(struct bxe_softc *sc)
11482 {
11483 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11484 }
11485
11486 static int
11487 bxe_config_rss_pf(struct bxe_softc *sc,
11488 struct ecore_rss_config_obj *rss_obj,
11489 uint8_t config_hash)
11490 {
11491 struct ecore_config_rss_params params = { NULL };
11492 int i;
11493
11494 /*
11495      * Although RSS is meaningless when there is a single HW queue, we
11496      * still need it enabled so that the HW RX hash is generated.
11497 */
11498
11499 params.rss_obj = rss_obj;
11500
11501     bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11502
11503     bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11504
11505 /* RSS configuration */
11506     bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11507     bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11508     bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11509     bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11510 if (rss_obj->udp_rss_v4) {
11511         bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11512 }
11513 if (rss_obj->udp_rss_v6) {
11514         bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11515 }
11516
11517 /* Hash bits */
11518 params.rss_result_mask = MULTI_MASK;
11519
11520 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11521
11522 if (config_hash) {
11523 /* RSS keys */
11524 for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11525 params.rss_key[i] = arc4random();
11526 }
11527
11528         bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11529 }
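    /*
     * Note: the key is filled one 32-bit word at a time from arc4random(9),
     * so a fresh RSS key (and thus a different flow-to-queue spread) is
     * generated on every hash (re)configuration.
     */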
11530
11531     return (ecore_config_rss(sc, &params));
11532 }
11533
11534 static int
11535 bxe_config_rss_eth(struct bxe_softc *sc,
11536 uint8_t config_hash)
11537 {
11538 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11539 }
11540
11541 static int
11542 bxe_init_rss_pf(struct bxe_softc *sc)
11543 {
11544 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11545 int i;
11546
11547 /*
11548 * Prepare the initial contents of the indirection table if
11549 * RSS is enabled
11550 */
11551 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11552 sc->rss_conf_obj.ind_table[i] =
11553 (sc->fp->cl_id + (i % num_eth_queues));
11554 }
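    /*
     * Worked example (hypothetical values): with 4 ETH queues and a leading
     * cl_id of 0, the table round-robins 0,1,2,3,0,1,2,3,... across all of
     * its entries, spreading RSS hash buckets evenly over the RX queues.
     */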
11555
11556 if (sc->udp_rss) {
11557 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11558 }
11559
11560 /*
11561 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11562 * per-port, so if explicit configuration is needed, do it only
11563 * for a PMF.
11564 *
11565 * For 57712 and newer it's a per-function configuration.
11566 */
11567 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11568 }
11569
11570 static int
11571 bxe_set_mac_one(struct bxe_softc *sc,
11572 uint8_t *mac,
11573 struct ecore_vlan_mac_obj *obj,
11574 uint8_t set,
11575 int mac_type,
11576 unsigned long *ramrod_flags)
11577 {
11578 struct ecore_vlan_mac_ramrod_params ramrod_param;
11579 int rc;
11580
11581 memset(&ramrod_param, 0, sizeof(ramrod_param));
11582
11583 /* fill in general parameters */
11584 ramrod_param.vlan_mac_obj = obj;
11585 ramrod_param.ramrod_flags = *ramrod_flags;
11586
11587 /* fill a user request section if needed */
11588 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11589 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11590
11591 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11592
11593 /* Set the command: ADD or DEL */
11594 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11595 ECORE_VLAN_MAC_DEL;
11596 }
11597
11598 rc = ecore_config_vlan_mac(sc, &ramrod_param);
11599
11600 if (rc == ECORE_EXISTS) {
11601 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11602         /* do not treat adding the same MAC as an error */
11603 rc = 0;
11604 } else if (rc < 0) {
11605 BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11606 }
11607
11608 return (rc);
11609 }
11610
11611 static int
11612 bxe_set_eth_mac(struct bxe_softc *sc,
11613 uint8_t set)
11614 {
11615 unsigned long ramrod_flags = 0;
11616
11617 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11618
11619 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11620
11621 /* Eth MAC is set on RSS leading client (fp[0]) */
11622 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11623 &sc->sp_objs->mac_obj,
11624 set, ECORE_ETH_MAC, &ramrod_flags));
11625 }
11626
11627 static int
11628 bxe_get_cur_phy_idx(struct bxe_softc *sc)
11629 {
11630 uint32_t sel_phy_idx = 0;
11631
11632 if (sc->link_params.num_phys <= 1) {
11633 return (ELINK_INT_PHY);
11634 }
11635
11636 if (sc->link_vars.link_up) {
11637 sel_phy_idx = ELINK_EXT_PHY1;
11638 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11639 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11640 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11641 ELINK_SUPPORTED_FIBRE))
11642 sel_phy_idx = ELINK_EXT_PHY2;
11643 } else {
11644 switch (elink_phy_selection(&sc->link_params)) {
11645 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11646 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11647 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11648 sel_phy_idx = ELINK_EXT_PHY1;
11649 break;
11650 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11651 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11652 sel_phy_idx = ELINK_EXT_PHY2;
11653 break;
11654 }
11655 }
11656
11657 return (sel_phy_idx);
11658 }
11659
11660 static int
11661 bxe_get_link_cfg_idx(struct bxe_softc *sc)
11662 {
11663 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11664
11665 /*
11666 * The selected activated PHY is always after swapping (in case PHY
11667 * swapping is enabled). So when swapping is enabled, we need to reverse
11668      * the configuration.
11669 */
11670
11671 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11672 if (sel_phy_idx == ELINK_EXT_PHY1)
11673 sel_phy_idx = ELINK_EXT_PHY2;
11674 else if (sel_phy_idx == ELINK_EXT_PHY2)
11675 sel_phy_idx = ELINK_EXT_PHY1;
11676 }
11677
11678 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11679 }
11680
11681 static void
11682 bxe_set_requested_fc(struct bxe_softc *sc)
11683 {
11684 /*
11685      * Initialize the link parameters structure variables.
11686      * It is recommended to turn off RX FC for jumbo frames
11687      * for better performance.
11688 */
11689 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11690 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11691 } else {
11692 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11693 }
11694 }
11695
11696 static void
11697 bxe_calc_fc_adv(struct bxe_softc *sc)
11698 {
11699 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11700
11701
11702 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11703 ADVERTISED_Pause);
11704
11705 switch (sc->link_vars.ieee_fc &
11706 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11707
11708 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11709 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11710 ADVERTISED_Pause);
11711 break;
11712
11713 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11714 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11715 break;
11716
11717 default:
11718 break;
11719
11720 }
11721 }
11722
11723 static uint16_t
11724 bxe_get_mf_speed(struct bxe_softc *sc)
11725 {
11726 uint16_t line_speed = sc->link_vars.line_speed;
11727 if (IS_MF(sc)) {
11728 uint16_t maxCfg =
11729 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11730
11731 /* calculate the current MAX line speed limit for the MF devices */
11732 if (IS_MF_SI(sc)) {
11733 line_speed = (line_speed * maxCfg) / 100;
11734 } else { /* SD mode */
11735 uint16_t vn_max_rate = maxCfg * 100;
11736
11737 if (vn_max_rate < line_speed) {
11738 line_speed = vn_max_rate;
11739 }
11740 }
11741 }
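    /*
     * Worked example (hypothetical maxCfg of 50 on a 10000 Mbps link):
     * in SI mode maxCfg is a percentage, so 10000 * 50 / 100 = 5000 Mbps;
     * in SD mode maxCfg is in 100 Mbps units, so 50 * 100 = 5000 Mbps,
     * which caps line_speed only when it is the lower of the two.
     */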
11742
11743 return (line_speed);
11744 }
11745
11746 static void
11747 bxe_fill_report_data(struct bxe_softc *sc,
11748 struct bxe_link_report_data *data)
11749 {
11750 uint16_t line_speed = bxe_get_mf_speed(sc);
11751
11752 memset(data, 0, sizeof(*data));
11753
11754 /* fill the report data with the effective line speed */
11755 data->line_speed = line_speed;
11756
11757 /* Link is down */
11758 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11759 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11760 }
11761
11762 /* Full DUPLEX */
11763 if (sc->link_vars.duplex == DUPLEX_FULL) {
11764 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11765 }
11766
11767 /* Rx Flow Control is ON */
11768 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11769 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11770 }
11771
11772 /* Tx Flow Control is ON */
11773 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11774 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11775 }
11776 }
11777
11778 /* report link status to OS, should be called under phy_lock */
11779 static void
11780 bxe_link_report_locked(struct bxe_softc *sc)
11781 {
11782 struct bxe_link_report_data cur_data;
11783
11784 /* reread mf_cfg */
11785 if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11786 bxe_read_mf_cfg(sc);
11787 }
11788
11789 /* Read the current link report info */
11790 bxe_fill_report_data(sc, &cur_data);
11791
11792 /* Don't report link down or exactly the same link status twice */
11793 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11794 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11795 &sc->last_reported_link.link_report_flags) &&
11796 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11797 &cur_data.link_report_flags))) {
11798 return;
11799 }
11800
11801 ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11802 cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11803 sc->link_cnt++;
11804
11805 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11806 /* report new link params and remember the state for the next time */
11807 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11808
11809 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11810 &cur_data.link_report_flags)) {
11811 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11812 } else {
11813 const char *duplex;
11814 const char *flow;
11815
11816 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11817 &cur_data.link_report_flags)) {
11818 duplex = "full";
11819 ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11820 } else {
11821 duplex = "half";
11822 ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11823 }
11824
11825 /*
11826 * Handle the FC at the end so that only these flags would be
11827 * possibly set. This way we may easily check if there is no FC
11828 * enabled.
11829 */
11830 if (cur_data.link_report_flags) {
11831 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11832 &cur_data.link_report_flags) &&
11833 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11834 &cur_data.link_report_flags)) {
11835 flow = "ON - receive & transmit";
11836 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11837 &cur_data.link_report_flags) &&
11838 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11839 &cur_data.link_report_flags)) {
11840 flow = "ON - receive";
11841 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11842 &cur_data.link_report_flags) &&
11843 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11844 &cur_data.link_report_flags)) {
11845 flow = "ON - transmit";
11846 } else {
11847 flow = "none"; /* possible? */
11848 }
11849 } else {
11850 flow = "none";
11851 }
11852
11853 if_link_state_change(sc->ifp, LINK_STATE_UP);
11854 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11855 cur_data.line_speed, duplex, flow);
11856 }
11857 }
11858
11859 static void
11860 bxe_link_report(struct bxe_softc *sc)
11861 {
11862 bxe_acquire_phy_lock(sc);
11863 bxe_link_report_locked(sc);
11864 bxe_release_phy_lock(sc);
11865 }
11866
11867 static void
11868 bxe_link_status_update(struct bxe_softc *sc)
11869 {
11870 if (sc->state != BXE_STATE_OPEN) {
11871 return;
11872 }
11873
11874 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11875 elink_link_status_update(&sc->link_params, &sc->link_vars);
11876 } else {
11877 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11878 ELINK_SUPPORTED_10baseT_Full |
11879 ELINK_SUPPORTED_100baseT_Half |
11880 ELINK_SUPPORTED_100baseT_Full |
11881 ELINK_SUPPORTED_1000baseT_Full |
11882 ELINK_SUPPORTED_2500baseX_Full |
11883 ELINK_SUPPORTED_10000baseT_Full |
11884 ELINK_SUPPORTED_TP |
11885 ELINK_SUPPORTED_FIBRE |
11886 ELINK_SUPPORTED_Autoneg |
11887 ELINK_SUPPORTED_Pause |
11888 ELINK_SUPPORTED_Asym_Pause);
11889 sc->port.advertising[0] = sc->port.supported[0];
11890
11891 sc->link_params.sc = sc;
11892 sc->link_params.port = SC_PORT(sc);
11893 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11894 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11895 sc->link_params.req_line_speed[0] = SPEED_10000;
11896 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11897 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11898
11899 if (CHIP_REV_IS_FPGA(sc)) {
11900 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11901 sc->link_vars.line_speed = ELINK_SPEED_1000;
11902 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11903 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11904 } else {
11905 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11906 sc->link_vars.line_speed = ELINK_SPEED_10000;
11907 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11908 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11909 }
11910
11911 sc->link_vars.link_up = 1;
11912
11913 sc->link_vars.duplex = DUPLEX_FULL;
11914 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11915
11916 if (IS_PF(sc)) {
11917 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11918 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11919 bxe_link_report(sc);
11920 }
11921 }
11922
11923 if (IS_PF(sc)) {
11924 if (sc->link_vars.link_up) {
11925 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11926 } else {
11927 bxe_stats_handle(sc, STATS_EVENT_STOP);
11928 }
11929 bxe_link_report(sc);
11930 } else {
11931 bxe_link_report(sc);
11932 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11933 }
11934 }
11935
11936 static int
11937 bxe_initial_phy_init(struct bxe_softc *sc,
11938 int load_mode)
11939 {
11940 int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11941 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11942 struct elink_params *lp = &sc->link_params;
11943
11944 bxe_set_requested_fc(sc);
11945
11946 if (CHIP_REV_IS_SLOW(sc)) {
11947 uint32_t bond = CHIP_BOND_ID(sc);
11948 uint32_t feat = 0;
11949
11950 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11951 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11952 } else if (bond & 0x4) {
11953 if (CHIP_IS_E3(sc)) {
11954 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11955 } else {
11956 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11957 }
11958 } else if (bond & 0x8) {
11959 if (CHIP_IS_E3(sc)) {
11960 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11961 } else {
11962 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11963 }
11964 }
11965
11966 /* disable EMAC for E3 and above */
11967 if (bond & 0x2) {
11968 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11969 }
11970
11971 sc->link_params.feature_config_flags |= feat;
11972 }
11973
11974 bxe_acquire_phy_lock(sc);
11975
11976 if (load_mode == LOAD_DIAG) {
11977 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11978 /* Prefer doing PHY loopback at 10G speed, if possible */
11979 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11980 if (lp->speed_cap_mask[cfg_idx] &
11981 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11982 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11983 } else {
11984 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11985 }
11986 }
11987 }
11988
11989 if (load_mode == LOAD_LOOPBACK_EXT) {
11990 lp->loopback_mode = ELINK_LOOPBACK_EXT;
11991 }
11992
11993 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11994
11995 bxe_release_phy_lock(sc);
11996
11997 bxe_calc_fc_adv(sc);
11998
11999 if (sc->link_vars.link_up) {
12000 bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
12001 bxe_link_report(sc);
12002 }
12003
12004 if (!CHIP_REV_IS_SLOW(sc)) {
12005 bxe_periodic_start(sc);
12006 }
12007
12008 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12009 return (rc);
12010 }
12011
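/*
 * Callback for if_foreach_llmaddr(9): it is invoked once per link-level
 * multicast address and 'cnt' carries the running total of previous return
 * values, so it doubles as the index into the preallocated mc_mac array;
 * returning 1 advances that count.
 */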
12012 static u_int
12013 bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12014 {
12015 struct ecore_mcast_list_elem *mc_mac = arg;
12016
12017 mc_mac += cnt;
12018 mc_mac->mac = (uint8_t *)LLADDR(sdl);
12019
12020 return (1);
12021 }
12022
12023 static int
12024 bxe_init_mcast_macs_list(struct bxe_softc *sc,
12025 struct ecore_mcast_ramrod_params *p)
12026 {
12027 if_t ifp = sc->ifp;
12028 int mc_count;
12029 struct ecore_mcast_list_elem *mc_mac;
12030
12031 ECORE_LIST_INIT(&p->mcast_list);
12032 p->mcast_list_len = 0;
12033
12034 /* XXXGL: multicast count may change later */
12035 mc_count = if_llmaddr_count(ifp);
12036
12037 if (!mc_count) {
12038 return (0);
12039 }
12040
12041 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12042 (M_NOWAIT | M_ZERO));
12043 if (!mc_mac) {
12044 BLOGE(sc, "Failed to allocate temp mcast list\n");
12045 return (-1);
12046 }
12047 bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12048 if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12049
12050 for (int i = 0; i < mc_count; i ++) {
12051 ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12052 BLOGD(sc, DBG_LOAD,
12053 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12054 mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12055 mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12056 mc_count);
12057 }
12058
12059 p->mcast_list_len = mc_count;
12060
12061 return (0);
12062 }
12063
12064 static void
12065 bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12066 {
12067 struct ecore_mcast_list_elem *mc_mac =
12068 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12069 struct ecore_mcast_list_elem,
12070 link);
12071
12072 if (mc_mac) {
12073 /* only a single free as all mc_macs are in the same heap array */
12074 free(mc_mac, M_DEVBUF);
12075 }
12076 }
12077 static int
12078 bxe_set_mc_list(struct bxe_softc *sc)
12079 {
12080 struct ecore_mcast_ramrod_params rparam = { NULL };
12081 int rc = 0;
12082
12083 rparam.mcast_obj = &sc->mcast_obj;
12084
12085 BXE_MCAST_LOCK(sc);
12086
12087 /* first, clear all configured multicast MACs */
12088 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12089 if (rc < 0) {
12090 BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12091         /* Manual backport of parts of FreeBSD upstream r284470. */
12092 BXE_MCAST_UNLOCK(sc);
12093 return (rc);
12094 }
12095
12096 /* configure a new MACs list */
12097 rc = bxe_init_mcast_macs_list(sc, &rparam);
12098 if (rc) {
12099 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12100 BXE_MCAST_UNLOCK(sc);
12101 return (rc);
12102 }
12103
12104 /* Now add the new MACs */
12105 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12106 if (rc < 0) {
12107 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12108 }
12109
12110 bxe_free_mcast_macs_list(&rparam);
12111
12112 BXE_MCAST_UNLOCK(sc);
12113
12114 return (rc);
12115 }
12116
12117 struct bxe_set_addr_ctx {
12118 struct bxe_softc *sc;
12119 unsigned long ramrod_flags;
12120 int rc;
12121 };
12122
12123 static u_int
12124 bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12125 {
12126 struct bxe_set_addr_ctx *ctx = arg;
12127 struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12128 int rc;
12129
12130 if (ctx->rc < 0)
12131 return (0);
12132
12133 rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12134 ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12135
12136 /* do not treat adding same MAC as an error */
12137 if (rc == -EEXIST)
12138 BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12139 else if (rc < 0) {
12140 BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12141 ctx->rc = rc;
12142 }
12143
12144 return (1);
12145 }
12146
12147 static int
12148 bxe_set_uc_list(struct bxe_softc *sc)
12149 {
12150 if_t ifp = sc->ifp;
12151 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12152 struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12153 int rc;
12154
12155     /* first schedule a cleanup of the old configuration */
12156 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12157 if (rc < 0) {
12158 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12159 return (rc);
12160 }
12161
12162 if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12163 if (ctx.rc < 0)
12164 return (ctx.rc);
12165
12166 /* Execute the pending commands */
12167 bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12168 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12169 ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12170 }
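/*
 * Note on the pattern above: each bxe_set_addr() call only schedules an ADD
 * for a single address; the final bxe_set_mac_one() call with RAMROD_CONT set
 * (and a NULL mac) skips the user-request section and simply drains the
 * pending command list in one shot.
 */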
12171
12172 static void
12173 bxe_set_rx_mode(struct bxe_softc *sc)
12174 {
12175 if_t ifp = sc->ifp;
12176 uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12177
12178 if (sc->state != BXE_STATE_OPEN) {
12179 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12180 return;
12181 }
12182
12183 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12184
12185 if (if_getflags(ifp) & IFF_PROMISC) {
12186 rx_mode = BXE_RX_MODE_PROMISC;
12187 } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12188 ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12189 CHIP_IS_E1(sc))) {
12190 rx_mode = BXE_RX_MODE_ALLMULTI;
12191 } else {
12192 if (IS_PF(sc)) {
12193 /* some multicasts */
12194 if (bxe_set_mc_list(sc) < 0) {
12195 rx_mode = BXE_RX_MODE_ALLMULTI;
12196 }
12197 if (bxe_set_uc_list(sc) < 0) {
12198 rx_mode = BXE_RX_MODE_PROMISC;
12199 }
12200 }
12201 }
12202
12203 sc->rx_mode = rx_mode;
12204
12205 /* schedule the rx_mode command */
12206 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12207 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12208 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12209 return;
12210 }
12211
12212 if (IS_PF(sc)) {
12213 bxe_set_storm_rx_mode(sc);
12214 }
12215 }
12216
12217
12218 /* update flags in shmem */
12219 static void
12220 bxe_update_drv_flags(struct bxe_softc *sc,
12221 uint32_t flags,
12222 uint32_t set)
12223 {
12224 uint32_t drv_flags;
12225
12226 if (SHMEM2_HAS(sc, drv_flags)) {
12227 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12228 drv_flags = SHMEM2_RD(sc, drv_flags);
12229
12230 if (set) {
12231 SET_FLAGS(drv_flags, flags);
12232 } else {
12233 RESET_FLAGS(drv_flags, flags);
12234 }
12235
12236 SHMEM2_WR(sc, drv_flags, drv_flags);
12237 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12238
12239 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12240 }
12241 }
12242
12243 /* periodic timer callout routine, only runs when the interface is up */
12244
12245 static void
12246 bxe_periodic_callout_func(void *xsc)
12247 {
12248 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12249 int i;
12250
12251 if (!BXE_CORE_TRYLOCK(sc)) {
12252 /* just bail and try again next time */
12253
12254 if ((sc->state == BXE_STATE_OPEN) &&
12255 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12256 /* schedule the next periodic callout */
12257 callout_reset(&sc->periodic_callout, hz,
12258 bxe_periodic_callout_func, sc);
12259 }
12260
12261 return;
12262 }
12263
12264 if ((sc->state != BXE_STATE_OPEN) ||
12265 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12266 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12267 BXE_CORE_UNLOCK(sc);
12268 return;
12269 }
12270
12271
12272 /* Check for TX timeouts on any fastpath. */
12273 FOR_EACH_QUEUE(sc, i) {
12274 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12275 /* Ruh-Roh, chip was reset! */
12276 break;
12277 }
12278 }
12279
12280 if (!CHIP_REV_IS_SLOW(sc)) {
12281 /*
12282 * This barrier is needed to ensure the ordering between the writing
12283 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12284 * the reading here.
12285 */
12286 mb();
12287 if (sc->port.pmf) {
12288 bxe_acquire_phy_lock(sc);
12289 elink_period_func(&sc->link_params, &sc->link_vars);
12290 bxe_release_phy_lock(sc);
12291 }
12292 }
12293
12294 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12295 int mb_idx = SC_FW_MB_IDX(sc);
12296 uint32_t drv_pulse;
12297 uint32_t mcp_pulse;
12298
12299 ++sc->fw_drv_pulse_wr_seq;
12300 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12301
12302 drv_pulse = sc->fw_drv_pulse_wr_seq;
12303 bxe_drv_pulse(sc);
12304
12305 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12306 MCP_PULSE_SEQ_MASK);
12307
12308 /*
12309 * The delta between driver pulse and mcp response should
12310 * be 1 (before mcp response) or 0 (after mcp response).
12311 */
12312 if ((drv_pulse != mcp_pulse) &&
12313 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12314 /* someone lost a heartbeat... */
12315 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12316 drv_pulse, mcp_pulse);
12317 }
12318 }
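    /*
     * Worked example (hypothetical sequence numbers): if the driver just
     * wrote 0x42, then an mcp_pulse of 0x42 (delta 0, MCP has answered) or
     * 0x41 (delta 1, answer still in flight) is healthy; anything else
     * trips the heartbeat error above.
     */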
12319
12320 /* state is BXE_STATE_OPEN */
12321 bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12322
12323 BXE_CORE_UNLOCK(sc);
12324
12325 if ((sc->state == BXE_STATE_OPEN) &&
12326 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12327 /* schedule the next periodic callout */
12328 callout_reset(&sc->periodic_callout, hz,
12329 bxe_periodic_callout_func, sc);
12330 }
12331 }
12332
12333 static void
12334 bxe_periodic_start(struct bxe_softc *sc)
12335 {
12336 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12337 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12338 }
12339
12340 static void
12341 bxe_periodic_stop(struct bxe_softc *sc)
12342 {
12343 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12344 callout_drain(&sc->periodic_callout);
12345 }
12346
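/*
 * Rough shape of the state machine driven below: BXE_RECOVERY_INIT detects
 * the parity/global attention, tries to become leader and unloads the NIC;
 * BXE_RECOVERY_WAIT re-arms itself (via sp_err_timeout_task) until all other
 * functions are down, after which the leader resets the chip and non-leaders
 * reload, ending in BXE_RECOVERY_DONE or, on any failure, BXE_RECOVERY_FAILED.
 */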
12347 void
12348 bxe_parity_recover(struct bxe_softc *sc)
12349 {
12350 uint8_t global = FALSE;
12351 uint32_t error_recovered, error_unrecovered;
12352
12353
12354 if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12355 (sc->state == BXE_STATE_ERROR)) {
12356 BLOGE(sc, "RECOVERY failed, "
12357 "stack notified driver is NOT running! "
12358 "Please reboot/power cycle the system.\n");
12359 return;
12360 }
12361
12362 while (1) {
12363 BLOGD(sc, DBG_SP,
12364 "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12365 __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12366
12367 switch(sc->recovery_state) {
12368
12369 case BXE_RECOVERY_INIT:
12370 bxe_chk_parity_attn(sc, &global, FALSE);
12371
12372 if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12373 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12374 (sc->error_status & BXE_ERR_GLOBAL)) {
12375
12376 BXE_CORE_LOCK(sc);
12377 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12378 bxe_periodic_stop(sc);
12379 }
12380 bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12381 sc->state = BXE_STATE_ERROR;
12382 sc->recovery_state = BXE_RECOVERY_FAILED;
12383 BLOGE(sc, " No Recovery tried for error 0x%x"
12384 " stack notified driver is NOT running!"
12385 " Please reboot/power cycle the system.\n",
12386 sc->error_status);
12387 BXE_CORE_UNLOCK(sc);
12388 return;
12389 }
12390
12391
12392 /* Try to get a LEADER_LOCK HW lock */
12393 if (bxe_trylock_leader_lock(sc)) {
12394
12395 bxe_set_reset_in_progress(sc);
12396 /*
12397 * Check if there is a global attention and if
12398 * there was a global attention, set the global
12399 * reset bit.
12400 */
12401 if (global) {
12402 bxe_set_reset_global(sc);
12403 }
12404 sc->is_leader = 1;
12405 }
12406
12407             /* stop the periodic tick while the interface is still up */
12408
12409 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12410 bxe_periodic_stop(sc);
12411 }
12412
12413 BXE_CORE_LOCK(sc);
12414             bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12415 sc->recovery_state = BXE_RECOVERY_WAIT;
12416 BXE_CORE_UNLOCK(sc);
12417
12418 /*
12419 * Ensure "is_leader", MCP command sequence and
12420 * "recovery_state" update values are seen on other
12421 * CPUs.
12422 */
12423 mb();
12424 break;
12425 case BXE_RECOVERY_WAIT:
12426
12427 if (sc->is_leader) {
12428 int other_engine = SC_PATH(sc) ? 0 : 1;
12429 bool other_load_status =
12430 bxe_get_load_status(sc, other_engine);
12431 bool load_status =
12432 bxe_get_load_status(sc, SC_PATH(sc));
12433 global = bxe_reset_is_global(sc);
12434
12435 /*
12436 * In case of a parity in a global block, let
12437 * the first leader that performs a
12438 * leader_reset() reset the global blocks in
12439 * order to clear global attentions. Otherwise
12440 * the gates will remain closed for that
12441 * engine.
12442 */
12443 if (load_status ||
12444 (global && other_load_status)) {
12445 /*
12446 * Wait until all other functions get
12447 * down.
12448 */
12449 taskqueue_enqueue_timeout(taskqueue_thread,
12450 &sc->sp_err_timeout_task, hz/10);
12451 return;
12452 } else {
12453 /*
12454 * If all other functions got down
12455 * try to bring the chip back to
12456 * normal. In any case it's an exit
12457 * point for a leader.
12458 */
12459 if (bxe_leader_reset(sc)) {
12460 BLOGE(sc, "RECOVERY failed, "
12461 "stack notified driver is NOT running!\n");
12462 sc->recovery_state = BXE_RECOVERY_FAILED;
12463 sc->state = BXE_STATE_ERROR;
12464 mb();
12465 return;
12466 }
12467
12468 /*
12469                          * If we are here, it means that the
12470                          * leader has succeeded and doesn't
12471                          * want to be a leader any more. Try
12472                          * to continue as a non-leader.
12473 */
12474 break;
12475 }
12476
12477 } else { /* non-leader */
12478 if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12479 /*
12480 * Try to get a LEADER_LOCK HW lock as
12481 * long as a former leader may have
12482 * been unloaded by the user or
12483 * released a leadership by another
12484 * reason.
12485 */
12486 if (bxe_trylock_leader_lock(sc)) {
12487 /*
12488 * I'm a leader now! Restart a
12489 * switch case.
12490 */
12491 sc->is_leader = 1;
12492 break;
12493 }
12494
12495 taskqueue_enqueue_timeout(taskqueue_thread,
12496 &sc->sp_err_timeout_task, hz/10);
12497 return;
12498
12499 } else {
12500 /*
12501 * If there was a global attention, wait
12502 * for it to be cleared.
12503 */
12504 if (bxe_reset_is_global(sc)) {
12505 taskqueue_enqueue_timeout(taskqueue_thread,
12506 &sc->sp_err_timeout_task, hz/10);
12507 return;
12508 }
12509
12510 error_recovered =
12511 sc->eth_stats.recoverable_error;
12512 error_unrecovered =
12513 sc->eth_stats.unrecoverable_error;
12514 BXE_CORE_LOCK(sc);
12515 sc->recovery_state =
12516 BXE_RECOVERY_NIC_LOADING;
12517 if (bxe_nic_load(sc, LOAD_NORMAL)) {
12518 error_unrecovered++;
12519 sc->recovery_state = BXE_RECOVERY_FAILED;
12520 sc->state = BXE_STATE_ERROR;
12521                     BLOGE(sc, "Recovery is NOT successful, "
12522 " state=0x%x recovery_state=0x%x error=%x\n",
12523 sc->state, sc->recovery_state, sc->error_status);
12524 sc->error_status = 0;
12525 } else {
12526 sc->recovery_state =
12527 BXE_RECOVERY_DONE;
12528 error_recovered++;
12529                     BLOGI(sc, "Recovery is successful from errors %x,"
12530 " state=0x%x"
12531 " recovery_state=0x%x \n", sc->error_status,
12532 sc->state, sc->recovery_state);
12533 mb();
12534 }
12535 sc->error_status = 0;
12536 BXE_CORE_UNLOCK(sc);
12537 sc->eth_stats.recoverable_error =
12538 error_recovered;
12539 sc->eth_stats.unrecoverable_error =
12540 error_unrecovered;
12541
12542 return;
12543 }
12544 }
12545 default:
12546 return;
12547 }
12548 }
12549 }
12550 void
12551 bxe_handle_error(struct bxe_softc * sc)
12552 {
12553
12554 if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12555 return;
12556 }
12557 if(sc->error_status) {
12558 if (sc->state == BXE_STATE_OPEN) {
12559 bxe_int_disable(sc);
12560 }
12561 if (sc->link_vars.link_up) {
12562 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12563 }
12564 sc->recovery_state = BXE_RECOVERY_INIT;
12565 BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12566 sc->unit, sc->error_status, sc->recovery_state);
12567 bxe_parity_recover(sc);
12568 }
12569 }
12570
12571 static void
12572 bxe_sp_err_timeout_task(void *arg, int pending)
12573 {
12574
12575 struct bxe_softc *sc = (struct bxe_softc *)arg;
12576
12577 BLOGD(sc, DBG_SP,
12578 "%s state = 0x%x rec state=0x%x error_status=%x\n",
12579 __func__, sc->state, sc->recovery_state, sc->error_status);
12580
12581 if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12582 (sc->state == BXE_STATE_ERROR)) {
12583 return;
12584 }
12585 /* if can be taken */
12586 if ((sc->error_status) && (sc->trigger_grcdump)) {
12587 bxe_grc_dump(sc);
12588 }
12589 if (sc->recovery_state != BXE_RECOVERY_DONE) {
12590 bxe_handle_error(sc);
12591 bxe_parity_recover(sc);
12592 } else if (sc->error_status) {
12593 bxe_handle_error(sc);
12594 }
12595
12596 return;
12597 }
12598
12599 /* start the controller */
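/*
 * Rough order of operations below: allocate fastpath and firmware-stats
 * memory, negotiate the load with the MCP (or fake it when no MCP exists),
 * init the HW, attach interrupts, bring up the leading and non-default
 * queues, program RSS and the primary MAC, init the PHY when acting as PMF,
 * set the RX filters, and finally start the periodic tick.
 */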
12600 static __noinline int
12601 bxe_nic_load(struct bxe_softc *sc,
12602 int load_mode)
12603 {
12604 uint32_t val;
12605 int load_code = 0;
12606 int i, rc = 0;
12607
12608 BXE_CORE_LOCK_ASSERT(sc);
12609
12610 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12611
12612 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12613
12614 if (IS_PF(sc)) {
12615 /* must be called before memory allocation and HW init */
12616 bxe_ilt_set_info(sc);
12617 }
12618
12619 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12620
12621 bxe_set_fp_rx_buf_size(sc);
12622
12623 if (bxe_alloc_fp_buffers(sc) != 0) {
12624 BLOGE(sc, "Failed to allocate fastpath memory\n");
12625 sc->state = BXE_STATE_CLOSED;
12626 rc = ENOMEM;
12627 goto bxe_nic_load_error0;
12628 }
12629
12630 if (bxe_alloc_mem(sc) != 0) {
12631 sc->state = BXE_STATE_CLOSED;
12632 rc = ENOMEM;
12633 goto bxe_nic_load_error0;
12634 }
12635
12636 if (bxe_alloc_fw_stats_mem(sc) != 0) {
12637 sc->state = BXE_STATE_CLOSED;
12638 rc = ENOMEM;
12639 goto bxe_nic_load_error0;
12640 }
12641
12642 if (IS_PF(sc)) {
12643 /* set pf load just before approaching the MCP */
12644 bxe_set_pf_load(sc);
12645
12646 /* if MCP exists send load request and analyze response */
12647 if (!BXE_NOMCP(sc)) {
12648 /* attempt to load pf */
12649 if (bxe_nic_load_request(sc, &load_code) != 0) {
12650 sc->state = BXE_STATE_CLOSED;
12651 rc = ENXIO;
12652 goto bxe_nic_load_error1;
12653 }
12654
12655 /* what did the MCP say? */
12656 if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12657 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12658 sc->state = BXE_STATE_CLOSED;
12659 rc = ENXIO;
12660 goto bxe_nic_load_error2;
12661 }
12662 } else {
12663 BLOGI(sc, "Device has no MCP!\n");
12664 load_code = bxe_nic_load_no_mcp(sc);
12665 }
12666
12667 /* mark PMF if applicable */
12668 bxe_nic_load_pmf(sc, load_code);
12669
12670 /* Init Function state controlling object */
12671 bxe_init_func_obj(sc);
12672
12673 /* Initialize HW */
12674 if (bxe_init_hw(sc, load_code) != 0) {
12675 BLOGE(sc, "HW init failed\n");
12676 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12677 sc->state = BXE_STATE_CLOSED;
12678 rc = ENXIO;
12679 goto bxe_nic_load_error2;
12680 }
12681 }
12682
12683 /* set ALWAYS_ALIVE bit in shmem */
12684 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12685 bxe_drv_pulse(sc);
12686 sc->flags |= BXE_NO_PULSE;
12687
12688 /* attach interrupts */
12689 if (bxe_interrupt_attach(sc) != 0) {
12690 sc->state = BXE_STATE_CLOSED;
12691 rc = ENXIO;
12692 goto bxe_nic_load_error2;
12693 }
12694
12695 bxe_nic_init(sc, load_code);
12696
12697 /* Init per-function objects */
12698 if (IS_PF(sc)) {
12699 bxe_init_objs(sc);
12700 // XXX bxe_iov_nic_init(sc);
12701
12702 /* set AFEX default VLAN tag to an invalid value */
12703 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12704 // XXX bxe_nic_load_afex_dcc(sc, load_code);
12705
12706 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12707 rc = bxe_func_start(sc);
12708 if (rc) {
12709 BLOGE(sc, "Function start failed! rc = %d\n", rc);
12710 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12711 sc->state = BXE_STATE_ERROR;
12712 goto bxe_nic_load_error3;
12713 }
12714
12715 /* send LOAD_DONE command to MCP */
12716 if (!BXE_NOMCP(sc)) {
12717 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12718 if (!load_code) {
12719 BLOGE(sc, "MCP response failure, aborting\n");
12720 sc->state = BXE_STATE_ERROR;
12721 rc = ENXIO;
12722 goto bxe_nic_load_error3;
12723 }
12724 }
12725
12726 rc = bxe_setup_leading(sc);
12727 if (rc) {
12728 BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12729 sc->state = BXE_STATE_ERROR;
12730 goto bxe_nic_load_error3;
12731 }
12732
12733 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12734 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12735 if (rc) {
12736 BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12737 sc->state = BXE_STATE_ERROR;
12738 goto bxe_nic_load_error3;
12739 }
12740 }
12741
12742 rc = bxe_init_rss_pf(sc);
12743 if (rc) {
12744 BLOGE(sc, "PF RSS init failed\n");
12745 sc->state = BXE_STATE_ERROR;
12746 goto bxe_nic_load_error3;
12747 }
12748 }
12749 /* XXX VF */
12750
12751 /* now when Clients are configured we are ready to work */
12752 sc->state = BXE_STATE_OPEN;
12753
12754 /* Configure a ucast MAC */
12755 if (IS_PF(sc)) {
12756 rc = bxe_set_eth_mac(sc, TRUE);
12757 }
12758 if (rc) {
12759 BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12760 sc->state = BXE_STATE_ERROR;
12761 goto bxe_nic_load_error3;
12762 }
12763
12764 if (sc->port.pmf) {
12765 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12766 if (rc) {
12767 sc->state = BXE_STATE_ERROR;
12768 goto bxe_nic_load_error3;
12769 }
12770 }
12771
12772 sc->link_params.feature_config_flags &=
12773 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12774
12775 /* start fast path */
12776
12777 /* Initialize Rx filter */
12778 bxe_set_rx_mode(sc);
12779
12780 /* start the Tx */
12781 switch (/* XXX load_mode */LOAD_OPEN) {
12782 case LOAD_NORMAL:
12783 case LOAD_OPEN:
12784 break;
12785
12786 case LOAD_DIAG:
12787 case LOAD_LOOPBACK_EXT:
12788 sc->state = BXE_STATE_DIAG;
12789 break;
12790
12791 default:
12792 break;
12793 }
12794
12795 if (sc->port.pmf) {
12796 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12797 } else {
12798 bxe_link_status_update(sc);
12799 }
12800
12801 /* start the periodic timer callout */
12802 bxe_periodic_start(sc);
12803
12804 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12805 /* mark driver is loaded in shmem2 */
12806 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12807 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12808 (val |
12809 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12810 DRV_FLAGS_CAPABILITIES_LOADED_L2));
12811 }
12812
12813 /* wait for all pending SP commands to complete */
12814 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12815 BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12816 bxe_periodic_stop(sc);
12817 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12818 return (ENXIO);
12819 }
12820
12821 /* Tell the stack the driver is running! */
12822 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12823
12824 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12825
12826 return (0);
12827
12828 bxe_nic_load_error3:
12829
12830 if (IS_PF(sc)) {
12831 bxe_int_disable_sync(sc, 1);
12832
12833 /* clean out queued objects */
12834 bxe_squeeze_objects(sc);
12835 }
12836
12837 bxe_interrupt_detach(sc);
12838
12839 bxe_nic_load_error2:
12840
12841 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12842 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12843 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12844 }
12845
12846 sc->port.pmf = 0;
12847
12848 bxe_nic_load_error1:
12849
12850 /* clear pf_load status, as it was already set */
12851 if (IS_PF(sc)) {
12852 bxe_clear_pf_load(sc);
12853 }
12854
12855 bxe_nic_load_error0:
12856
12857 bxe_free_fw_stats_mem(sc);
12858 bxe_free_fp_buffers(sc);
12859 bxe_free_mem(sc);
12860
12861 return (rc);
12862 }
12863
12864 static int
12865 bxe_init_locked(struct bxe_softc *sc)
12866 {
12867 int other_engine = SC_PATH(sc) ? 0 : 1;
12868 uint8_t other_load_status, load_status;
12869 uint8_t global = FALSE;
12870 int rc;
12871
12872 BXE_CORE_LOCK_ASSERT(sc);
12873
12874 /* check if the driver is already running */
12875 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12876 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12877 return (0);
12878 }
12879
12880 if((sc->state == BXE_STATE_ERROR) &&
12881 (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12882 BLOGE(sc, "Initialization not done, "
12883               "as previous recovery failed. "
12884               "Reboot/Power-cycle the system\n");
12885 return (ENXIO);
12886 }
12887
12888
12889 bxe_set_power_state(sc, PCI_PM_D0);
12890
12891 /*
12892 * If parity occurred during the unload, then attentions and/or
12893      * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
12894      * loaded on the current engine to complete the recovery. Parity recovery
12895      * is only relevant for the PF driver.
12896 */
12897 if (IS_PF(sc)) {
12898 other_load_status = bxe_get_load_status(sc, other_engine);
12899 load_status = bxe_get_load_status(sc, SC_PATH(sc));
12900
12901 if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12902 bxe_chk_parity_attn(sc, &global, TRUE)) {
12903 do {
12904 /*
12905 * If there are attentions and they are in global blocks, set
12906 * the GLOBAL_RESET bit regardless whether it will be this
12907 * function that will complete the recovery or not.
12908 */
12909 if (global) {
12910 bxe_set_reset_global(sc);
12911 }
12912
12913 /*
12914 * Only the first function on the current engine should try
12915 * to recover in open. In case of attentions in global blocks
12916 * only the first in the chip should try to recover.
12917 */
12918 if ((!load_status && (!global || !other_load_status)) &&
12919 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12920 BLOGI(sc, "Recovered during init\n");
12921 break;
12922 }
12923
12924 /* recovery has failed... */
12925 bxe_set_power_state(sc, PCI_PM_D3hot);
12926 sc->recovery_state = BXE_RECOVERY_FAILED;
12927
12928 BLOGE(sc, "Recovery flow hasn't properly "
12929 "completed yet, try again later. "
12930 "If you still see this message after a "
12931 "few retries then power cycle is required.\n");
12932
12933 rc = ENXIO;
12934 goto bxe_init_locked_done;
12935 } while (0);
12936 }
12937 }
12938
12939 sc->recovery_state = BXE_RECOVERY_DONE;
12940
12941 rc = bxe_nic_load(sc, LOAD_OPEN);
12942
12943 bxe_init_locked_done:
12944
12945 if (rc) {
12946 /* Tell the stack the driver is NOT running! */
12947 BLOGE(sc, "Initialization failed, "
12948 "stack notified driver is NOT running!\n");
12949 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12950 }
12951
12952 return (rc);
12953 }
12954
12955 static int
12956 bxe_stop_locked(struct bxe_softc *sc)
12957 {
12958 BXE_CORE_LOCK_ASSERT(sc);
12959 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12960 }
12961
12962 /*
12963 * Handles controller initialization when called from an unlocked routine.
12964 * ifconfig calls this function.
12965 *
12966 * Returns:
12967 * void
12968 */
12969 static void
12970 bxe_init(void *xsc)
12971 {
12972 struct bxe_softc *sc = (struct bxe_softc *)xsc;
12973
12974 BXE_CORE_LOCK(sc);
12975 bxe_init_locked(sc);
12976 BXE_CORE_UNLOCK(sc);
12977 }
12978
12979 static int
12980 bxe_init_ifnet(struct bxe_softc *sc)
12981 {
12982 if_t ifp;
12983 int capabilities;
12984
12985 /* ifconfig entrypoint for media type/status reporting */
12986 ifmedia_init(&sc->ifmedia, IFM_IMASK,
12987 bxe_ifmedia_update,
12988 bxe_ifmedia_status);
12989
12990 /* set the default interface values */
12991 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12992 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12993 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12994
12995 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12996 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12997
12998 /* allocate the ifnet structure */
12999 if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
13000 BLOGE(sc, "Interface allocation failed!\n");
13001 return (ENXIO);
13002 }
13003
13004 if_setsoftc(ifp, sc);
13005 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13006 if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13007 if_setioctlfn(ifp, bxe_ioctl);
13008 if_setstartfn(ifp, bxe_tx_start);
13009 if_setgetcounterfn(ifp, bxe_get_counter);
13010 if_settransmitfn(ifp, bxe_tx_mq_start);
13011 if_setqflushfn(ifp, bxe_mq_flush);
13012 if_setinitfn(ifp, bxe_init);
13013 if_setmtu(ifp, sc->mtu);
13014 if_sethwassist(ifp, (CSUM_IP |
13015 CSUM_TCP |
13016 CSUM_UDP |
13017 CSUM_TSO |
13018 CSUM_TCP_IPV6 |
13019 CSUM_UDP_IPV6));
13020
13021 capabilities =
13022 (IFCAP_VLAN_MTU |
13023 IFCAP_VLAN_HWTAGGING |
13024 IFCAP_VLAN_HWTSO |
13025 IFCAP_VLAN_HWFILTER |
13026 IFCAP_VLAN_HWCSUM |
13027 IFCAP_HWCSUM |
13028 IFCAP_JUMBO_MTU |
13029 IFCAP_LRO |
13030 IFCAP_TSO4 |
13031 IFCAP_TSO6 |
13032 IFCAP_WOL_MAGIC);
13033 if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13034 if_setcapenable(ifp, if_getcapabilities(ifp));
13035 if_setbaudrate(ifp, IF_Gbps(10));
13036 /* XXX */
13037 if_setsendqlen(ifp, sc->tx_ring_size);
13038 if_setsendqready(ifp);
13039 /* XXX */
13040
13041 sc->ifp = ifp;
13042
13043 /* attach to the Ethernet interface list */
13044 ether_ifattach(ifp, sc->link_params.mac_addr);
13045
13046 /* Attach driver debugnet methods. */
13047 DEBUGNET_SET(ifp, bxe);
13048
13049 return (0);
13050 }
13051
13052 static void
13053 bxe_deallocate_bars(struct bxe_softc *sc)
13054 {
13055 int i;
13056
13057 for (i = 0; i < MAX_BARS; i++) {
13058 if (sc->bar[i].resource != NULL) {
13059 bus_release_resource(sc->dev,
13060 SYS_RES_MEMORY,
13061 sc->bar[i].rid,
13062 sc->bar[i].resource);
13063 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13064 i, PCIR_BAR(i));
13065 }
13066 }
13067 }
13068
13069 static int
13070 bxe_allocate_bars(struct bxe_softc *sc)
13071 {
13072 u_int flags;
13073 int i;
13074
13075 memset(sc->bar, 0, sizeof(sc->bar));
13076
13077 for (i = 0; i < MAX_BARS; i++) {
13078
13079 /* memory resources reside at BARs 0, 2, 4 */
13080 /* Run `pciconf -lb` to see mappings */
13081 if ((i != 0) && (i != 2) && (i != 4)) {
13082 continue;
13083 }
13084
13085 sc->bar[i].rid = PCIR_BAR(i);
13086
13087 flags = RF_ACTIVE;
13088 if (i == 0) {
13089 flags |= RF_SHAREABLE;
13090 }
13091
13092 if ((sc->bar[i].resource =
13093 bus_alloc_resource_any(sc->dev,
13094 SYS_RES_MEMORY,
13095 &sc->bar[i].rid,
13096 flags)) == NULL) {
13097             return (ENXIO); /* BAR allocation failed; 0 means success here */
13098 }
13099
13100 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13101 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13102 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13103
13104 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13105 i, PCIR_BAR(i),
13106 rman_get_start(sc->bar[i].resource),
13107 rman_get_end(sc->bar[i].resource),
13108 rman_get_size(sc->bar[i].resource),
13109 (uintmax_t)sc->bar[i].kva);
13110 }
13111
13112 return (0);
13113 }
13114
13115 static void
13116 bxe_get_function_num(struct bxe_softc *sc)
13117 {
13118 uint32_t val = 0;
13119
13120 /*
13121 * Read the ME register to get the function number. The ME register
13122 * holds the relative-function number and absolute-function number. The
13123 * absolute-function number appears only in E2 and above. Before that
13124 * these bits always contained zero, therefore we cannot blindly use them.
13125 */
13126
13127 val = REG_RD(sc, BAR_ME_REGISTER);
13128
13129 sc->pfunc_rel =
13130 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13131 sc->path_id =
13132 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13133
13134 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13135 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13136 } else {
13137 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13138 }
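    /*
     * Worked example (hypothetical values): relative function 1 on path 1
     * yields pfunc_abs = (1 << 1) | 1 = 3 in 4-port mode, but
     * pfunc_abs = 1 | 1 = 1 in 2-port mode where the path bit overlays bit 0.
     */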
13139
13140 BLOGD(sc, DBG_LOAD,
13141 "Relative function %d, Absolute function %d, Path %d\n",
13142 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13143 }
13144
13145 static uint32_t
13146 bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13147 {
13148 uint32_t shmem2_size;
13149 uint32_t offset;
13150 uint32_t mf_cfg_offset_value;
13151
13152 /* Non 57712 */
13153 offset = (SHMEM_RD(sc, func_mb) +
13154 (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13155
13156 /* 57712 plus */
13157 if (sc->devinfo.shmem2_base != 0) {
13158 shmem2_size = SHMEM2_RD(sc, size);
13159 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13160 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13161 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13162 offset = mf_cfg_offset_value;
13163 }
13164 }
13165 }
13166
13167 return (offset);
13168 }
13169
13170 static uint32_t
13171 bxe_pcie_capability_read(struct bxe_softc *sc,
13172 int reg,
13173 int width)
13174 {
13175 int pcie_reg;
13176
13177 /* ensure PCIe capability is enabled */
13178 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13179 if (pcie_reg != 0) {
13180 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13181 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13182 }
13183 }
13184
13185 BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13186
13187 return (0);
13188 }
13189
13190 static uint8_t
13191 bxe_is_pcie_pending(struct bxe_softc *sc)
13192 {
13193 return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13194 PCIEM_STA_TRANSACTION_PND);
13195 }
13196
13197 /*
13198  * Walk the PCI capabilities list for the device to find what features are
13199  * supported. These capabilities may be enabled/disabled by firmware so it's
13200 * best to walk the list rather than make assumptions.
13201 */
13202 static void
13203 bxe_probe_pci_caps(struct bxe_softc *sc)
13204 {
13205 uint16_t link_status;
13206 int reg;
13207
13208 /* check if PCI Power Management is enabled */
13209     if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13210 if (reg != 0) {
13211 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13212
13213 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13214 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13215 }
13216 }
13217
13218 link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13219
13220 /* handle PCIe 2.0 workarounds for 57710 */
13221 if (CHIP_IS_E1(sc)) {
13222 /* workaround for 57710 errata E4_57710_27462 */
13223 sc->devinfo.pcie_link_speed =
13224 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13225
13226 /* workaround for 57710 errata E4_57710_27488 */
13227 sc->devinfo.pcie_link_width =
13228 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13229 if (sc->devinfo.pcie_link_speed > 1) {
13230 sc->devinfo.pcie_link_width =
13231 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13232 }
13233 } else {
13234 sc->devinfo.pcie_link_speed =
13235 (link_status & PCIEM_LINK_STA_SPEED);
13236 sc->devinfo.pcie_link_width =
13237 ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13238 }
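    /*
     * Worked example (hypothetical link_status of 0x0042): the low nibble
     * gives speed 2 (Gen2, 5.0 GT/s) and the width field (bits 9:4) gives
     * (0x0042 >> 4) & 0x3f = 4, i.e. a x4 link; on 57710 at Gen2 the errata
     * workaround above additionally halves the reported width.
     */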
13239
13240 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13241 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13242
13243 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13244 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13245
13246 /* check if MSI capability is enabled */
13247     if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13248 if (reg != 0) {
13249 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13250
13251 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13252 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13253 }
13254 }
13255
13256 /* check if MSI-X capability is enabled */
13257     if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13258 if (reg != 0) {
13259 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13260
13261 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13262 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13263 }
13264 }
13265 }
13266
13267 static int
13268 bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13269 {
13270 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13271 uint32_t val;
13272
13273 /* get the outer vlan if we're in switch-dependent mode */
13274
13275 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13276 mf_info->ext_id = (uint16_t)val;
13277
13278 mf_info->multi_vnics_mode = 1;
13279
13280 if (!VALID_OVLAN(mf_info->ext_id)) {
13281 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13282 return (1);
13283 }
13284
13285 /* get the capabilities */
13286 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13287 FUNC_MF_CFG_PROTOCOL_ISCSI) {
13288 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13289 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13290 FUNC_MF_CFG_PROTOCOL_FCOE) {
13291 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13292 } else {
13293 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13294 }
13295
13296 mf_info->vnics_per_port =
13297 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13298
13299 return (0);
13300 }
13301
13302 static uint32_t
13303 bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13304 {
13305 uint32_t retval = 0;
13306 uint32_t val;
13307
13308 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13309
13310 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13311 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13312 retval |= MF_PROTO_SUPPORT_ETHERNET;
13313 }
13314 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13315 retval |= MF_PROTO_SUPPORT_ISCSI;
13316 }
13317 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13318 retval |= MF_PROTO_SUPPORT_FCOE;
13319 }
13320 }
13321
13322 return (retval);
13323 }
13324
13325 static int
13326 bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13327 {
13328 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13329 uint32_t val;
13330
13331 /*
13332 * There is no outer vlan if we're in switch-independent mode.
13333 * If the mac is valid then assume multi-function.
13334 */
13335
13336 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13337
13338 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13339
13340 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13341
13342 mf_info->vnics_per_port =
13343 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13344
13345 return (0);
13346 }
13347
13348 static int
13349 bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13350 {
13351 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13352 uint32_t e1hov_tag;
13353 uint32_t func_config;
13354 uint32_t niv_config;
13355
13356 mf_info->multi_vnics_mode = 1;
13357
13358 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13359 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13360 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13361
13362 mf_info->ext_id =
13363 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13364 FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13365
13366 mf_info->default_vlan =
13367 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13368 FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13369
13370 mf_info->niv_allowed_priorities =
13371 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13372 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13373
13374 mf_info->niv_default_cos =
13375 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13376 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13377
13378 mf_info->afex_vlan_mode =
13379 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13380 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13381
13382 mf_info->niv_mba_enabled =
13383 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13384 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13385
13386 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13387
13388 mf_info->vnics_per_port =
13389 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13390
13391 return (0);
13392 }
13393
13394 static int
13395 bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13396 {
13397 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13398 uint32_t mf_cfg1;
13399 uint32_t mf_cfg2;
13400 uint32_t ovlan1;
13401 uint32_t ovlan2;
13402 uint8_t i, j;
13403
13404 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13405 SC_PORT(sc));
13406 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13407 mf_info->mf_config[SC_VN(sc)]);
13408 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13409 mf_info->multi_vnics_mode);
13410 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13411 mf_info->vnics_per_port);
13412 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13413 mf_info->ext_id);
13414 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13415 mf_info->min_bw[0], mf_info->min_bw[1],
13416 mf_info->min_bw[2], mf_info->min_bw[3]);
13417 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13418 mf_info->max_bw[0], mf_info->max_bw[1],
13419 mf_info->max_bw[2], mf_info->max_bw[3]);
13420 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13421 sc->mac_addr_str);
13422
13423 /* various MF mode sanity checks... */
13424
13425 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13426 BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13427 SC_PORT(sc));
13428 return (1);
13429 }
13430
13431 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13432 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13433 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13434 return (1);
13435 }
13436
13437 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13438 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13439 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13440 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13441 SC_VN(sc), OVLAN(sc));
13442 return (1);
13443 }
13444
13445 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13446 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13447 mf_info->multi_vnics_mode, OVLAN(sc));
13448 return (1);
13449 }
13450
13451 /*
13452 * Verify all functions are either in MF or SF mode. If MF, make sure
13453 * that all non-hidden functions have a valid ovlan. If SF, make sure
13454 * that all non-hidden functions have an invalid ovlan.
13455 */
13456 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13457 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13458 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13459 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13460 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13461 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13462 BLOGE(sc, "mf_mode=SD function %d MF config "
13463 "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13464 i, mf_info->multi_vnics_mode, ovlan1);
13465 return (1);
13466 }
13467 }
13468
13469 /* Verify all funcs on the same port each have a different ovlan. */
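/*
 * A brief note on the stride: functions sharing a port are spaced two
 * apart in the absolute function numbering (on E1H, for instance, even
 * functions map to port 0 and odd functions to port 1), which is why
 * the inner loop below starts at i + 2 and advances by 2.
 */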
13470 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13471 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13472 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13473 /* iterate from the next function on the port to the max func */
13474 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13475 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13476 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13477 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13478 VALID_OVLAN(ovlan1) &&
13479 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13480 VALID_OVLAN(ovlan2) &&
13481 (ovlan1 == ovlan2)) {
13482 BLOGE(sc, "mf_mode=SD functions %d and %d "
13483 "have the same ovlan (%d)\n",
13484 i, j, ovlan1);
13485 return (1);
13486 }
13487 }
13488 }
13489 } /* MULTI_FUNCTION_SD */
13490
13491 return (0);
13492 }
13493
13494 static int
13495 bxe_get_mf_cfg_info(struct bxe_softc *sc)
13496 {
13497 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13498 uint32_t val, mac_upper;
13499 uint8_t i, vnic;
13500
13501 /* initialize mf_info defaults */
13502 mf_info->vnics_per_port = 1;
13503 mf_info->multi_vnics_mode = FALSE;
13504 mf_info->path_has_ovlan = FALSE;
13505 mf_info->mf_mode = SINGLE_FUNCTION;
13506
13507 if (!CHIP_IS_MF_CAP(sc)) {
13508 return (0);
13509 }
13510
13511 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13512 BLOGE(sc, "Invalid mf_cfg_base!\n");
13513 return (1);
13514 }
13515
13516 /* get the MF mode (switch dependent / independent / single-function) */
13517
13518 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13519
13520 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13521 {
13522 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13523
13524 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13525
13526 /* check for legal upper mac bytes */
13527 if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13528 mf_info->mf_mode = MULTI_FUNCTION_SI;
13529 } else {
13530 BLOGE(sc, "Invalid config for Switch Independent mode\n");
13531 }
13532
13533 break;
13534
13535 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13536 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13537
13538 /* get outer vlan configuration */
13539 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13540
13541 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13542 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13543 mf_info->mf_mode = MULTI_FUNCTION_SD;
13544 } else {
13545 BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13546 }
13547
13548 break;
13549
13550 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13551
13552 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13553 return (0);
13554
13555 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13556
13557 /*
13558 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13559 * and the MAC address is valid.
13560 */
13561 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13562
13563 if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13564 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13565 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13566 } else {
13567 BLOGE(sc, "Invalid config for AFEX mode\n");
13568 }
13569
13570 break;
13571
13572 default:
13573
13574 BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13575 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13576
13577 return (1);
13578 }
13579
13580 /* set path mf_mode (which could be different from the function mf_mode) */
13581 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13582 mf_info->path_has_ovlan = TRUE;
13583 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13584 /*
13585 * Decide on the path multi vnics mode. If we're not in MF mode and we
13586 * are in 4-port mode, it is enough to check vnic-0 of the other port
13587 * on the same path.
13588 */
13589 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13590 uint8_t other_port = !(PORT_ID(sc) & 1);
13591 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13592
13593 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13594
13595 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13596 }
13597 }
13598
13599 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13600 /* invalid MF config */
13601 if (SC_VN(sc) >= 1) {
13602 BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13603 return (1);
13604 }
13605
13606 return (0);
13607 }
13608
13609 /* get the MF configuration */
13610 mf_info->mf_config[SC_VN(sc)] =
13611 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13612
13613 switch(mf_info->mf_mode)
13614 {
13615 case MULTI_FUNCTION_SD:
13616
13617 bxe_get_shmem_mf_cfg_info_sd(sc);
13618 break;
13619
13620 case MULTI_FUNCTION_SI:
13621
13622 bxe_get_shmem_mf_cfg_info_si(sc);
13623 break;
13624
13625 case MULTI_FUNCTION_AFEX:
13626
13627 bxe_get_shmem_mf_cfg_info_niv(sc);
13628 break;
13629
13630 default:
13631
13632 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13633 mf_info->mf_mode);
13634 return (1);
13635 }
13636
13637 /* get the congestion management parameters */
13638
13639 vnic = 0;
13640 FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13641 /* get min/max bw */
13642 val = MFCFG_RD(sc, func_mf_config[i].config);
13643 mf_info->min_bw[vnic] =
13644 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13645 mf_info->max_bw[vnic] =
13646 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13647 vnic++;
13648 }
13649
13650 return (bxe_check_valid_mf_cfg(sc));
13651 }
13652
13653 static int
13654 bxe_get_shmem_info(struct bxe_softc *sc)
13655 {
13656 int port;
13657 uint32_t mac_hi, mac_lo, val;
13658
13659 port = SC_PORT(sc);
13660 mac_hi = mac_lo = 0;
13661
13662 sc->link_params.sc = sc;
13663 sc->link_params.port = port;
13664
13665 /* get the hardware config info */
13666 sc->devinfo.hw_config =
13667 SHMEM_RD(sc, dev_info.shared_hw_config.config);
13668 sc->devinfo.hw_config2 =
13669 SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13670
13671 sc->link_params.hw_led_mode =
13672 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13673 SHARED_HW_CFG_LED_MODE_SHIFT);
13674
13675 /* get the port feature config */
13676 sc->port.config =
13677 SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13678
13679 /* get the link params */
13680 sc->link_params.speed_cap_mask[0] =
13681 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13682 sc->link_params.speed_cap_mask[1] =
13683 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13684
13685 /* get the lane config */
13686 sc->link_params.lane_config =
13687 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13688
13689 /* get the link config */
13690 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13691 sc->port.link_config[ELINK_INT_PHY] = val;
13692 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13693 sc->port.link_config[ELINK_EXT_PHY1] =
13694 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13695
13696 /* get the override preemphasis flag and enable it or turn it off */
13697 val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13698 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13699 sc->link_params.feature_config_flags |=
13700 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13701 } else {
13702 sc->link_params.feature_config_flags &=
13703 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13704 }
13705
13706 /* get the initial value of the link params */
13707 sc->link_params.multi_phy_config =
13708 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13709
13710 /* get external phy info */
13711 sc->port.ext_phy_config =
13712 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13713
13714 /* get the multifunction configuration */
13715 bxe_get_mf_cfg_info(sc);
13716
13717 /* get the mac address */
13718 if (IS_MF(sc)) {
13719 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13720 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13721 } else {
13722 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13723 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13724 }
13725
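/*
 * The address is stored as two 32-bit words: mac_hi carries bytes 0-1
 * of the MAC in its low 16 bits and mac_lo carries bytes 2-5, as the
 * byte extraction below illustrates.
 */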
13726 if ((mac_lo == 0) && (mac_hi == 0)) {
13727 *sc->mac_addr_str = 0;
13728 BLOGE(sc, "No Ethernet address programmed!\n");
13729 } else {
13730 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13731 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13732 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13733 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13734 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13735 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13736 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13737 "%02x:%02x:%02x:%02x:%02x:%02x",
13738 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13739 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13740 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13741 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13742 }
13743
13744 return (0);
13745 }
13746
13747 static void
13748 bxe_get_tunable_params(struct bxe_softc *sc)
13749 {
13750 /* sanity checks */
13751
13752 if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13753 (bxe_interrupt_mode != INTR_MODE_MSI) &&
13754 (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13755 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13756 bxe_interrupt_mode = INTR_MODE_MSIX;
13757 }
13758
13759 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13760 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13761 bxe_queue_count = 0;
13762 }
13763
13764 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13765 if (bxe_max_rx_bufs == 0) {
13766 bxe_max_rx_bufs = RX_BD_USABLE;
13767 } else {
13768 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13769 bxe_max_rx_bufs = 2048;
13770 }
13771 }
13772
13773 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13774 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13775 bxe_hc_rx_ticks = 25;
13776 }
13777
13778 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13779 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13780 bxe_hc_tx_ticks = 50;
13781 }
13782
13783 if (bxe_max_aggregation_size == 0) {
13784 bxe_max_aggregation_size = TPA_AGG_SIZE;
13785 }
13786
13787 if (bxe_max_aggregation_size > 0xffff) {
13788 BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13789 bxe_max_aggregation_size);
13790 bxe_max_aggregation_size = TPA_AGG_SIZE;
13791 }
13792
13793 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13794 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13795 bxe_mrrs = -1;
13796 }
13797
13798 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13799 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13800 bxe_autogreeen = 0;
13801 }
13802
13803 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13804 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13805 bxe_udp_rss = 0;
13806 }
13807
13808 /* pull in user settings */
13809
13810 sc->interrupt_mode = bxe_interrupt_mode;
13811 sc->max_rx_bufs = bxe_max_rx_bufs;
13812 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13813 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13814 sc->max_aggregation_size = bxe_max_aggregation_size;
13815 sc->mrrs = bxe_mrrs;
13816 sc->autogreeen = bxe_autogreeen;
13817 sc->udp_rss = bxe_udp_rss;
13818
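/*
 * Derive the queue count from the interrupt mode: INTx is limited to a
 * single queue, while MSI/MSI-X honor the queue_count tunable if set
 * (otherwise one queue per CPU), clamped to both MAX_RSS_CHAINS and the
 * CPU count. For illustration, bxe_queue_count=0 on an 8-core system
 * yields min(8, MAX_RSS_CHAINS) queues.
 */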
13819 if (bxe_interrupt_mode == INTR_MODE_INTX) {
13820 sc->num_queues = 1;
13821 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13822 sc->num_queues =
13823 min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13824 MAX_RSS_CHAINS);
13825 if (sc->num_queues > mp_ncpus) {
13826 sc->num_queues = mp_ncpus;
13827 }
13828 }
13829
13830 BLOGD(sc, DBG_LOAD,
13831 "User Config: "
13832 "debug=0x%lx "
13833 "interrupt_mode=%d "
13834 "queue_count=%d "
13835 "hc_rx_ticks=%d "
13836 "hc_tx_ticks=%d "
13837 "rx_budget=%d "
13838 "max_aggregation_size=%d "
13839 "mrrs=%d "
13840 "autogreeen=%d "
13841 "udp_rss=%d\n",
13842 bxe_debug,
13843 sc->interrupt_mode,
13844 sc->num_queues,
13845 sc->hc_rx_ticks,
13846 sc->hc_tx_ticks,
13847 bxe_rx_budget,
13848 sc->max_aggregation_size,
13849 sc->mrrs,
13850 sc->autogreeen,
13851 sc->udp_rss);
13852 }
13853
13854 static int
13855 bxe_media_detect(struct bxe_softc *sc)
13856 {
13857 int port_type;
13858 uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13859
13860 switch (sc->link_params.phy[phy_idx].media_type) {
13861 case ELINK_ETH_PHY_SFPP_10G_FIBER:
13862 case ELINK_ETH_PHY_XFP_FIBER:
13863 BLOGI(sc, "Found 10Gb Fiber media.\n");
13864 sc->media = IFM_10G_SR;
13865 port_type = PORT_FIBRE;
13866 break;
13867 case ELINK_ETH_PHY_SFP_1G_FIBER:
13868 BLOGI(sc, "Found 1Gb Fiber media.\n");
13869 sc->media = IFM_1000_SX;
13870 port_type = PORT_FIBRE;
13871 break;
13872 case ELINK_ETH_PHY_KR:
13873 case ELINK_ETH_PHY_CX4:
13874 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13875 sc->media = IFM_10G_CX4;
13876 port_type = PORT_FIBRE;
13877 break;
13878 case ELINK_ETH_PHY_DA_TWINAX:
13879 BLOGI(sc, "Found 10Gb Twinax media.\n");
13880 sc->media = IFM_10G_TWINAX;
13881 port_type = PORT_DA;
13882 break;
13883 case ELINK_ETH_PHY_BASE_T:
13884 if (sc->link_params.speed_cap_mask[0] &
13885 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13886 BLOGI(sc, "Found 10GBase-T media.\n");
13887 sc->media = IFM_10G_T;
13888 port_type = PORT_TP;
13889 } else {
13890 BLOGI(sc, "Found 1000Base-T media.\n");
13891 sc->media = IFM_1000_T;
13892 port_type = PORT_TP;
13893 }
13894 break;
13895 case ELINK_ETH_PHY_NOT_PRESENT:
13896 BLOGI(sc, "Media not present.\n");
13897 sc->media = 0;
13898 port_type = PORT_OTHER;
13899 break;
13900 case ELINK_ETH_PHY_UNSPECIFIED:
13901 default:
13902 BLOGI(sc, "Unknown media!\n");
13903 sc->media = 0;
13904 port_type = PORT_OTHER;
13905 break;
13906 }
13907 return port_type;
13908 }
13909
13910 #define GET_FIELD(value, fname) \
13911 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13912 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13913 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
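/*
 * GET_FIELD() is a generic mask-and-shift bitfield extractor; for
 * example, IGU_FID(val) expands to
 * ((val & IGU_REG_MAPPING_MEMORY_FID_MASK) >> IGU_REG_MAPPING_MEMORY_FID_SHIFT).
 */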
13914
13915 static int
13916 bxe_get_igu_cam_info(struct bxe_softc *sc)
13917 {
13918 int pfid = SC_FUNC(sc);
13919 int igu_sb_id;
13920 uint32_t val;
13921 uint8_t fid, igu_sb_cnt = 0;
13922
13923 sc->igu_base_sb = 0xff;
13924
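/*
 * In backward-compatible interrupt mode the IGU status blocks are
 * statically partitioned: each function (4-port mode) or vnic (2-port
 * mode) owns a fixed block of FP_SB_MAX_E1x fastpath SBs, and the
 * default SBs are laid out after all of the fastpath SBs.
 */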
13925 if (CHIP_INT_MODE_IS_BC(sc)) {
13926 int vn = SC_VN(sc);
13927 igu_sb_cnt = sc->igu_sb_cnt;
13928 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13929 FP_SB_MAX_E1x);
13930 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13931 (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13932 return (0);
13933 }
13934
13935 /* IGU in normal mode - read CAM */
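/*
 * Each 32-bit CAM entry carries a valid bit, an FID identifying the
 * owning function (with a flag marking PF entries), and a vector
 * number. Vector 0 maps the function's default status block; non-zero
 * vectors are fastpath status blocks and are counted into igu_sb_cnt.
 */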
13936 for (igu_sb_id = 0;
13937 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13938 igu_sb_id++) {
13939 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13940 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13941 continue;
13942 }
13943 fid = IGU_FID(val);
13944 if ((fid & IGU_FID_ENCODE_IS_PF)) {
13945 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13946 continue;
13947 }
13948 if (IGU_VEC(val) == 0) {
13949 /* default status block */
13950 sc->igu_dsb_id = igu_sb_id;
13951 } else {
13952 if (sc->igu_base_sb == 0xff) {
13953 sc->igu_base_sb = igu_sb_id;
13954 }
13955 igu_sb_cnt++;
13956 }
13957 }
13958 }
13959
13960 /*
13961 * Due to the new PF resource allocation by MFW T7.4 and above, the
13962 * number of CAM entries may not equal the value advertised in PCI.
13963 * The driver should use the minimum of the two values as the actual
13964 * status block count.
13965 */
13966 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13967
13968 if (igu_sb_cnt == 0) {
13969 BLOGE(sc, "CAM configuration error\n");
13970 return (-1);
13971 }
13972
13973 return (0);
13974 }
13975
13976 /*
13977 * Gather various information from the device config space, the device itself,
13978 * shmem, and the user input.
13979 */
13980 static int
13981 bxe_get_device_info(struct bxe_softc *sc)
13982 {
13983 uint32_t val;
13984 int rc;
13985
13986 /* Get the data for the device */
13987 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
13988 sc->devinfo.device_id = pci_get_device(sc->dev);
13989 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13990 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13991
13992 /* get the chip revision (chip metal comes from pci config space) */
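/*
 * The composed chip_id layout, matching the debug print below, is:
 * bits [31:16] chip num, [15:12] chip rev, [11:4] metal, [3:0] bond id.
 */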
13993 sc->devinfo.chip_id =
13994 sc->link_params.chip_id =
13995 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
13996 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
13997 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
13998 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));
13999
14000 /* force 57811 according to MISC register */
14001 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14002 if (CHIP_IS_57810(sc)) {
14003 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14004 (sc->devinfo.chip_id & 0x0000ffff));
14005 } else if (CHIP_IS_57810_MF(sc)) {
14006 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14007 (sc->devinfo.chip_id & 0x0000ffff));
14008 }
14009 sc->devinfo.chip_id |= 0x1;
14010 }
14011
14012 BLOGD(sc, DBG_LOAD,
14013 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14014 sc->devinfo.chip_id,
14015 ((sc->devinfo.chip_id >> 16) & 0xffff),
14016 ((sc->devinfo.chip_id >> 12) & 0xf),
14017 ((sc->devinfo.chip_id >> 4) & 0xff),
14018 ((sc->devinfo.chip_id >> 0) & 0xf));
14019
14020 val = (REG_RD(sc, 0x2874) & 0x55);
14021 if ((sc->devinfo.chip_id & 0x1) ||
14022 (CHIP_IS_E1(sc) && val) ||
14023 (CHIP_IS_E1H(sc) && (val == 0x55))) {
14024 sc->flags |= BXE_ONE_PORT_FLAG;
14025 BLOGD(sc, DBG_LOAD, "single port device\n");
14026 }
14027
14028 /* set the doorbell size */
14029 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14030
14031 /* determine whether the device is in 2 port or 4 port mode */
14032 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
14033 if (CHIP_IS_E2E3(sc)) {
14034 /*
14035 * Read port4mode_en_ovwr[0]:
14036 * If 1, four port mode is in port4mode_en_ovwr[1].
14037 * If 0, four port mode is in port4mode_en[0].
14038 */
14039 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14040 if (val & 1) {
14041 val = ((val >> 1) & 1);
14042 } else {
14043 val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14044 }
14045
14046 sc->devinfo.chip_port_mode =
14047 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14048
14049 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14050 }
14051
14052 /* get the function and path info for the device */
14053 bxe_get_function_num(sc);
14054
14055 /* get the shared memory base address */
14056 sc->devinfo.shmem_base =
14057 sc->link_params.shmem_base =
14058 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14059 sc->devinfo.shmem2_base =
14060 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14061 MISC_REG_GENERIC_CR_0));
14062
14063 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14064 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14065
14066 if (!sc->devinfo.shmem_base) {
14067 /* this should ONLY prevent upcoming shmem reads */
14068 BLOGI(sc, "MCP not active\n");
14069 sc->flags |= BXE_NO_MCP_FLAG;
14070 return (0);
14071 }
14072
14073 /* make sure the shared memory contents are valid */
14074 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14075 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14076 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14077 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14078 return (0);
14079 }
14080 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14081
14082 /* get the bootcode version */
14083 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14084 snprintf(sc->devinfo.bc_ver_str,
14085 sizeof(sc->devinfo.bc_ver_str),
14086 "%d.%d.%d",
14087 ((sc->devinfo.bc_ver >> 24) & 0xff),
14088 ((sc->devinfo.bc_ver >> 16) & 0xff),
14089 ((sc->devinfo.bc_ver >> 8) & 0xff));
14090 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14091
14092 /* get the bootcode shmem address */
14093 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14094 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14095
14096 /* clean indirect addresses as they're not used */
14097 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14098 if (IS_PF(sc)) {
14099 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14100 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14101 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14102 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14103 if (CHIP_IS_E1x(sc)) {
14104 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14105 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14106 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14107 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14108 }
14109
14110 /*
14111 * Enable internal target-read (in case we are probed after PF
14112 * FLR). Must be done prior to any BAR read access. Only for
14113 * 57712 and up
14114 */
14115 if (!CHIP_IS_E1x(sc)) {
14116 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14117 }
14118 }
14119
14120 /* get the nvram size */
14121 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14122 sc->devinfo.flash_size =
14123 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14124 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14125
14126 /* get PCI capabilities */
14127 bxe_probe_pci_caps(sc);
14128
14129 bxe_set_power_state(sc, PCI_PM_D0);
14130
14131 /* get various configuration parameters from shmem */
14132 bxe_get_shmem_info(sc);
14133
14134 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14135 val = pci_read_config(sc->dev,
14136 (sc->devinfo.pcie_msix_cap_reg +
14137 PCIR_MSIX_CTRL),
14138 2);
14139 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14140 } else {
14141 sc->igu_sb_cnt = 1;
14142 }
14143
14144 sc->igu_base_addr = BAR_IGU_INTMEM;
14145
14146 /* initialize IGU parameters */
14147 if (CHIP_IS_E1x(sc)) {
14148 sc->devinfo.int_block = INT_BLOCK_HC;
14149 sc->igu_dsb_id = DEF_SB_IGU_ID;
14150 sc->igu_base_sb = 0;
14151 } else {
14152 sc->devinfo.int_block = INT_BLOCK_IGU;
14153
14154 /* do not allow device reset during IGU info processing */
14155 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14156
14157 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14158
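/*
 * If the IGU came up in backward-compatible mode, force it into normal
 * mode: clear the BC enable bit, reset the IGU memories, and poll for
 * up to ~5 seconds (5000 iterations x 1 ms) for the reset to complete.
 */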
14159 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14160 int tout = 5000;
14161
14162 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14163
14164 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14165 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14166 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14167
14168 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14169 tout--;
14170 DELAY(1000);
14171 }
14172
14173 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14174 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14175 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14176 return (-1);
14177 }
14178 }
14179
14180 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14181 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14182 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14183 } else {
14184 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14185 }
14186
14187 rc = bxe_get_igu_cam_info(sc);
14188
14189 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14190
14191 if (rc) {
14192 return (rc);
14193 }
14194 }
14195
14196 /*
14197 * Get the base FW non-default (fast path) status block ID. This value is
14198 * used to initialize the fw_sb_id saved in the fp/queue structure, which
14199 * determines the ID used by the FW.
14200 */
14201 if (CHIP_IS_E1x(sc)) {
14202 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14203 } else {
14204 /*
14205 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14206 * the same queue are indicated on the same IGU SB). So we prefer
14207 * FW and IGU SBs to be the same value.
14208 */
14209 sc->base_fw_ndsb = sc->igu_base_sb;
14210 }
14211
14212 BLOGD(sc, DBG_LOAD,
14213 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14214 sc->igu_dsb_id, sc->igu_base_sb,
14215 sc->igu_sb_cnt, sc->base_fw_ndsb);
14216
14217 elink_phy_probe(&sc->link_params);
14218
14219 return (0);
14220 }
14221
14222 static void
14223 bxe_link_settings_supported(struct bxe_softc *sc,
14224 uint32_t switch_cfg)
14225 {
14226 uint32_t cfg_size = 0;
14227 uint32_t idx;
14228 uint8_t port = SC_PORT(sc);
14229
14230 /* aggregation of supported attributes of all external phys */
14231 sc->port.supported[0] = 0;
14232 sc->port.supported[1] = 0;
14233
14234 switch (sc->link_params.num_phys) {
14235 case 1:
14236 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14237 cfg_size = 1;
14238 break;
14239 case 2:
14240 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14241 cfg_size = 1;
14242 break;
14243 case 3:
14244 if (sc->link_params.multi_phy_config &
14245 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14246 sc->port.supported[1] =
14247 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14248 sc->port.supported[0] =
14249 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14250 } else {
14251 sc->port.supported[0] =
14252 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14253 sc->port.supported[1] =
14254 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14255 }
14256 cfg_size = 2;
14257 break;
14258 }
14259
14260 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14261 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14262 SHMEM_RD(sc,
14263 dev_info.port_hw_config[port].external_phy_config),
14264 SHMEM_RD(sc,
14265 dev_info.port_hw_config[port].external_phy_config2));
14266 return;
14267 }
14268
14269 if (CHIP_IS_E3(sc))
14270 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14271 else {
14272 switch (switch_cfg) {
14273 case ELINK_SWITCH_CFG_1G:
14274 sc->port.phy_addr =
14275 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14276 break;
14277 case ELINK_SWITCH_CFG_10G:
14278 sc->port.phy_addr =
14279 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14280 break;
14281 default:
14282 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14283 sc->port.link_config[0]);
14284 return;
14285 }
14286 }
14287
14288 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14289
14290 /* mask what we support according to speed_cap_mask per configuration */
14291 for (idx = 0; idx < cfg_size; idx++) {
14292 if (!(sc->link_params.speed_cap_mask[idx] &
14293 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14294 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14295 }
14296
14297 if (!(sc->link_params.speed_cap_mask[idx] &
14298 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14299 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14300 }
14301
14302 if (!(sc->link_params.speed_cap_mask[idx] &
14303 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14304 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14305 }
14306
14307 if (!(sc->link_params.speed_cap_mask[idx] &
14308 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14309 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14310 }
14311
14312 if (!(sc->link_params.speed_cap_mask[idx] &
14313 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14314 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14315 }
14316
14317 if (!(sc->link_params.speed_cap_mask[idx] &
14318 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14319 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14320 }
14321
14322 if (!(sc->link_params.speed_cap_mask[idx] &
14323 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14324 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14325 }
14326
14327 if (!(sc->link_params.speed_cap_mask[idx] &
14328 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14329 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14330 }
14331 }
14332
14333 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14334 sc->port.supported[0], sc->port.supported[1]);
14335 ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14336 sc->port.supported[0], sc->port.supported[1]);
14337 }
14338
14339 static void
14340 bxe_link_settings_requested(struct bxe_softc *sc)
14341 {
14342 uint32_t link_config;
14343 uint32_t idx;
14344 uint32_t cfg_size = 0;
14345
14346 sc->port.advertising[0] = 0;
14347 sc->port.advertising[1] = 0;
14348
14349 switch (sc->link_params.num_phys) {
14350 case 1:
14351 case 2:
14352 cfg_size = 1;
14353 break;
14354 case 3:
14355 cfg_size = 2;
14356 break;
14357 }
14358
14359 for (idx = 0; idx < cfg_size; idx++) {
14360 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14361 link_config = sc->port.link_config[idx];
14362
14363 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14364 case PORT_FEATURE_LINK_SPEED_AUTO:
14365 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14366 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14367 sc->port.advertising[idx] |= sc->port.supported[idx];
14368 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14369 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14370 sc->port.advertising[idx] |=
14371 (ELINK_SUPPORTED_100baseT_Half |
14372 ELINK_SUPPORTED_100baseT_Full);
14373 } else {
14374 /* force 10G, no AN */
14375 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14376 sc->port.advertising[idx] |=
14377 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14378 continue;
14379 }
14380 break;
14381
14382 case PORT_FEATURE_LINK_SPEED_10M_FULL:
14383 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14384 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14385 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14386 ADVERTISED_TP);
14387 } else {
14388 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14389 "speed_cap_mask=0x%08x\n",
14390 link_config, sc->link_params.speed_cap_mask[idx]);
14391 return;
14392 }
14393 break;
14394
14395 case PORT_FEATURE_LINK_SPEED_10M_HALF:
14396 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14397 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14398 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14399 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14400 ADVERTISED_TP);
14401 ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14402 sc->link_params.req_duplex[idx]);
14403 } else {
14404 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14405 "speed_cap_mask=0x%08x\n",
14406 link_config, sc->link_params.speed_cap_mask[idx]);
14407 return;
14408 }
14409 break;
14410
14411 case PORT_FEATURE_LINK_SPEED_100M_FULL:
14412 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14413 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14414 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14415 ADVERTISED_TP);
14416 } else {
14417 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14418 "speed_cap_mask=0x%08x\n",
14419 link_config, sc->link_params.speed_cap_mask[idx]);
14420 return;
14421 }
14422 break;
14423
14424 case PORT_FEATURE_LINK_SPEED_100M_HALF:
14425 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14426 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14427 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14428 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14429 ADVERTISED_TP);
14430 } else {
14431 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14432 "speed_cap_mask=0x%08x\n",
14433 link_config, sc->link_params.speed_cap_mask[idx]);
14434 return;
14435 }
14436 break;
14437
14438 case PORT_FEATURE_LINK_SPEED_1G:
14439 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14440 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14441 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14442 ADVERTISED_TP);
14443 } else {
14444 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14445 "speed_cap_mask=0x%08x\n",
14446 link_config, sc->link_params.speed_cap_mask[idx]);
14447 return;
14448 }
14449 break;
14450
14451 case PORT_FEATURE_LINK_SPEED_2_5G:
14452 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14453 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14454 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14455 ADVERTISED_TP);
14456 } else {
14457 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14458 "speed_cap_mask=0x%08x\n",
14459 link_config, sc->link_params.speed_cap_mask[idx]);
14460 return;
14461 }
14462 break;
14463
14464 case PORT_FEATURE_LINK_SPEED_10G_CX4:
14465 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14466 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14467 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14468 ADVERTISED_FIBRE);
14469 } else {
14470 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14471 "speed_cap_mask=0x%08x\n",
14472 link_config, sc->link_params.speed_cap_mask[idx]);
14473 return;
14474 }
14475 break;
14476
14477 case PORT_FEATURE_LINK_SPEED_20G:
14478 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14479 break;
14480
14481 default:
14482 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14483 "speed_cap_mask=0x%08x\n",
14484 link_config, sc->link_params.speed_cap_mask[idx]);
14485 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14486 sc->port.advertising[idx] = sc->port.supported[idx];
14487 break;
14488 }
14489
14490 sc->link_params.req_flow_ctrl[idx] =
14491 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14492
14493 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14494 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14495 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14496 } else {
14497 bxe_set_requested_fc(sc);
14498 }
14499 }
14500
14501 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14502 "req_flow_ctrl=0x%x advertising=0x%x\n",
14503 sc->link_params.req_line_speed[idx],
14504 sc->link_params.req_duplex[idx],
14505 sc->link_params.req_flow_ctrl[idx],
14506 sc->port.advertising[idx]);
14507 ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14508 "advertising=0x%x\n",
14509 sc->link_params.req_line_speed[idx],
14510 sc->link_params.req_duplex[idx],
14511 sc->port.advertising[idx]);
14512 }
14513 }
14514
14515 static void
14516 bxe_get_phy_info(struct bxe_softc *sc)
14517 {
14518 uint8_t port = SC_PORT(sc);
14519 uint32_t config = sc->port.config;
14520 uint32_t eee_mode;
14521
14522 /* shmem data already read in bxe_get_shmem_info() */
14523
14524 ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14525 "link_config0=0x%08x\n",
14526 sc->link_params.lane_config,
14527 sc->link_params.speed_cap_mask[0],
14528 sc->port.link_config[0]);
14529
14530
14531 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14532 bxe_link_settings_requested(sc);
14533
14534 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14535 sc->link_params.feature_config_flags |=
14536 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14537 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14538 sc->link_params.feature_config_flags &=
14539 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14540 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14541 sc->link_params.feature_config_flags |=
14542 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14543 }
14544
14545 /* configure link feature according to nvram value */
14546 eee_mode =
14547 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14548 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14549 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14550 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14551 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14552 ELINK_EEE_MODE_ENABLE_LPI |
14553 ELINK_EEE_MODE_OUTPUT_TIME);
14554 } else {
14555 sc->link_params.eee_mode = 0;
14556 }
14557
14558 /* get the media type */
14559 bxe_media_detect(sc);
14560 ELINK_DEBUG_P1(sc, "detected media type = %d\n", sc->media);
14561 }
14562
14563 static void
14564 bxe_get_params(struct bxe_softc *sc)
14565 {
14566 /* get user tunable params */
14567 bxe_get_tunable_params(sc);
14568
14569 /* select the RX and TX ring sizes */
14570 sc->tx_ring_size = TX_BD_USABLE;
14571 sc->rx_ring_size = RX_BD_USABLE;
14572
14573 /* XXX disable WoL */
14574 sc->wol = 0;
14575 }
14576
14577 static void
14578 bxe_set_modes_bitmap(struct bxe_softc *sc)
14579 {
14580 uint32_t flags = 0;
14581
14582 if (CHIP_REV_IS_FPGA(sc)) {
14583 SET_FLAGS(flags, MODE_FPGA);
14584 } else if (CHIP_REV_IS_EMUL(sc)) {
14585 SET_FLAGS(flags, MODE_EMUL);
14586 } else {
14587 SET_FLAGS(flags, MODE_ASIC);
14588 }
14589
14590 if (CHIP_IS_MODE_4_PORT(sc)) {
14591 SET_FLAGS(flags, MODE_PORT4);
14592 } else {
14593 SET_FLAGS(flags, MODE_PORT2);
14594 }
14595
14596 if (CHIP_IS_E2(sc)) {
14597 SET_FLAGS(flags, MODE_E2);
14598 } else if (CHIP_IS_E3(sc)) {
14599 SET_FLAGS(flags, MODE_E3);
14600 if (CHIP_REV(sc) == CHIP_REV_Ax) {
14601 SET_FLAGS(flags, MODE_E3_A0);
14602 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14603 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14604 }
14605 }
14606
14607 if (IS_MF(sc)) {
14608 SET_FLAGS(flags, MODE_MF);
14609 switch (sc->devinfo.mf_info.mf_mode) {
14610 case MULTI_FUNCTION_SD:
14611 SET_FLAGS(flags, MODE_MF_SD);
14612 break;
14613 case MULTI_FUNCTION_SI:
14614 SET_FLAGS(flags, MODE_MF_SI);
14615 break;
14616 case MULTI_FUNCTION_AFEX:
14617 SET_FLAGS(flags, MODE_MF_AFEX);
14618 break;
14619 }
14620 } else {
14621 SET_FLAGS(flags, MODE_SF);
14622 }
14623
14624 #if defined(__LITTLE_ENDIAN)
14625 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14626 #else /* __BIG_ENDIAN */
14627 SET_FLAGS(flags, MODE_BIG_ENDIAN);
14628 #endif
14629
14630 INIT_MODE_FLAGS(sc) = flags;
14631 }
14632
14633 static int
14634 bxe_alloc_hsi_mem(struct bxe_softc *sc)
14635 {
14636 struct bxe_fastpath *fp;
14637 bus_addr_t busaddr;
14638 int max_agg_queues;
14639 int max_segments;
14640 bus_size_t max_size;
14641 bus_size_t max_seg_size;
14642 char buf[32];
14643 int rc;
14644 int i, j;
14645
14646 /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14647
14648 /* allocate the parent bus DMA tag */
14649 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14650 1, /* alignment */
14651 0, /* boundary limit */
14652 BUS_SPACE_MAXADDR, /* restricted low */
14653 BUS_SPACE_MAXADDR, /* restricted hi */
14654 NULL, /* addr filter() */
14655 NULL, /* addr filter() arg */
14656 BUS_SPACE_MAXSIZE_32BIT, /* max map size */
14657 BUS_SPACE_UNRESTRICTED, /* num discontinuous */
14658 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */
14659 0, /* flags */
14660 NULL, /* lock() */
14661 NULL, /* lock() arg */
14662 &sc->parent_dma_tag); /* returned dma tag */
14663 if (rc != 0) {
14664 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14665 return (1);
14666 }
14667
14668 /************************/
14669 /* DEFAULT STATUS BLOCK */
14670 /************************/
14671
14672 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14673 &sc->def_sb_dma, "default status block") != 0) {
14674 /* XXX */
14675 bus_dma_tag_destroy(sc->parent_dma_tag);
14676 return (1);
14677 }
14678
14679 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14680
14681 /***************/
14682 /* EVENT QUEUE */
14683 /***************/
14684
14685 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14686 &sc->eq_dma, "event queue") != 0) {
14687 /* XXX */
14688 bxe_dma_free(sc, &sc->def_sb_dma);
14689 sc->def_sb = NULL;
14690 bus_dma_tag_destroy(sc->parent_dma_tag);
14691 return (1);
14692 }
14693
14694 sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14695
14696 /*************/
14697 /* SLOW PATH */
14698 /*************/
14699
14700 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14701 &sc->sp_dma, "slow path") != 0) {
14702 /* XXX */
14703 bxe_dma_free(sc, &sc->eq_dma);
14704 sc->eq = NULL;
14705 bxe_dma_free(sc, &sc->def_sb_dma);
14706 sc->def_sb = NULL;
14707 bus_dma_tag_destroy(sc->parent_dma_tag);
14708 return (1);
14709 }
14710
14711 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14712
14713 /*******************/
14714 /* SLOW PATH QUEUE */
14715 /*******************/
14716
14717 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14718 &sc->spq_dma, "slow path queue") != 0) {
14719 /* XXX */
14720 bxe_dma_free(sc, &sc->sp_dma);
14721 sc->sp = NULL;
14722 bxe_dma_free(sc, &sc->eq_dma);
14723 sc->eq = NULL;
14724 bxe_dma_free(sc, &sc->def_sb_dma);
14725 sc->def_sb = NULL;
14726 bus_dma_tag_destroy(sc->parent_dma_tag);
14727 return (1);
14728 }
14729
14730 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14731
14732 /***************************/
14733 /* FW DECOMPRESSION BUFFER */
14734 /***************************/
14735
14736 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14737 "fw decompression buffer") != 0) {
14738 /* XXX */
14739 bxe_dma_free(sc, &sc->spq_dma);
14740 sc->spq = NULL;
14741 bxe_dma_free(sc, &sc->sp_dma);
14742 sc->sp = NULL;
14743 bxe_dma_free(sc, &sc->eq_dma);
14744 sc->eq = NULL;
14745 bxe_dma_free(sc, &sc->def_sb_dma);
14746 sc->def_sb = NULL;
14747 bus_dma_tag_destroy(sc->parent_dma_tag);
14748 return (1);
14749 }
14750
14751 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14752
14753 if ((sc->gz_strm =
14754 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14755 /* XXX */
14756 bxe_dma_free(sc, &sc->gz_buf_dma);
14757 sc->gz_buf = NULL;
14758 bxe_dma_free(sc, &sc->spq_dma);
14759 sc->spq = NULL;
14760 bxe_dma_free(sc, &sc->sp_dma);
14761 sc->sp = NULL;
14762 bxe_dma_free(sc, &sc->eq_dma);
14763 sc->eq = NULL;
14764 bxe_dma_free(sc, &sc->def_sb_dma);
14765 sc->def_sb = NULL;
14766 bus_dma_tag_destroy(sc->parent_dma_tag);
14767 return (1);
14768 }
14769
14770 /*************/
14771 /* FASTPATHS */
14772 /*************/
14773
14774 /* allocate DMA memory for each fastpath structure */
14775 for (i = 0; i < sc->num_queues; i++) {
14776 fp = &sc->fp[i];
14777 fp->sc = sc;
14778 fp->index = i;
14779
14780 /*******************/
14781 /* FP STATUS BLOCK */
14782 /*******************/
14783
14784 snprintf(buf, sizeof(buf), "fp %d status block", i);
14785 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14786 &fp->sb_dma, buf) != 0) {
14787 /* XXX unwind and free previous fastpath allocations */
14788 BLOGE(sc, "Failed to alloc %s\n", buf);
14789 return (1);
14790 } else {
14791 if (CHIP_IS_E2E3(sc)) {
14792 fp->status_block.e2_sb =
14793 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14794 } else {
14795 fp->status_block.e1x_sb =
14796 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14797 }
14798 }
14799
14800 /******************/
14801 /* FP TX BD CHAIN */
14802 /******************/
14803
14804 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14805 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14806 &fp->tx_dma, buf) != 0) {
14807 /* XXX unwind and free previous fastpath allocations */
14808 BLOGE(sc, "Failed to alloc %s\n", buf);
14809 return (1);
14810 } else {
14811 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14812 }
14813
14814 /* link together the tx bd chain pages */
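/*
 * The last BD of each page holds a next-page pointer, and the modulo
 * (j % TX_BD_NUM_PAGES) wraps the final page back to page 0, so the
 * chain forms a ring, e.g. with 4 pages: 0 -> 1 -> 2 -> 3 -> 0.
 */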
14815 for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14816 /* index into the tx bd chain array to last entry per page */
14817 struct eth_tx_next_bd *tx_next_bd =
14818 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14819 /* point to the next page and wrap from last page */
14820 busaddr = (fp->tx_dma.paddr +
14821 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14822 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14823 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14824 }
14825
14826 /******************/
14827 /* FP RX BD CHAIN */
14828 /******************/
14829
14830 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14831 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14832 &fp->rx_dma, buf) != 0) {
14833 /* XXX unwind and free previous fastpath allocations */
14834 BLOGE(sc, "Failed to alloc %s\n", buf);
14835 return (1);
14836 } else {
14837 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14838 }
14839
14840 /* link together the rx bd chain pages */
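/*
 * Like the tx chain this forms a ring, but the rx chain reserves the
 * last two BD slots of each page; the next-page pointer is written
 * into the second-to-last slot, hence the "- 2" index below.
 */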
14841 for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14842 /* index into the rx bd chain array to last entry per page */
14843 struct eth_rx_bd *rx_bd =
14844 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14845 /* point to the next page and wrap from last page */
14846 busaddr = (fp->rx_dma.paddr +
14847 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14848 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14849 rx_bd->addr_lo = htole32(U64_LO(busaddr));
14850 }
14851
14852 /*******************/
14853 /* FP RX RCQ CHAIN */
14854 /*******************/
14855
14856 snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14857 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14858 &fp->rcq_dma, buf) != 0) {
14859 /* XXX unwind and free previous fastpath allocations */
14860 BLOGE(sc, "Failed to alloc %s\n", buf);
14861 return (1);
14862 } else {
14863 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14864 }
14865
14866 /* link together the rcq chain pages */
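/*
 * The rcq chain reserves only the last CQE of each page (hence "- 1"),
 * reinterpreting it as an eth_rx_cqe_next_page element that points to
 * the next page, again wrapping the last page back to the first.
 */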
14867 for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14868 /* index into the rcq chain array to last entry per page */
14869 struct eth_rx_cqe_next_page *rx_cqe_next =
14870 (struct eth_rx_cqe_next_page *)
14871 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14872 /* point to the next page and wrap from last page */
14873 busaddr = (fp->rcq_dma.paddr +
14874 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14875 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14876 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14877 }
14878
14879 /*******************/
14880 /* FP RX SGE CHAIN */
14881 /*******************/
14882
14883 snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14884 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14885 &fp->rx_sge_dma, buf) != 0) {
14886 /* XXX unwind and free previous fastpath allocations */
14887 BLOGE(sc, "Failed to alloc %s\n", buf);
14888 return (1);
14889 } else {
14890 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14891 }
14892
14893 /* link together the sge chain pages */
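/*
 * The sge chain follows the same convention as the rx bd chain: the
 * last two entries per page are reserved and the next-page pointer
 * lands in the second-to-last slot.
 */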
14894 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14895 /* index into the sge chain array to last entry per page */
14896 struct eth_rx_sge *rx_sge =
14897 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14898 /* point to the next page and wrap from last page */
14899 busaddr = (fp->rx_sge_dma.paddr +
14900 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14901 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14902 rx_sge->addr_lo = htole32(U64_LO(busaddr));
14903 }
14904
14905 /***********************/
14906 /* FP TX MBUF DMA MAPS */
14907 /***********************/
14908
14909 /* set required sizes before mapping to conserve resources */
14910 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14911 max_size = BXE_TSO_MAX_SIZE;
14912 max_segments = BXE_TSO_MAX_SEGMENTS;
14913 max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14914 } else {
14915 max_size = (MCLBYTES * BXE_MAX_SEGMENTS);
14916 max_segments = BXE_MAX_SEGMENTS;
14917 max_seg_size = MCLBYTES;
14918 }
14919
14920 /* create a dma tag for the tx mbufs */
14921 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14922 1, /* alignment */
14923 0, /* boundary limit */
14924 BUS_SPACE_MAXADDR, /* restricted low */
14925 BUS_SPACE_MAXADDR, /* restricted hi */
14926 NULL, /* addr filter() */
14927 NULL, /* addr filter() arg */
14928 max_size, /* max map size */
14929 max_segments, /* num discontinuous */
14930 max_seg_size, /* max seg size */
14931 0, /* flags */
14932 NULL, /* lock() */
14933 NULL, /* lock() arg */
14934 &fp->tx_mbuf_tag); /* returned dma tag */
14935 if (rc != 0) {
14936 /* XXX unwind and free previous fastpath allocations */
14937 BLOGE(sc, "Failed to create dma tag for "
14938 "'fp %d tx mbufs' (%d)\n", i, rc);
14939 return (1);
14940 }
14941
14942 /* create dma maps for each of the tx mbuf clusters */
14943 for (j = 0; j < TX_BD_TOTAL; j++) {
14944 if (bus_dmamap_create(fp->tx_mbuf_tag,
14945 BUS_DMA_NOWAIT,
14946 &fp->tx_mbuf_chain[j].m_map)) {
14947 /* XXX unwind and free previous fastpath allocations */
14948 BLOGE(sc, "Failed to create dma map for "
14949 "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14950 return (1);
14951 }
14952 }
14953
14954 /***********************/
14955 /* FP RX MBUF DMA MAPS */
14956 /***********************/
14957
14958 /* create a dma tag for the rx mbufs */
14959 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14960 1, /* alignment */
14961 0, /* boundary limit */
14962 BUS_SPACE_MAXADDR, /* restricted low */
14963 BUS_SPACE_MAXADDR, /* restricted hi */
14964 NULL, /* addr filter() */
14965 NULL, /* addr filter() arg */
14966 MJUM9BYTES, /* max map size */
14967 1, /* num discontinuous */
14968 MJUM9BYTES, /* max seg size */
14969 0, /* flags */
14970 NULL, /* lock() */
14971 NULL, /* lock() arg */
14972 &fp->rx_mbuf_tag); /* returned dma tag */
14973 if (rc != 0) {
14974 /* XXX unwind and free previous fastpath allocations */
14975 BLOGE(sc, "Failed to create dma tag for "
14976 "'fp %d rx mbufs' (%d)\n", i, rc);
14977 return (1);
14978 }
14979
14980 /* create dma maps for each of the rx mbuf clusters */
14981 for (j = 0; j < RX_BD_TOTAL; j++) {
14982 if (bus_dmamap_create(fp->rx_mbuf_tag,
14983 BUS_DMA_NOWAIT,
14984 &fp->rx_mbuf_chain[j].m_map)) {
14985 /* XXX unwind and free previous fastpath allocations */
14986 BLOGE(sc, "Failed to create dma map for "
14987 "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14988 return (1);
14989 }
14990 }
14991
14992 /* create dma map for the spare rx mbuf cluster */
14993 if (bus_dmamap_create(fp->rx_mbuf_tag,
14994 BUS_DMA_NOWAIT,
14995 &fp->rx_mbuf_spare_map)) {
14996 /* XXX unwind and free previous fastpath allocations */
14997 BLOGE(sc, "Failed to create dma map for "
14998 "'fp %d spare rx mbuf' (%d)\n", i, rc);
14999 return (1);
15000 }
15001
15002 /***************************/
15003 /* FP RX SGE MBUF DMA MAPS */
15004 /***************************/
15005
15006 /* create a dma tag for the rx sge mbufs */
15007 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15008 1, /* alignment */
15009 0, /* boundary limit */
15010 BUS_SPACE_MAXADDR, /* restricted low */
15011 BUS_SPACE_MAXADDR, /* restricted hi */
15012 NULL, /* addr filter() */
15013 NULL, /* addr filter() arg */
15014 BCM_PAGE_SIZE, /* max map size */
15015 1, /* num discontinuous */
15016 BCM_PAGE_SIZE, /* max seg size */
15017 0, /* flags */
15018 NULL, /* lock() */
15019 NULL, /* lock() arg */
15020 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15021 if (rc != 0) {
15022 /* XXX unwind and free previous fastpath allocations */
15023 BLOGE(sc, "Failed to create dma tag for "
15024 "'fp %d rx sge mbufs' (%d)\n", i, rc);
15025 return (1);
15026 }
15027
15028 /* create dma maps for the rx sge mbuf clusters */
15029 for (j = 0; j < RX_SGE_TOTAL; j++) {
15030 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15031 BUS_DMA_NOWAIT,
15032 &fp->rx_sge_mbuf_chain[j].m_map)) {
15033 /* XXX unwind and free previous fastpath allocations */
15034 BLOGE(sc, "Failed to create dma map for "
15035 "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15036 return (1);
15037 }
15038 }
15039
15040 /* create dma map for the spare rx sge mbuf cluster */
15041 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15042 BUS_DMA_NOWAIT,
15043 &fp->rx_sge_mbuf_spare_map)) {
15044 /* XXX unwind and free previous fastpath allocations */
15045 BLOGE(sc, "Failed to create dma map for "
15046 "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15047 return (1);
15048 }
15049
15050 /***************************/
15051 /* FP RX TPA MBUF DMA MAPS */
15052 /***************************/
15053
15054 /* create dma maps for the rx tpa mbuf clusters */
15055 max_agg_queues = MAX_AGG_QS(sc);
15056
15057 for (j = 0; j < max_agg_queues; j++) {
15058 if (bus_dmamap_create(fp->rx_mbuf_tag,
15059 BUS_DMA_NOWAIT,
15060 &fp->rx_tpa_info[j].bd.m_map)) {
15061 /* XXX unwind and free previous fastpath allocations */
15062 BLOGE(sc, "Failed to create dma map for "
15063 "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15064 return (1);
15065 }
15066 }
15067
15068 /* create dma map for the spare rx tpa mbuf cluster */
15069 if (bus_dmamap_create(fp->rx_mbuf_tag,
15070 BUS_DMA_NOWAIT,
15071 &fp->rx_tpa_info_mbuf_spare_map)) {
15072 /* XXX unwind and free previous fastpath allocations */
15073 BLOGE(sc, "Failed to create dma map for "
15074 "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15075 return (1);
15076 }
15077
15078 bxe_init_sge_ring_bit_mask(fp);
15079 }
15080
15081 return (0);
15082 }
15083
15084 static void
15085 bxe_free_hsi_mem(struct bxe_softc *sc)
15086 {
15087 struct bxe_fastpath *fp;
15088 int max_agg_queues;
15089 int i, j;
15090
15091 if (sc->parent_dma_tag == NULL) {
15092 return; /* assume nothing was allocated */
15093 }
15094
15095 for (i = 0; i < sc->num_queues; i++) {
15096 fp = &sc->fp[i];
15097
15098 /*******************/
15099 /* FP STATUS BLOCK */
15100 /*******************/
15101
15102 bxe_dma_free(sc, &fp->sb_dma);
15103 memset(&fp->status_block, 0, sizeof(fp->status_block));
15104
15105 /******************/
15106 /* FP TX BD CHAIN */
15107 /******************/
15108
15109 bxe_dma_free(sc, &fp->tx_dma);
15110 fp->tx_chain = NULL;
15111
15112 /******************/
15113 /* FP RX BD CHAIN */
15114 /******************/
15115
15116 bxe_dma_free(sc, &fp->rx_dma);
15117 fp->rx_chain = NULL;
15118
15119 /*******************/
15120 /* FP RX RCQ CHAIN */
15121 /*******************/
15122
15123 bxe_dma_free(sc, &fp->rcq_dma);
15124 fp->rcq_chain = NULL;
15125
15126 /*******************/
15127 /* FP RX SGE CHAIN */
15128 /*******************/
15129
15130 bxe_dma_free(sc, &fp->rx_sge_dma);
15131 fp->rx_sge_chain = NULL;
15132
15133 /***********************/
15134 /* FP TX MBUF DMA MAPS */
15135 /***********************/
15136
15137 if (fp->tx_mbuf_tag != NULL) {
15138 for (j = 0; j < TX_BD_TOTAL; j++) {
15139 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15140 bus_dmamap_unload(fp->tx_mbuf_tag,
15141 fp->tx_mbuf_chain[j].m_map);
15142 bus_dmamap_destroy(fp->tx_mbuf_tag,
15143 fp->tx_mbuf_chain[j].m_map);
15144 }
15145 }
15146
15147 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15148 fp->tx_mbuf_tag = NULL;
15149 }
15150
15151 /***********************/
15152 /* FP RX MBUF DMA MAPS */
15153 /***********************/
15154
15155 if (fp->rx_mbuf_tag != NULL) {
15156 for (j = 0; j < RX_BD_TOTAL; j++) {
15157 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15158 bus_dmamap_unload(fp->rx_mbuf_tag,
15159 fp->rx_mbuf_chain[j].m_map);
15160 bus_dmamap_destroy(fp->rx_mbuf_tag,
15161 fp->rx_mbuf_chain[j].m_map);
15162 }
15163 }
15164
15165 if (fp->rx_mbuf_spare_map != NULL) {
15166 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15167 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15168 }
15169
15170 /***************************/
15171 /* FP RX TPA MBUF DMA MAPS */
15172 /***************************/
15173
15174 max_agg_queues = MAX_AGG_QS(sc);
15175
15176 for (j = 0; j < max_agg_queues; j++) {
15177 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15178 bus_dmamap_unload(fp->rx_mbuf_tag,
15179 fp->rx_tpa_info[j].bd.m_map);
15180 bus_dmamap_destroy(fp->rx_mbuf_tag,
15181 fp->rx_tpa_info[j].bd.m_map);
15182 }
15183 }
15184
15185 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15186 bus_dmamap_unload(fp->rx_mbuf_tag,
15187 fp->rx_tpa_info_mbuf_spare_map);
15188 bus_dmamap_destroy(fp->rx_mbuf_tag,
15189 fp->rx_tpa_info_mbuf_spare_map);
15190 }
15191
15192 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15193 fp->rx_mbuf_tag = NULL;
15194 }
15195
15196 /***************************/
15197 /* FP RX SGE MBUF DMA MAPS */
15198 /***************************/
15199
15200 if (fp->rx_sge_mbuf_tag != NULL) {
15201 for (j = 0; j < RX_SGE_TOTAL; j++) {
15202 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15203 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15204 fp->rx_sge_mbuf_chain[j].m_map);
15205 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15206 fp->rx_sge_mbuf_chain[j].m_map);
15207 }
15208 }
15209
15210 if (fp->rx_sge_mbuf_spare_map != NULL) {
15211 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15212 fp->rx_sge_mbuf_spare_map);
15213 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15214 fp->rx_sge_mbuf_spare_map);
15215 }
15216
15217 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15218 fp->rx_sge_mbuf_tag = NULL;
15219 }
15220 }
15221
15222 /***************************/
15223 /* FW DECOMPRESSION BUFFER */
15224 /***************************/
15225
15226 bxe_dma_free(sc, &sc->gz_buf_dma);
15227 sc->gz_buf = NULL;
15228 free(sc->gz_strm, M_DEVBUF);
15229 sc->gz_strm = NULL;
15230
15231 /*******************/
15232 /* SLOW PATH QUEUE */
15233 /*******************/
15234
15235 bxe_dma_free(sc, &sc->spq_dma);
15236 sc->spq = NULL;
15237
15238 /*************/
15239 /* SLOW PATH */
15240 /*************/
15241
15242 bxe_dma_free(sc, &sc->sp_dma);
15243 sc->sp = NULL;
15244
15245 /***************/
15246 /* EVENT QUEUE */
15247 /***************/
15248
15249 bxe_dma_free(sc, &sc->eq_dma);
15250 sc->eq = NULL;
15251
15252 /************************/
15253 /* DEFAULT STATUS BLOCK */
15254 /************************/
15255
15256 bxe_dma_free(sc, &sc->def_sb_dma);
15257 sc->def_sb = NULL;
15258
15259 bus_dma_tag_destroy(sc->parent_dma_tag);
15260 sc->parent_dma_tag = NULL;
15261 }
15262
15263 /*
15264 * A previous driver DMAE transaction may have occurred when the pre-boot
15265 * stage ended and boot began. This would invalidate the transaction's
15266 * addresses, resulting in the was-error bit being set in the PCI and causing
15267 * all hw-to-host PCIe transactions to time out. If this happened we want to
15268 * clear the interrupt which detected this from the pglueb, and the was-done bit.
15269 */
15270 static void
15271 bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15272 {
15273 uint32_t val;
15274
15275 if (!CHIP_IS_E1x(sc)) {
15276 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15277 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15278 BLOGD(sc, DBG_LOAD,
15279 "Clearing 'was-error' bit that was set in pglueb");
15280 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15281 }
15282 }
15283 }
15284
15285 static int
15286 bxe_prev_mcp_done(struct bxe_softc *sc)
15287 {
15288 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15289 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15290 if (!rc) {
15291 BLOGE(sc, "MCP response failure, aborting\n");
15292 return (-1);
15293 }
15294
15295 return (0);
15296 }
15297
15298 static struct bxe_prev_list_node *
15299 bxe_prev_path_get_entry(struct bxe_softc *sc)
15300 {
15301 struct bxe_prev_list_node *tmp;
15302
15303 LIST_FOREACH(tmp, &bxe_prev_list, node) {
15304 if ((sc->pcie_bus == tmp->bus) &&
15305 (sc->pcie_device == tmp->slot) &&
15306 (SC_PATH(sc) == tmp->path)) {
15307 return (tmp);
15308 }
15309 }
15310
15311 return (NULL);
15312 }
15313
15314 static uint8_t
15315 bxe_prev_is_path_marked(struct bxe_softc *sc)
15316 {
15317 struct bxe_prev_list_node *tmp;
15318 int rc = FALSE;
15319
15320 mtx_lock(&bxe_prev_mtx);
15321
15322 tmp = bxe_prev_path_get_entry(sc);
15323 if (tmp) {
15324 if (tmp->aer) {
15325 BLOGD(sc, DBG_LOAD,
15326 "Path %d/%d/%d was marked by AER\n",
15327 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15328 } else {
15329 rc = TRUE;
15330 BLOGD(sc, DBG_LOAD,
15331 "Path %d/%d/%d was already cleaned from previous drivers\n",
15332 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15333 }
15334 }
15335
15336 mtx_unlock(&bxe_prev_mtx);
15337
15338 return (rc);
15339 }
15340
15341 static int
15342 bxe_prev_mark_path(struct bxe_softc *sc,
15343 uint8_t after_undi)
15344 {
15345 struct bxe_prev_list_node *tmp;
15346
15347 mtx_lock(&bxe_prev_mtx);
15348
15349 /* Check whether the entry for this path already exists */
15350 tmp = bxe_prev_path_get_entry(sc);
15351 if (tmp) {
15352 if (!tmp->aer) {
15353 BLOGD(sc, DBG_LOAD,
15354 "Re-marking AER in path %d/%d/%d\n",
15355 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15356 } else {
15357 BLOGD(sc, DBG_LOAD,
15358 "Removing AER indication from path %d/%d/%d\n",
15359 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15360 tmp->aer = 0;
15361 }
15362
15363 mtx_unlock(&bxe_prev_mtx);
15364 return (0);
15365 }
15366
15367 mtx_unlock(&bxe_prev_mtx);
15368
15369 /* Create an entry for this path and add it */
15370 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15371 (M_NOWAIT | M_ZERO));
15372 if (!tmp) {
15373 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15374 return (-1);
15375 }
15376
15377 tmp->bus = sc->pcie_bus;
15378 tmp->slot = sc->pcie_device;
15379 tmp->path = SC_PATH(sc);
15380 tmp->aer = 0;
15381 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15382
15383 mtx_lock(&bxe_prev_mtx);
15384
15385 BLOGD(sc, DBG_LOAD,
15386 "Marked path %d/%d/%d - finished previous unload\n",
15387 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15388 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15389
15390 mtx_unlock(&bxe_prev_mtx);
15391
15392 return (0);
15393 }
15394
15395 static int
15396 bxe_do_flr(struct bxe_softc *sc)
15397 {
15398 int i;
15399
15400 /* only E2 and onwards support FLR */
15401 if (CHIP_IS_E1x(sc)) {
15402 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15403 return (-1);
15404 }
15405
15406     /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
15407 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15408 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15409 sc->devinfo.bc_ver);
15410 return (-1);
15411 }
15412
15413 /* Wait for Transaction Pending bit clean */
15414 for (i = 0; i < 4; i++) {
15415 if (i) {
15416 DELAY(((1 << (i - 1)) * 100) * 1000);
15417 }
15418
15419 if (!bxe_is_pcie_pending(sc)) {
15420 goto clear;
15421 }
15422 }
15423
15424     BLOGE(sc, "PCIe transactions were not cleared, "
15425           "proceeding with reset anyway\n");
15426
15427 clear:
15428
15429 BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15430 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15431
15432 return (0);
15433 }
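
/*
 * Worked timing example (editorial addition): the transaction-pending poll
 * above backs off exponentially. The DELAY() argument for i = 1..3 is
 * ((1 << (i - 1)) * 100) * 1000 usec:
 *
 *   i = 1: 100000 usec (100 ms)
 *   i = 2: 200000 usec (200 ms)
 *   i = 3: 400000 usec (400 ms)
 *
 * so the pending bit is sampled immediately and then up to three more
 * times over roughly 700 ms before the reset proceeds anyway.
 */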
15434
15435 struct bxe_mac_vals {
15436 uint32_t xmac_addr;
15437 uint32_t xmac_val;
15438 uint32_t emac_addr;
15439 uint32_t emac_val;
15440 uint32_t umac_addr;
15441 uint32_t umac_val;
15442 uint32_t bmac_addr;
15443 uint32_t bmac_val[2];
15444 };
15445
15446 static void
15447 bxe_prev_unload_close_mac(struct bxe_softc *sc,
15448 struct bxe_mac_vals *vals)
15449 {
15450 uint32_t val, base_addr, offset, mask, reset_reg;
15451 uint8_t mac_stopped = FALSE;
15452 uint8_t port = SC_PORT(sc);
15453 uint32_t wb_data[2];
15454
15455 /* reset addresses as they also mark which values were changed */
15456 vals->bmac_addr = 0;
15457 vals->umac_addr = 0;
15458 vals->xmac_addr = 0;
15459 vals->emac_addr = 0;
15460
15461 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15462
15463 if (!CHIP_IS_E3(sc)) {
15464 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15465 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15466 if ((mask & reset_reg) && val) {
15467 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15468 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15469 : NIG_REG_INGRESS_BMAC0_MEM;
15470 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15471 : BIGMAC_REGISTER_BMAC_CONTROL;
15472
15473 /*
15474 * use rd/wr since we cannot use dmae. This is safe
15475 * since MCP won't access the bus due to the request
15476 * to unload, and no function on the path can be
15477 * loaded at this time.
15478 */
15479 wb_data[0] = REG_RD(sc, base_addr + offset);
15480 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15481 vals->bmac_addr = base_addr + offset;
15482 vals->bmac_val[0] = wb_data[0];
15483 vals->bmac_val[1] = wb_data[1];
15484 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15485 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15486 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15487 }
15488
15489 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15490 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15491 vals->emac_val = REG_RD(sc, vals->emac_addr);
15492 REG_WR(sc, vals->emac_addr, 0);
15493 mac_stopped = TRUE;
15494 } else {
15495 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15496 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15497 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15498 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15499 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15500 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15501 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15502 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15503 REG_WR(sc, vals->xmac_addr, 0);
15504 mac_stopped = TRUE;
15505 }
15506
15507 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15508 if (mask & reset_reg) {
15509 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15510 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15511 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15512 vals->umac_val = REG_RD(sc, vals->umac_addr);
15513 REG_WR(sc, vals->umac_addr, 0);
15514 mac_stopped = TRUE;
15515 }
15516 }
15517
15518 if (mac_stopped) {
15519 DELAY(20000);
15520 }
15521 }
15522
15523 #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15524 #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
15525 #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
15526 #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15527
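/*
 * Illustrative decode (hypothetical register value, editorial addition):
 * the UNDI producer word packs both ring producers, RCQ in the low 16
 * bits and BD in the high 16 bits. For a raw value of 0x00120034:
 *
 *   BXE_PREV_UNDI_RCQ(0x00120034)      == 0x0034
 *   BXE_PREV_UNDI_BD(0x00120034)       == 0x0012
 *   BXE_PREV_UNDI_PROD(0x0035, 0x0013) == 0x00130035
 */
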
15528 static void
15529 bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15530 uint8_t port,
15531 uint8_t inc)
15532 {
15533 uint16_t rcq, bd;
15534 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15535
15536 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15537 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15538
15539 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15540 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15541
15542 BLOGD(sc, DBG_LOAD,
15543 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15544 port, bd, rcq);
15545 }
15546
15547 static int
15548 bxe_prev_unload_common(struct bxe_softc *sc)
15549 {
15550 uint32_t reset_reg, tmp_reg = 0, rc;
15551 uint8_t prev_undi = FALSE;
15552 struct bxe_mac_vals mac_vals;
15553 uint32_t timer_count = 1000;
15554 uint32_t prev_brb;
15555
15556 /*
15557  * It is possible that a previous function received the 'common' answer
15558  * but has not loaded yet, therefore creating a scenario of
15559 * multiple functions receiving 'common' on the same path.
15560 */
15561 BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15562
15563 memset(&mac_vals, 0, sizeof(mac_vals));
15564
15565 if (bxe_prev_is_path_marked(sc)) {
15566 return (bxe_prev_mcp_done(sc));
15567 }
15568
15569 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15570
15571 /* Reset should be performed after BRB is emptied */
15572 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15573 /* Close the MAC Rx to prevent BRB from filling up */
15574 bxe_prev_unload_close_mac(sc, &mac_vals);
15575
15576 /* close LLH filters towards the BRB */
15577 elink_set_rx_filter(&sc->link_params, 0);
15578
15579 /*
15580 * Check if the UNDI driver was previously loaded.
15581 * UNDI driver initializes CID offset for normal bell to 0x7
15582 */
15583 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15584 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15585 if (tmp_reg == 0x7) {
15586 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15587 prev_undi = TRUE;
15588 /* clear the UNDI indication */
15589 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15590 /* clear possible idle check errors */
15591 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15592 }
15593 }
15594
15595 /* wait until BRB is empty */
15596 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15597 while (timer_count) {
15598 prev_brb = tmp_reg;
15599
15600 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15601 if (!tmp_reg) {
15602 break;
15603 }
15604
15605 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15606
15607 /* reset timer as long as BRB actually gets emptied */
15608 if (prev_brb > tmp_reg) {
15609 timer_count = 1000;
15610 } else {
15611 timer_count--;
15612 }
15613
15614 /* If UNDI resides in memory, manually increment it */
15615 if (prev_undi) {
15616 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15617 }
15618
15619 DELAY(10);
15620 }
15621
15622 if (!timer_count) {
15623 BLOGE(sc, "Failed to empty BRB\n");
15624 }
15625 }
15626
15627 /* No packets are in the pipeline, path is ready for reset */
15628 bxe_reset_common(sc);
15629
15630 if (mac_vals.xmac_addr) {
15631 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15632 }
15633 if (mac_vals.umac_addr) {
15634 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15635 }
15636 if (mac_vals.emac_addr) {
15637 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15638 }
15639 if (mac_vals.bmac_addr) {
15640 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15641 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15642 }
15643
15644 rc = bxe_prev_mark_path(sc, prev_undi);
15645 if (rc) {
15646 bxe_prev_mcp_done(sc);
15647 return (rc);
15648 }
15649
15650 return (bxe_prev_mcp_done(sc));
15651 }
15652
15653 static int
15654 bxe_prev_unload_uncommon(struct bxe_softc *sc)
15655 {
15656 int rc;
15657
15658 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15659
15660 /* Test if previous unload process was already finished for this path */
15661 if (bxe_prev_is_path_marked(sc)) {
15662 return (bxe_prev_mcp_done(sc));
15663 }
15664
15665 BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15666
15667 /*
15668 * If function has FLR capabilities, and existing FW version matches
15669 * the one required, then FLR will be sufficient to clean any residue
15670 * left by previous driver
15671 */
15672 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15673 if (!rc) {
15674 /* fw version is good */
15675 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15676 rc = bxe_do_flr(sc);
15677 }
15678
15679 if (!rc) {
15680 /* FLR was performed */
15681 BLOGD(sc, DBG_LOAD, "FLR successful\n");
15682 return (0);
15683 }
15684
15685 BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15686
15687     /* Close the MCP request, return failure */
15688 rc = bxe_prev_mcp_done(sc);
15689 if (!rc) {
15690 rc = BXE_PREV_WAIT_NEEDED;
15691 }
15692
15693 return (rc);
15694 }
15695
15696 static int
15697 bxe_prev_unload(struct bxe_softc *sc)
15698 {
15699 int time_counter = 10;
15700 uint32_t fw, hw_lock_reg, hw_lock_val;
15701 uint32_t rc = 0;
15702
15703 /*
15704 * Clear HW from errors which may have resulted from an interrupted
15705 * DMAE transaction.
15706 */
15707 bxe_prev_interrupted_dmae(sc);
15708
15709 /* Release previously held locks */
15710 hw_lock_reg =
15711 (SC_FUNC(sc) <= 5) ?
15712 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15713 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15714
15715 hw_lock_val = (REG_RD(sc, hw_lock_reg));
15716 if (hw_lock_val) {
15717 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15718 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15719 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15720 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15721 }
15722 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15723 REG_WR(sc, hw_lock_reg, 0xffffffff);
15724 } else {
15725 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15726 }
15727
15728 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15729 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15730 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15731 }
15732
15733 do {
15734 /* Lock MCP using an unload request */
15735 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15736 if (!fw) {
15737 BLOGE(sc, "MCP response failure, aborting\n");
15738 rc = -1;
15739 break;
15740 }
15741
15742 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15743 rc = bxe_prev_unload_common(sc);
15744 break;
15745 }
15746
15747         /* a non-common reply from the MCP might require looping */
15748 rc = bxe_prev_unload_uncommon(sc);
15749 if (rc != BXE_PREV_WAIT_NEEDED) {
15750 break;
15751 }
15752
15753 DELAY(20000);
15754 } while (--time_counter);
15755
15756 if (!time_counter || rc) {
15757 BLOGE(sc, "Failed to unload previous driver!"
15758 " time_counter %d rc %d\n", time_counter, rc);
15759 rc = -1;
15760 }
15761
15762 return (rc);
15763 }
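
/*
 * Editorial sketch of the previous-unload handshake implemented above
 * (all names are the driver's own):
 *
 *   bxe_prev_unload()
 *     -> bxe_fw_command(DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS)   lock the MCP
 *        reply == FW_MSG_CODE_DRV_UNLOAD_COMMON -> bxe_prev_unload_common()
 *        any other reply                        -> bxe_prev_unload_uncommon()
 *
 * The uncommon path may return BXE_PREV_WAIT_NEEDED, in which case the
 * request is retried, up to 10 attempts with a 20 ms delay between them.
 */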
15764
15765 void
15766 bxe_dcbx_set_state(struct bxe_softc *sc,
15767 uint8_t dcb_on,
15768 uint32_t dcbx_enabled)
15769 {
15770 if (!CHIP_IS_E1x(sc)) {
15771 sc->dcb_state = dcb_on;
15772 sc->dcbx_enabled = dcbx_enabled;
15773 } else {
15774 sc->dcb_state = FALSE;
15775 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15776 }
15777 BLOGD(sc, DBG_LOAD,
15778 "DCB state [%s:%s]\n",
15779 dcb_on ? "ON" : "OFF",
15780 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15781 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15782 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15783 "on-chip with negotiation" : "invalid");
15784 }
15785
15786 /* must be called after sriov-enable */
15787 static int
15788 bxe_set_qm_cid_count(struct bxe_softc *sc)
15789 {
15790 int cid_count = BXE_L2_MAX_CID(sc);
15791
15792 if (IS_SRIOV(sc)) {
15793 cid_count += BXE_VF_CIDS;
15794 }
15795
15796 if (CNIC_SUPPORT(sc)) {
15797 cid_count += CNIC_CID_MAX;
15798 }
15799
15800 return (roundup(cid_count, QM_CID_ROUND));
15801 }
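
/*
 * Illustrative arithmetic (hypothetical values, editorial addition):
 * assuming an L2 CID count of 48, CNIC support adding 16 more, and a
 * QM_CID_ROUND of 1024, roundup(64, 1024) yields 1024 -- the QM is always
 * handed a whole multiple of the round size, never the raw count.
 */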
15802
15803 static void
15804 bxe_init_multi_cos(struct bxe_softc *sc)
15805 {
15806 int pri, cos;
15807
15808 uint32_t pri_map = 0; /* XXX change to user config */
15809
15810 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15811 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15812 if (cos < sc->max_cos) {
15813 sc->prio_to_cos[pri] = cos;
15814 } else {
15815 BLOGW(sc, "Invalid COS %d for priority %d "
15816 "(max COS is %d), setting to 0\n",
15817 cos, pri, (sc->max_cos - 1));
15818 sc->prio_to_cos[pri] = 0;
15819 }
15820 }
15821 }
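
/*
 * Illustrative mapping (hypothetical pri_map, editorial addition): each
 * priority owns a 4-bit field of pri_map, priority N in bits [4N+3:4N].
 * With pri_map = 0x00001100 and max_cos = 2:
 *
 *   pri 0, 1 -> nibble 0x0 -> COS 0
 *   pri 2, 3 -> nibble 0x1 -> COS 1
 *   pri 4..7 -> nibble 0x0 -> COS 0
 *
 * Any nibble >= max_cos would instead log the warning above and fall back
 * to COS 0.
 */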
15822
15823 static int
15824 bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15825 {
15826 struct bxe_softc *sc;
15827 int error, result;
15828
15829 result = 0;
15830 error = sysctl_handle_int(oidp, &result, 0, req);
15831
15832 if (error || !req->newptr) {
15833 return (error);
15834 }
15835
15836 if (result == 1) {
15837 uint32_t temp;
15838 sc = (struct bxe_softc *)arg1;
15839
15840 BLOGI(sc, "... dumping driver state ...\n");
15841 temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15842 BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15843 }
15844
15845 return (error);
15846 }
15847
15848 static int
15849 bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15850 {
15851 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15852 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15853 uint32_t *offset;
15854 uint64_t value = 0;
15855 int index = (int)arg2;
15856
15857 if (index >= BXE_NUM_ETH_STATS) {
15858 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15859 return (-1);
15860 }
15861
15862 offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15863
15864 switch (bxe_eth_stats_arr[index].size) {
15865 case 4:
15866 value = (uint64_t)*offset;
15867 break;
15868 case 8:
15869 value = HILO_U64(*offset, *(offset + 1));
15870 break;
15871 default:
15872 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15873 index, bxe_eth_stats_arr[index].size);
15874 return (-1);
15875 }
15876
15877 return (sysctl_handle_64(oidp, &value, 0, req));
15878 }
15879
15880 static int
15881 bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15882 {
15883 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15884 uint32_t *eth_stats;
15885 uint32_t *offset;
15886 uint64_t value = 0;
15887 uint32_t q_stat = (uint32_t)arg2;
15888 uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15889 uint32_t index = (q_stat & 0xffff);
15890
15891 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15892
15893 if (index >= BXE_NUM_ETH_Q_STATS) {
15894 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15895 return (-1);
15896 }
15897
15898 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15899
15900 switch (bxe_eth_q_stats_arr[index].size) {
15901 case 4:
15902 value = (uint64_t)*offset;
15903 break;
15904 case 8:
15905 value = HILO_U64(*offset, *(offset + 1));
15906 break;
15907 default:
15908 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15909 index, bxe_eth_q_stats_arr[index].size);
15910 return (-1);
15911 }
15912
15913 return (sysctl_handle_64(oidp, &value, 0, req));
15914 }
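
/*
 * A minimal sketch (editorial addition, not part of the original driver)
 * of the arg2 packing that the handler above decodes: fastpath index in
 * the high 16 bits, stat index in the low 16 bits. E.g. fastpath 2,
 * stat 7 encodes to 0x00020007.
 */
static inline uint32_t
bxe_q_stat_encode(uint32_t fp_index, uint32_t stat_index)
{
    /* mirrors the "(i << 16) | j" construction in bxe_add_sysctls() */
    return ((fp_index << 16) | (stat_index & 0xffff));
}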
15915
15916 static void bxe_force_link_reset(struct bxe_softc *sc)
15917 {
15918
15919 bxe_acquire_phy_lock(sc);
15920 elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15921 bxe_release_phy_lock(sc);
15922 }
15923
15924 static int
15925 bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15926 {
15927 struct bxe_softc *sc = (struct bxe_softc *)arg1;
15928 uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15929 int rc = 0;
15930 int error;
15931 int result;
15932
15933
15934 error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15935
15936 if (error || !req->newptr) {
15937 return (error);
15938 }
15939 if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15940         BLOGW(sc, "invalid pause param (%d) - use integers between 0 & 8\n", sc->bxe_pause_param);
15941 sc->bxe_pause_param = 8;
15942 }
15943
15944 result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15945
15946
15947     if ((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15948         BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15949         return -EINVAL;
15950     }
15951
15952     if (IS_MF(sc))
15953         return 0;
15954     sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15955     if (result & ELINK_FLOW_CTRL_RX)
15956         sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15957
15958     if (result & ELINK_FLOW_CTRL_TX)
15959         sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15960     if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15961         sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15962
15963     if (result & 0x400) {
15964 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15965 sc->link_params.req_flow_ctrl[cfg_idx] =
15966 ELINK_FLOW_CTRL_AUTO;
15967 }
15968 sc->link_params.req_fc_auto_adv = 0;
15969 if (result & ELINK_FLOW_CTRL_RX)
15970 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15971
15972 if (result & ELINK_FLOW_CTRL_TX)
15973 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15974 if (!sc->link_params.req_fc_auto_adv)
15975 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15976 }
15977 if (IS_PF(sc)) {
15978 if (sc->link_vars.link_up) {
15979 bxe_stats_handle(sc, STATS_EVENT_STOP);
15980 }
15981 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15982 bxe_force_link_reset(sc);
15983 bxe_acquire_phy_lock(sc);
15984
15985 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15986
15987 bxe_release_phy_lock(sc);
15988
15989 bxe_calc_fc_adv(sc);
15990 }
15991 }
15992 return rc;
15993 }
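
/*
 * Hypothetical usage from userland (assuming device unit 0); the value
 * encoding follows the sysctl description in bxe_add_sysctls() below:
 *
 *   # sysctl dev.bxe.0.pause_param=3    force both TX and RX pause
 *   # sysctl dev.bxe.0.pause_param=4    autonegotiate flow control
 *   # sysctl dev.bxe.0.pause_param=8    disable pause frames entirely
 *
 * Given the AUTO:4 encoding, the "result & 0x400" test above fires for the
 * autoneg modes (4..7) and rejects them when the current link config does
 * not advertise ELINK_SUPPORTED_Autoneg.
 */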
15994
15995
15996 static void
15997 bxe_add_sysctls(struct bxe_softc *sc)
15998 {
15999 struct sysctl_ctx_list *ctx;
16000 struct sysctl_oid_list *children;
16001 struct sysctl_oid *queue_top, *queue;
16002 struct sysctl_oid_list *queue_top_children, *queue_children;
16003 char queue_num_buf[32];
16004 uint32_t q_stat;
16005 int i, j;
16006
16007 ctx = device_get_sysctl_ctx(sc->dev);
16008 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
16009
16010 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16011 CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16012 "version");
16013
16014 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16015 BCM_5710_FW_MAJOR_VERSION,
16016 BCM_5710_FW_MINOR_VERSION,
16017 BCM_5710_FW_REVISION_VERSION,
16018 BCM_5710_FW_ENGINEERING_VERSION);
16019
16020 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16021 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
16022 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
16023 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
16024 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16025 "Unknown"));
16026 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16027 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16028 "multifunction vnics per port");
16029
16030 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16031 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16032 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16033 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16034 "???GT/s"),
16035 sc->devinfo.pcie_link_width);
16036
16037 sc->debug = bxe_debug;
16038
16039 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16040 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16041 "bootcode version");
16042 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16043 CTLFLAG_RD, sc->fw_ver_str, 0,
16044 "firmware version");
16045 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16046 CTLFLAG_RD, sc->mf_mode_str, 0,
16047 "multifunction mode");
16048 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16049 CTLFLAG_RD, sc->mac_addr_str, 0,
16050 "mac address");
16051 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16052 CTLFLAG_RD, sc->pci_link_str, 0,
16053 "pci link status");
16054 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16055 CTLFLAG_RW, &sc->debug,
16056 "debug logging mode");
16057
16058 sc->trigger_grcdump = 0;
16059 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16060 CTLFLAG_RW, &sc->trigger_grcdump, 0,
16061                     "set to trigger a grcdump (must be done"
16062                     " before collecting the grcdump)");
16063
16064 sc->grcdump_started = 0;
16065 sc->grcdump_done = 0;
16066 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16067 CTLFLAG_RD, &sc->grcdump_done, 0,
16068 "set by driver when grcdump is done");
16069
16070 sc->rx_budget = bxe_rx_budget;
16071 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16072 CTLFLAG_RW, &sc->rx_budget, 0,
16073 "rx processing budget");
16074
16075 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16076 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16077 bxe_sysctl_pauseparam, "IU",
16078                     "need pause frames - DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16079
16080
16081 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16082 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16083 bxe_sysctl_state, "IU", "dump driver state");
16084
16085 for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16086 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16087 bxe_eth_stats_arr[i].string,
16088 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16089 bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16090 }
16091
16092 /* add a new parent node for all queues "dev.bxe.#.queue" */
16093 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16094 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16095 queue_top_children = SYSCTL_CHILDREN(queue_top);
16096
16097 for (i = 0; i < sc->num_queues; i++) {
16098 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16099 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16100 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16101 queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16102 queue_children = SYSCTL_CHILDREN(queue);
16103
16104 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
16105 q_stat = ((i << 16) | j);
16106 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16107 bxe_eth_q_stats_arr[j].string,
16108 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16109 bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16110 }
16111 }
16112 }
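
/*
 * Hypothetical usage (assuming unit 0): the nodes created above land under
 * the dev.bxe.0 sysctl tree, for example:
 *
 *   # sysctl dev.bxe.0.state=1              dump driver state and temperature
 *   # sysctl dev.bxe.0.trigger_grcdump=1    arm a grcdump
 *   # sysctl dev.bxe.0.queue.0              read all stats for fastpath 0
 */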
16113
16114 static int
16115 bxe_alloc_buf_rings(struct bxe_softc *sc)
16116 {
16117 int i;
16118 struct bxe_fastpath *fp;
16119
16120 for (i = 0; i < sc->num_queues; i++) {
16121
16122 fp = &sc->fp[i];
16123
16124 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16125 M_NOWAIT, &fp->tx_mtx);
16126 if (fp->tx_br == NULL)
16127 return (-1);
16128 }
16129
16130 return (0);
16131 }
16132
16133 static void
16134 bxe_free_buf_rings(struct bxe_softc *sc)
16135 {
16136 int i;
16137 struct bxe_fastpath *fp;
16138
16139 for (i = 0; i < sc->num_queues; i++) {
16140
16141 fp = &sc->fp[i];
16142
16143 if (fp->tx_br) {
16144 buf_ring_free(fp->tx_br, M_DEVBUF);
16145 fp->tx_br = NULL;
16146 }
16147 }
16148 }
16149
16150 static void
16151 bxe_init_fp_mutexs(struct bxe_softc *sc)
16152 {
16153 int i;
16154 struct bxe_fastpath *fp;
16155
16156 for (i = 0; i < sc->num_queues; i++) {
16157
16158 fp = &sc->fp[i];
16159
16160 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16161 "bxe%d_fp%d_tx_lock", sc->unit, i);
16162 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16163
16164 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16165 "bxe%d_fp%d_rx_lock", sc->unit, i);
16166 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16167 }
16168 }
16169
16170 static void
16171 bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16172 {
16173 int i;
16174 struct bxe_fastpath *fp;
16175
16176 for (i = 0; i < sc->num_queues; i++) {
16177
16178 fp = &sc->fp[i];
16179
16180 if (mtx_initialized(&fp->tx_mtx)) {
16181 mtx_destroy(&fp->tx_mtx);
16182 }
16183
16184 if (mtx_initialized(&fp->rx_mtx)) {
16185 mtx_destroy(&fp->rx_mtx);
16186 }
16187 }
16188 }
16189
16190
16191 /*
16192 * Device attach function.
16193 *
16194 * Allocates device resources, performs secondary chip identification, and
16195 * initializes driver instance variables. This function is called from driver
16196 * load after a successful probe.
16197 *
16198 * Returns:
16199 * 0 = Success, >0 = Failure
16200 */
16201 static int
16202 bxe_attach(device_t dev)
16203 {
16204 struct bxe_softc *sc;
16205
16206 sc = device_get_softc(dev);
16207
16208 BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16209
16210 sc->state = BXE_STATE_CLOSED;
16211
16212 sc->dev = dev;
16213 sc->unit = device_get_unit(dev);
16214
16215 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16216
16217 sc->pcie_bus = pci_get_bus(dev);
16218 sc->pcie_device = pci_get_slot(dev);
16219 sc->pcie_func = pci_get_function(dev);
16220
16221 /* enable bus master capability */
16222 pci_enable_busmaster(dev);
16223
16224 /* get the BARs */
16225 if (bxe_allocate_bars(sc) != 0) {
16226 return (ENXIO);
16227 }
16228
16229 /* initialize the mutexes */
16230 bxe_init_mutexes(sc);
16231
16232 /* prepare the periodic callout */
16233 callout_init(&sc->periodic_callout, 1);
16234
16235 /* prepare the chip taskqueue */
16236 sc->chip_tq_flags = CHIP_TQ_NONE;
16237 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16238 "bxe%d_chip_tq", sc->unit);
16239 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16240 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16241 taskqueue_thread_enqueue,
16242 &sc->chip_tq);
16243 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16244 "%s", sc->chip_tq_name);
16245
16246 TIMEOUT_TASK_INIT(taskqueue_thread,
16247 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc);
16248
16249
16250 /* get device info and set params */
16251 if (bxe_get_device_info(sc) != 0) {
16252 BLOGE(sc, "getting device info\n");
16253 bxe_deallocate_bars(sc);
16254 pci_disable_busmaster(dev);
16255 return (ENXIO);
16256 }
16257
16258 /* get final misc params */
16259 bxe_get_params(sc);
16260
16261 /* set the default MTU (changed via ifconfig) */
16262 sc->mtu = ETHERMTU;
16263
16264 bxe_set_modes_bitmap(sc);
16265
16266 /* XXX
16267 * If in AFEX mode and the function is configured for FCoE
16268 * then bail... no L2 allowed.
16269 */
16270
16271 /* get phy settings from shmem and 'and' against admin settings */
16272 bxe_get_phy_info(sc);
16273
16274 /* initialize the FreeBSD ifnet interface */
16275 if (bxe_init_ifnet(sc) != 0) {
16276 bxe_release_mutexes(sc);
16277 bxe_deallocate_bars(sc);
16278 pci_disable_busmaster(dev);
16279 return (ENXIO);
16280 }
16281
16282 if (bxe_add_cdev(sc) != 0) {
16283 if (sc->ifp != NULL) {
16284 ether_ifdetach(sc->ifp);
16285 }
16286 ifmedia_removeall(&sc->ifmedia);
16287 bxe_release_mutexes(sc);
16288 bxe_deallocate_bars(sc);
16289 pci_disable_busmaster(dev);
16290 return (ENXIO);
16291 }
16292
16293 /* allocate device interrupts */
16294 if (bxe_interrupt_alloc(sc) != 0) {
16295 bxe_del_cdev(sc);
16296 if (sc->ifp != NULL) {
16297 ether_ifdetach(sc->ifp);
16298 }
16299 ifmedia_removeall(&sc->ifmedia);
16300 bxe_release_mutexes(sc);
16301 bxe_deallocate_bars(sc);
16302 pci_disable_busmaster(dev);
16303 return (ENXIO);
16304 }
16305
16306 bxe_init_fp_mutexs(sc);
16307
16308 if (bxe_alloc_buf_rings(sc) != 0) {
16309 bxe_free_buf_rings(sc);
16310 bxe_interrupt_free(sc);
16311 bxe_del_cdev(sc);
16312 if (sc->ifp != NULL) {
16313 ether_ifdetach(sc->ifp);
16314 }
16315 ifmedia_removeall(&sc->ifmedia);
16316 bxe_release_mutexes(sc);
16317 bxe_deallocate_bars(sc);
16318 pci_disable_busmaster(dev);
16319 return (ENXIO);
16320 }
16321
16322 /* allocate ilt */
16323 if (bxe_alloc_ilt_mem(sc) != 0) {
16324 bxe_free_buf_rings(sc);
16325 bxe_interrupt_free(sc);
16326 bxe_del_cdev(sc);
16327 if (sc->ifp != NULL) {
16328 ether_ifdetach(sc->ifp);
16329 }
16330 ifmedia_removeall(&sc->ifmedia);
16331 bxe_release_mutexes(sc);
16332 bxe_deallocate_bars(sc);
16333 pci_disable_busmaster(dev);
16334 return (ENXIO);
16335 }
16336
16337 /* allocate the host hardware/software hsi structures */
16338 if (bxe_alloc_hsi_mem(sc) != 0) {
16339 bxe_free_ilt_mem(sc);
16340 bxe_free_buf_rings(sc);
16341 bxe_interrupt_free(sc);
16342 bxe_del_cdev(sc);
16343 if (sc->ifp != NULL) {
16344 ether_ifdetach(sc->ifp);
16345 }
16346 ifmedia_removeall(&sc->ifmedia);
16347 bxe_release_mutexes(sc);
16348 bxe_deallocate_bars(sc);
16349 pci_disable_busmaster(dev);
16350 return (ENXIO);
16351 }
16352
16353 /* need to reset chip if UNDI was active */
16354 if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16355 /* init fw_seq */
16356 sc->fw_seq =
16357 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16358 DRV_MSG_SEQ_NUMBER_MASK);
16359 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16360 bxe_prev_unload(sc);
16361 }
16362
16363 #if 1
16364 /* XXX */
16365 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16366 #else
16367 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16368 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16369 SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16370 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16371 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16372 bxe_dcbx_init_params(sc);
16373 } else {
16374 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16375 }
16376 #endif
16377
16378 /* calculate qm_cid_count */
16379 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16380 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16381
16382 sc->max_cos = 1;
16383 bxe_init_multi_cos(sc);
16384
16385 bxe_add_sysctls(sc);
16386
16387 return (0);
16388 }
16389
16390 /*
16391 * Device detach function.
16392 *
16393 * Stops the controller, resets the controller, and releases resources.
16394 *
16395 * Returns:
16396 * 0 = Success, >0 = Failure
16397 */
16398 static int
16399 bxe_detach(device_t dev)
16400 {
16401 struct bxe_softc *sc;
16402 if_t ifp;
16403
16404 sc = device_get_softc(dev);
16405
16406 BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16407
16408 ifp = sc->ifp;
16409 if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16410 BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16411 return(EBUSY);
16412 }
16413
16414 bxe_del_cdev(sc);
16415
16416 /* stop the periodic callout */
16417 bxe_periodic_stop(sc);
16418
16419 /* stop the chip taskqueue */
16420 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16421 if (sc->chip_tq) {
16422 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16423 taskqueue_free(sc->chip_tq);
16424 sc->chip_tq = NULL;
16425 taskqueue_drain_timeout(taskqueue_thread,
16426 &sc->sp_err_timeout_task);
16427 }
16428
16429 /* stop and reset the controller if it was open */
16430 if (sc->state != BXE_STATE_CLOSED) {
16431 BXE_CORE_LOCK(sc);
16432 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16433 sc->state = BXE_STATE_DISABLED;
16434 BXE_CORE_UNLOCK(sc);
16435 }
16436
16437 /* release the network interface */
16438 if (ifp != NULL) {
16439 ether_ifdetach(ifp);
16440 }
16441 ifmedia_removeall(&sc->ifmedia);
16442
16443 /* XXX do the following based on driver state... */
16444
16445 /* free the host hardware/software hsi structures */
16446 bxe_free_hsi_mem(sc);
16447
16448 /* free ilt */
16449 bxe_free_ilt_mem(sc);
16450
16451 bxe_free_buf_rings(sc);
16452
16453 /* release the interrupts */
16454 bxe_interrupt_free(sc);
16455
16456     /* Release the mutexes */
16457 bxe_destroy_fp_mutexs(sc);
16458 bxe_release_mutexes(sc);
16459
16460
16461 /* Release the PCIe BAR mapped memory */
16462 bxe_deallocate_bars(sc);
16463
16464 /* Release the FreeBSD interface. */
16465 if (sc->ifp != NULL) {
16466 if_free(sc->ifp);
16467 }
16468
16469 pci_disable_busmaster(dev);
16470
16471 return (0);
16472 }
16473
16474 /*
16475 * Device shutdown function.
16476 *
16477 * Stops and resets the controller.
16478 *
16479 * Returns:
16480  *   0 = Success
16481 */
16482 static int
16483 bxe_shutdown(device_t dev)
16484 {
16485 struct bxe_softc *sc;
16486
16487 sc = device_get_softc(dev);
16488
16489 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16490
16491 /* stop the periodic callout */
16492 bxe_periodic_stop(sc);
16493
16494 if (sc->state != BXE_STATE_CLOSED) {
16495 BXE_CORE_LOCK(sc);
16496 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16497 BXE_CORE_UNLOCK(sc);
16498 }
16499
16500 return (0);
16501 }
16502
16503 void
16504 bxe_igu_ack_sb(struct bxe_softc *sc,
16505 uint8_t igu_sb_id,
16506 uint8_t segment,
16507 uint16_t index,
16508 uint8_t op,
16509 uint8_t update)
16510 {
16511 uint32_t igu_addr = sc->igu_base_addr;
16512 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16513 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16514 }
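
/*
 * Illustrative address computation (editorial addition): each IGU command
 * slot is 8 bytes wide, so acking status block 5 targets
 *
 *   igu_addr = sc->igu_base_addr + (IGU_CMD_INT_ACK_BASE + 5) * 8
 */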
16515
16516 static void
16517 bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16518 uint8_t func,
16519 uint8_t idu_sb_id,
16520 uint8_t is_pf)
16521 {
16522 uint32_t data, ctl, cnt = 100;
16523 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16524 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16525 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16526 uint32_t sb_bit = 1 << (idu_sb_id%32);
16527 uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16528 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16529
16530 /* Not supported in BC mode */
16531 if (CHIP_INT_MODE_IS_BC(sc)) {
16532 return;
16533 }
16534
16535 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16536 IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16537 IGU_REGULAR_CLEANUP_SET |
16538 IGU_REGULAR_BCLEANUP);
16539
16540 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16541 (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16542 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16543
16544 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16545 data, igu_addr_data);
16546 REG_WR(sc, igu_addr_data, data);
16547
16548 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16549 BUS_SPACE_BARRIER_WRITE);
16550 mb();
16551
16552 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16553 ctl, igu_addr_ctl);
16554 REG_WR(sc, igu_addr_ctl, ctl);
16555
16556 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16557 BUS_SPACE_BARRIER_WRITE);
16558 mb();
16559
16560 /* wait for clean up to finish */
16561 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16562 DELAY(20000);
16563 }
16564
16565 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16566 BLOGD(sc, DBG_LOAD,
16567 "Unable to finish IGU cleanup: "
16568 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16569 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16570 }
16571 }
16572
16573 static void
16574 bxe_igu_clear_sb(struct bxe_softc *sc,
16575 uint8_t idu_sb_id)
16576 {
16577 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16578 }
16579
16580
16581
16582
16583
16584
16585
16586 /*******************/
16587 /* ECORE CALLBACKS */
16588 /*******************/
16589
16590 static void
16591 bxe_reset_common(struct bxe_softc *sc)
16592 {
16593 uint32_t val = 0x1400;
16594
16595 /* reset_common */
16596 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16597
16598 if (CHIP_IS_E3(sc)) {
16599 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16600 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16601 }
16602
16603 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16604 }
16605
16606 static void
16607 bxe_common_init_phy(struct bxe_softc *sc)
16608 {
16609 uint32_t shmem_base[2];
16610 uint32_t shmem2_base[2];
16611
16612 /* Avoid common init in case MFW supports LFA */
16613 if (SHMEM2_RD(sc, size) >
16614 (uint32_t)offsetof(struct shmem2_region,
16615 lfa_host_addr[SC_PORT(sc)])) {
16616 return;
16617 }
16618
16619 shmem_base[0] = sc->devinfo.shmem_base;
16620 shmem2_base[0] = sc->devinfo.shmem2_base;
16621
16622 if (!CHIP_IS_E1x(sc)) {
16623 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr);
16624 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16625 }
16626
16627 bxe_acquire_phy_lock(sc);
16628 elink_common_init_phy(sc, shmem_base, shmem2_base,
16629 sc->devinfo.chip_id, 0);
16630 bxe_release_phy_lock(sc);
16631 }
16632
16633 static void
16634 bxe_pf_disable(struct bxe_softc *sc)
16635 {
16636 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16637
16638 val &= ~IGU_PF_CONF_FUNC_EN;
16639
16640 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16641 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16642 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16643 }
16644
16645 static void
16646 bxe_init_pxp(struct bxe_softc *sc)
16647 {
16648 uint16_t devctl;
16649 int r_order, w_order;
16650
16651 devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16652
16653 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16654
16655 w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16656
16657 if (sc->mrrs == -1) {
16658 r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16659 } else {
16660 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16661 r_order = sc->mrrs;
16662 }
16663
16664 ecore_init_pxp_arb(sc, r_order, w_order);
16665 }
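
/*
 * Illustrative decode (hypothetical devctl value, editorial addition):
 * per the PCIe spec both fields encode size as 128 << order. For
 * devctl = 0x2820:
 *
 *   w_order = (0x2820 & PCIEM_CTL_MAX_PAYLOAD) >> 5       = 1  (256-byte payload)
 *   r_order = (0x2820 & PCIEM_CTL_MAX_READ_REQUEST) >> 12 = 2  (512-byte read request)
 */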
16666
16667 static uint32_t
16668 bxe_get_pretend_reg(struct bxe_softc *sc)
16669 {
16670 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16671 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16672 return (base + (SC_ABS_FUNC(sc)) * stride);
16673 }
16674
16675 /*
16676 * Called only on E1H or E2.
16677 * When pretending to be PF, the pretend value is the function number 0..7.
16678  * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
16679 * combination.
16680 */
16681 static int
16682 bxe_pretend_func(struct bxe_softc *sc,
16683 uint16_t pretend_func_val)
16684 {
16685 uint32_t pretend_reg;
16686
16687 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16688 return (-1);
16689 }
16690
16691 /* get my own pretend register */
16692 pretend_reg = bxe_get_pretend_reg(sc);
16693 REG_WR(sc, pretend_reg, pretend_func_val);
16694 REG_RD(sc, pretend_reg);
16695 return (0);
16696 }
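
/*
 * A minimal usage sketch (editorial addition, mirroring the loop in
 * bxe_init_hw_common() below): pretend to be another function, poke its
 * registers, then always restore our own identity:
 *
 *   bxe_pretend_func(sc, abs_func_id);      // GRC accesses now hit that func
 *   bxe_pf_disable(sc);                     // e.g. clear its master-enable
 *   bxe_pretend_func(sc, SC_ABS_FUNC(sc));  // restore ourselves
 *
 * The read-back of the pretend register flushes the write so the pretend
 * takes effect before any subsequent access.
 */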
16697
16698 static void
16699 bxe_iov_init_dmae(struct bxe_softc *sc)
16700 {
16701 return;
16702 }
16703
16704 static void
16705 bxe_iov_init_dq(struct bxe_softc *sc)
16706 {
16707 return;
16708 }
16709
16710 /* send a NIG loopback debug packet */
16711 static void
16712 bxe_lb_pckt(struct bxe_softc *sc)
16713 {
16714 uint32_t wb_write[3];
16715
16716 /* Ethernet source and destination addresses */
16717 wb_write[0] = 0x55555555;
16718 wb_write[1] = 0x55555555;
16719 wb_write[2] = 0x20; /* SOP */
16720 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16721
16722 /* NON-IP protocol */
16723 wb_write[0] = 0x09000000;
16724 wb_write[1] = 0x55555555;
16725 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
16726 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16727 }
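
/*
 * Editorial note: each REG_WR_DMAE() above writes three dwords to the NIG
 * debug-packet register block -- two data words followed by a control
 * word. Per the inline comments, 0x20 in the control word marks
 * start-of-packet and 0x10 marks end-of-packet (with eop_bvalid = 0), so
 * the two writes together emit one minimal loopback frame.
 */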
16728
16729 /*
16730 * Some of the internal memories are not directly readable from the driver.
16731 * To test them we send debug packets.
16732 */
16733 static int
16734 bxe_int_mem_test(struct bxe_softc *sc)
16735 {
16736 int factor;
16737 int count, i;
16738 uint32_t val = 0;
16739
16740 if (CHIP_REV_IS_FPGA(sc)) {
16741 factor = 120;
16742 } else if (CHIP_REV_IS_EMUL(sc)) {
16743 factor = 200;
16744 } else {
16745 factor = 1;
16746 }
16747
16748 /* disable inputs of parser neighbor blocks */
16749 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16750 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16751 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16752 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16753
16754 /* write 0 to parser credits for CFC search request */
16755 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16756
16757 /* send Ethernet packet */
16758 bxe_lb_pckt(sc);
16759
16760     /* TODO: do we reset the NIG statistics? */
16761 /* Wait until NIG register shows 1 packet of size 0x10 */
16762 count = 1000 * factor;
16763 while (count) {
16764 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16765 val = *BXE_SP(sc, wb_data[0]);
16766 if (val == 0x10) {
16767 break;
16768 }
16769
16770 DELAY(10000);
16771 count--;
16772 }
16773
16774 if (val != 0x10) {
16775 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16776 return (-1);
16777 }
16778
16779 /* wait until PRS register shows 1 packet */
16780 count = (1000 * factor);
16781 while (count) {
16782 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16783 if (val == 1) {
16784 break;
16785 }
16786
16787 DELAY(10000);
16788 count--;
16789 }
16790
16791 if (val != 0x1) {
16792 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16793 return (-2);
16794 }
16795
16796 /* Reset and init BRB, PRS */
16797 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16798 DELAY(50000);
16799 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16800 DELAY(50000);
16801 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16802 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16803
16804 /* Disable inputs of parser neighbor blocks */
16805 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16806 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16807 REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16808 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16809
16810 /* Write 0 to parser credits for CFC search request */
16811 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16812
16813 /* send 10 Ethernet packets */
16814 for (i = 0; i < 10; i++) {
16815 bxe_lb_pckt(sc);
16816 }
16817
16818 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16819 count = (1000 * factor);
16820 while (count) {
16821 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16822 val = *BXE_SP(sc, wb_data[0]);
16823 if (val == 0xb0) {
16824 break;
16825 }
16826
16827 DELAY(10000);
16828 count--;
16829 }
16830
16831 if (val != 0xb0) {
16832 BLOGE(sc, "NIG timeout val=0x%x\n", val);
16833 return (-3);
16834 }
16835
16836 /* Wait until PRS register shows 2 packets */
16837 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16838 if (val != 2) {
16839 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16840 }
16841
16842 /* Write 1 to parser credits for CFC search request */
16843 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16844
16845 /* Wait until PRS register shows 3 packets */
16846 DELAY(10000 * factor);
16847
16848     /* check that the PRS register now shows 3 packets */
16849 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16850 if (val != 3) {
16851 BLOGE(sc, "PRS timeout val=0x%x\n", val);
16852 }
16853
16854 /* clear NIG EOP FIFO */
16855 for (i = 0; i < 11; i++) {
16856 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16857 }
16858
16859 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16860 if (val != 1) {
16861 BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16862 return (-4);
16863 }
16864
16865 /* Reset and init BRB, PRS, NIG */
16866 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16867 DELAY(50000);
16868 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16869 DELAY(50000);
16870 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16871 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16872 if (!CNIC_SUPPORT(sc)) {
16873 /* set NIC mode */
16874 REG_WR(sc, PRS_REG_NIC_MODE, 1);
16875 }
16876
16877 /* Enable inputs of parser neighbor blocks */
16878 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16879 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16880 REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16881 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16882
16883 return (0);
16884 }
16885
16886 static void
16887 bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16888 {
16889 int is_required;
16890 uint32_t val;
16891 int port;
16892
16893 is_required = 0;
16894 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16895 SHARED_HW_CFG_FAN_FAILURE_MASK);
16896
16897 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16898 is_required = 1;
16899 }
16900 /*
16901 * The fan failure mechanism is usually related to the PHY type since
16902 * the power consumption of the board is affected by the PHY. Currently,
16903      * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16904 */
16905 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16906 for (port = PORT_0; port < PORT_MAX; port++) {
16907 is_required |= elink_fan_failure_det_req(sc,
16908 sc->devinfo.shmem_base,
16909 sc->devinfo.shmem2_base,
16910 port);
16911 }
16912 }
16913
16914 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16915
16916 if (is_required == 0) {
16917 return;
16918 }
16919
16920 /* Fan failure is indicated by SPIO 5 */
16921 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16922
16923 /* set to active low mode */
16924 val = REG_RD(sc, MISC_REG_SPIO_INT);
16925 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16926 REG_WR(sc, MISC_REG_SPIO_INT, val);
16927
16928 /* enable interrupt to signal the IGU */
16929 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16930 val |= MISC_SPIO_SPIO5;
16931 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16932 }
16933
16934 static void
16935 bxe_enable_blocks_attention(struct bxe_softc *sc)
16936 {
16937 uint32_t val;
16938
16939 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16940 if (!CHIP_IS_E1x(sc)) {
16941 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16942 } else {
16943 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16944 }
16945 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16946 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16947 /*
16948      * mask read-length error interrupts in BRB for the parser
16949      * (parsing unit and 'checksum and crc' unit).
16950      * These errors are legal (the PU reads a fixed length and CAC can cause
16951      * a read-length error on truncated packets).
16952 */
16953 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16954 REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16955 REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16956 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16957 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16958 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16959 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16960 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16961 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16962 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16963 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16964 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16965 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16966 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16967 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16968 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16969 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16970 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16971 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16972
16973 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16974 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16975 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16976 if (!CHIP_IS_E1x(sc)) {
16977 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16978 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16979 }
16980 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16981
16982 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16983 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16984 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16985 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16986
16987 if (!CHIP_IS_E1x(sc)) {
16988 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16989 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16990 }
16991
16992 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16993 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16994 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16995 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
16996 }
16997
16998 /**
16999 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17000 *
17001 * @sc: driver handle
17002 */
17003 static int
17004 bxe_init_hw_common(struct bxe_softc *sc)
17005 {
17006 uint8_t abs_func_id;
17007 uint32_t val;
17008
17009 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17010 SC_ABS_FUNC(sc));
17011
17012 /*
17013 * take the RESET lock to protect undi_unload flow from accessing
17014 * registers while we are resetting the chip
17015 */
17016 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17017
17018 bxe_reset_common(sc);
17019
17020 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17021
17022 val = 0xfffc;
17023 if (CHIP_IS_E3(sc)) {
17024 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17025 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17026 }
17027
17028 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17029
17030 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17031
17032 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17033 BLOGD(sc, DBG_LOAD, "after misc block init\n");
17034
17035 if (!CHIP_IS_E1x(sc)) {
17036 /*
17037          * In 4-port or 2-port mode we need to turn off master-enable for
17038          * everyone. After that we turn it back on for ourselves. So, we disregard
17039          * multi-function, and always disable all functions on the given path;
17040          * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
17041 */
17042 for (abs_func_id = SC_PATH(sc);
17043 abs_func_id < (E2_FUNC_MAX * 2);
17044 abs_func_id += 2) {
17045 if (abs_func_id == SC_ABS_FUNC(sc)) {
17046 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17047 continue;
17048 }
17049
17050 bxe_pretend_func(sc, abs_func_id);
17051
17052 /* clear pf enable */
17053 bxe_pf_disable(sc);
17054
17055 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17056 }
17057 }
17058
17059 BLOGD(sc, DBG_LOAD, "after pf disable\n");
17060
17061 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17062
17063 if (CHIP_IS_E1(sc)) {
17064 /*
17065 * enable HW interrupt from PXP on USDM overflow
17066 * bit 16 on INT_MASK_0
17067 */
17068 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17069 }
17070
17071 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17072 bxe_init_pxp(sc);
17073
17074 #ifdef __BIG_ENDIAN
17075 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17076 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17077 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17078 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17079 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17080 /* make sure this value is 0 */
17081 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17082
17083 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17084 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17085 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17086 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17087 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17088 #endif
17089
17090 ecore_ilt_init_page_size(sc, INITOP_SET);
17091
17092 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17093 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17094 }
17095
17096     /* let the HW do its magic... */
17097 DELAY(100000);
17098
17099 /* finish PXP init */
17100 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17101 if (val != 1) {
17102 BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17103 val);
17104 return (-1);
17105 }
17106 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17107 if (val != 1) {
17108 BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17109 return (-1);
17110 }
17111
17112 BLOGD(sc, DBG_LOAD, "after pxp init\n");
17113
17114 /*
17115 * Timer bug workaround for E2 only. We need to set the entire ILT to have
17116      * entries with value "0" and the valid bit on. This needs to be done by the
17117 * first PF that is loaded in a path (i.e. common phase)
17118 */
17119 if (!CHIP_IS_E1x(sc)) {
17120 /*
17121 * In E2 there is a bug in the timers block that can cause function 6 / 7
17122 * (i.e. vnic3) to start even if it is marked as "scan-off".
17123 * This occurs when a different function (func2,3) is being marked
17124      * as "scan-off". A real-life example: a driver being loaded and
17125      * unloaded while func6,7 are down. This will cause the timer to access
17126 * the ilt, translate to a logical address and send a request to read/write.
17127 * Since the ilt for the function that is down is not valid, this will cause
17128 * a translation error which is unrecoverable.
17129 * The Workaround is intended to make sure that when this happens nothing
17130 * fatal will occur. The workaround:
17131 * 1. First PF driver which loads on a path will:
17132 * a. After taking the chip out of reset, by using pretend,
17133      * it will write "0" to the following registers of
17134 * the other vnics.
17135 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17136 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17137 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17138 * And for itself it will write '1' to
17139 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17140 * dmae-operations (writing to pram for example.)
17141      * note: this could be done only for functions 6,7, but it is
17142      * cleaner this way.
17143 * b. Write zero+valid to the entire ILT.
17144 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
17145 * VNIC3 (of that port). The range allocated will be the
17146 * entire ILT. This is needed to prevent ILT range error.
17147 * 2. Any PF driver load flow:
17148 * a. ILT update with the physical addresses of the allocated
17149 * logical pages.
17150 * b. Wait 20msec. - note that this timeout is needed to make
17151 * sure there are no requests in one of the PXP internal
17152 * queues with "old" ILT addresses.
17153 * c. PF enable in the PGLC.
17154 * d. Clear the was_error of the PF in the PGLC. (could have
17155 * occurred while driver was down)
17156 * e. PF enable in the CFC (WEAK + STRONG)
17157 * f. Timers scan enable
17158 * 3. PF driver unload flow:
17159 * a. Clear the Timers scan_en.
17160 * b. Polling for scan_on=0 for that PF.
17161 * c. Clear the PF enable bit in the PXP.
17162 * d. Clear the PF enable in the CFC (WEAK + STRONG)
17163 * e. Write zero+valid to all ILT entries (The valid bit must
17164 * stay set)
17165 * f. If this is VNIC 3 of a port then also init
17166 * first_timers_ilt_entry to zero and last_timers_ilt_entry
17167 * to the last entry in the ILT.
17168 *
17169 * Notes:
17170      * Currently the PF error in the PGLC is non-recoverable.
17171      * In the future there will be a recovery routine for this error.
17172 * Currently attention is masked.
17173 * Having an MCP lock on the load/unload process does not guarantee that
17174 * there is no Timer disable during Func6/7 enable. This is because the
17175 * Timers scan is currently being cleared by the MCP on FLR.
17176 * Step 2.d can be done only for PF6/7 and the driver can also check if
17177 * there is error before clearing it. But the flow above is simpler and
17178 * more general.
17179 * All ILT entries are written by zero+valid and not just PF6/7
17180 * ILT entries since in the future the ILT entries allocation for
17181 * PF-s might be dynamic.
17182 */
17183 struct ilt_client_info ilt_cli;
17184 struct ecore_ilt ilt;
17185
17186 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17187 memset(&ilt, 0, sizeof(struct ecore_ilt));
17188
17189 /* initialize dummy TM client */
17190 ilt_cli.start = 0;
17191 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
17192 ilt_cli.client_num = ILT_CLIENT_TM;
17193
17194 /*
17195 * Step 1: set zeroes to all ilt page entries with valid bit on
17196 * Step 2: set the timers first/last ilt entry to point
17197 * to the entire range to prevent ILT range error for 3rd/4th
17198 * vnic (this code assumes existence of the vnic)
17199 *
17200 * both steps performed by call to ecore_ilt_client_init_op()
17201 * with dummy TM client
17202 *
17203 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
17204      * and its counterpart are split registers
17205 */
17206
17207 bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17208 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17209 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17210
17211 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17212 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17213 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17214 }
17215
17216 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17217 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17218
17219 if (!CHIP_IS_E1x(sc)) {
17220 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17221 (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17222
17223 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17224 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17225
17226         /* let the HW do its magic... */
17227 do {
17228 DELAY(200000);
17229 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17230 } while (factor-- && (val != 1));
17231
17232 if (val != 1) {
17233 BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17234 return (-1);
17235 }
17236 }
17237
17238 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17239
17240 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17241
17242 bxe_iov_init_dmae(sc);
17243
17244 /* clean the DMAE memory */
17245 sc->dmae_ready = 1;
17246 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17247
17248 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17249
17250 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17251
17252 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17253
17254 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17255
17256 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17257 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17258 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17259 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17260
17261 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17262
17263 /* QM queues pointers table */
17264 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17265
17266 /* soft reset pulse */
17267 REG_WR(sc, QM_REG_SOFT_RESET, 1);
17268 REG_WR(sc, QM_REG_SOFT_RESET, 0);
17269
17270 if (CNIC_SUPPORT(sc))
17271 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17272
17273 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17274 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17275 if (!CHIP_REV_IS_SLOW(sc)) {
17276 /* enable hw interrupt from doorbell Q */
17277 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17278 }
17279
17280 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17281
17282 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17283 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17284
17285 if (!CHIP_IS_E1(sc)) {
17286 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17287 }
17288
17289 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17290 if (IS_MF_AFEX(sc)) {
17291 /*
17292 * configure that AFEX and VLAN headers must be
17293 * received in AFEX mode
17294 */
17295 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17296 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17297 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17298 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17299 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17300 } else {
17301 /*
17302 * Bit-map indicating which L2 hdrs may appear
17303 * after the basic Ethernet header
17304 */
17305 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17306 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17307 }
17308 }
17309
17310 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17311 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17312 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17313 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17314
17315 if (!CHIP_IS_E1x(sc)) {
17316 /* reset VFC memories */
17317 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17318 VFC_MEMORIES_RST_REG_CAM_RST |
17319 VFC_MEMORIES_RST_REG_RAM_RST);
17320 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17321 VFC_MEMORIES_RST_REG_CAM_RST |
17322 VFC_MEMORIES_RST_REG_RAM_RST);
17323
17324 DELAY(20000);
17325 }
17326
17327 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17328 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17329 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17330 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17331
17332 /* sync semi rtc */
17333 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17334 0x80000000);
17335 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17336 0x80000000);
17337
17338 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17339 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17340 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17341
17342 if (!CHIP_IS_E1x(sc)) {
17343 if (IS_MF_AFEX(sc)) {
17344 /*
17345 * configure that AFEX and VLAN headers must be
17346 * sent in AFEX mode
17347 */
17348 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17349 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17350 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17351 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17352 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17353 } else {
17354 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17355 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17356 }
17357 }
17358
17359 REG_WR(sc, SRC_REG_SOFT_RST, 1);
17360
17361 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17362
17363 if (CNIC_SUPPORT(sc)) {
17364 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17365 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17366 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17367 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17368 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17369 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17370 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17371 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17372 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17373 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17374 }
17375 REG_WR(sc, SRC_REG_SOFT_RST, 0);
17376
17377 if (sizeof(union cdu_context) != 1024) {
17378 /* we currently assume that a context is 1024 bytes */
17379 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17380 (long)sizeof(union cdu_context));
17381 }
17382
17383 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17384 val = (4 << 24) + (0 << 12) + 1024;
17385 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17386
17387 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17388
17389 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17390 /* enable context validation interrupt from CFC */
17391 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17392
17393 /* set the thresholds to prevent CFC/CDU race */
17394 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17395 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17396
17397 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17398 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17399 }
17400
17401 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17402 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17403
17404 /* Reset PCIE errors for debug */
17405 REG_WR(sc, 0x2814, 0xffffffff);
17406 REG_WR(sc, 0x3820, 0xffffffff);
17407
17408 if (!CHIP_IS_E1x(sc)) {
17409 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17410 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17411 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17412 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17413 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17414 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17415 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17416 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17417 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17418 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17419 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17420 }
17421
17422 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17423
17424 if (!CHIP_IS_E1(sc)) {
17425         /* in E3 this is done in the per-port section */
17426 if (!CHIP_IS_E3(sc))
17427 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17428 }
17429
17430 if (CHIP_IS_E1H(sc)) {
17431 /* not applicable for E2 (and above ...) */
17432 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17433 }
17434
17435 if (CHIP_REV_IS_SLOW(sc)) {
17436 DELAY(200000);
17437 }
17438
17439 /* finish CFC init */
17440 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17441 if (val != 1) {
17442 BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17443 return (-1);
17444 }
17445 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17446 if (val != 1) {
17447 BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17448 return (-1);
17449 }
17450 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17451 if (val != 1) {
17452 BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17453 return (-1);
17454 }
17455 REG_WR(sc, CFC_REG_DEBUG0, 0);
17456
17457 if (CHIP_IS_E1(sc)) {
17458 /* read NIG statistic to see if this is our first up since powerup */
17459 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17460 val = *BXE_SP(sc, wb_data[0]);
17461
17462 /* do internal memory self test */
17463 if ((val == 0) && bxe_int_mem_test(sc)) {
17464 BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17465 return (-1);
17466 }
17467 }
17468
17469 bxe_setup_fan_failure_detection(sc);
17470
17471 /* clear PXP2 attentions */
17472 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17473
17474 bxe_enable_blocks_attention(sc);
17475
17476 if (!CHIP_REV_IS_SLOW(sc)) {
17477 ecore_enable_blocks_parity(sc);
17478 }
17479
17480 if (!BXE_NOMCP(sc)) {
17481 if (CHIP_IS_E1x(sc)) {
17482 bxe_common_init_phy(sc);
17483 }
17484 }
17485
17486 return (0);
17487 }
17488
17489 /**
17490 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17491 *
17492 * @sc: driver handle
17493 */
17494 static int
17495 bxe_init_hw_common_chip(struct bxe_softc *sc)
17496 {
17497 int rc = bxe_init_hw_common(sc);
17498
17499 if (rc) {
17500 BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17501 return (rc);
17502 }
17503
17504 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17505 if (!BXE_NOMCP(sc)) {
17506 bxe_common_init_phy(sc);
17507 }
17508
17509 return (0);
17510 }
17511
17512 static int
17513 bxe_init_hw_port(struct bxe_softc *sc)
17514 {
17515 int port = SC_PORT(sc);
17516 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17517 uint32_t low, high;
17518 uint32_t val;
17519
17520 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17521
17522 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17523
17524 ecore_init_block(sc, BLOCK_MISC, init_phase);
17525 ecore_init_block(sc, BLOCK_PXP, init_phase);
17526 ecore_init_block(sc, BLOCK_PXP2, init_phase);
17527
17528 /*
17529      * Timers bug workaround: the common phase disables the pf_master bit in
17530      * pglue, so we need to enable it here before any dmae access is
17531      * attempted. Therefore we manually add the enable-master in the
17532      * port phase (it also happens in the function phase)
17533 */
17534 if (!CHIP_IS_E1x(sc)) {
17535 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17536 }
17537
17538 ecore_init_block(sc, BLOCK_ATC, init_phase);
17539 ecore_init_block(sc, BLOCK_DMAE, init_phase);
17540 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17541 ecore_init_block(sc, BLOCK_QM, init_phase);
17542
17543 ecore_init_block(sc, BLOCK_TCM, init_phase);
17544 ecore_init_block(sc, BLOCK_UCM, init_phase);
17545 ecore_init_block(sc, BLOCK_CCM, init_phase);
17546 ecore_init_block(sc, BLOCK_XCM, init_phase);
17547
17548 /* QM cid (connection) count */
17549 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17550
17551 if (CNIC_SUPPORT(sc)) {
17552 ecore_init_block(sc, BLOCK_TM, init_phase);
17553 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17554 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17555 }
17556
17557 ecore_init_block(sc, BLOCK_DORQ, init_phase);
17558
17559 ecore_init_block(sc, BLOCK_BRB1, init_phase);
17560
17561 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17562 if (IS_MF(sc)) {
17563 low = (BXE_ONE_PORT(sc) ? 160 : 246);
17564 } else if (sc->mtu > 4096) {
17565 if (BXE_ONE_PORT(sc)) {
17566 low = 160;
17567 } else {
17568 val = sc->mtu;
17569 /* (24*1024 + val*4)/256 */
17570 low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17571 }
17572 } else {
17573 low = (BXE_ONE_PORT(sc) ? 80 : 160);
17574 }
17575 high = (low + 56); /* 14*1024/256 */
17576 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17577 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17578 }
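    /*
     * Worked example (illustrative, added for exposition): for a two-port,
     * non-MF setup with mtu 9000 the formula above works out to
     * low = 96 + 9000/64 + 1 = 237 blocks, i.e. (24*1024 + 9000*4)/256
     * rounded up, and high = 237 + 56 = 293 blocks, where each BRB block
     * is 256 bytes.
     */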
17579
17580 if (CHIP_IS_MODE_4_PORT(sc)) {
17581 REG_WR(sc, SC_PORT(sc) ?
17582 BRB1_REG_MAC_GUARANTIED_1 :
17583 BRB1_REG_MAC_GUARANTIED_0, 40);
17584 }
17585
17586 ecore_init_block(sc, BLOCK_PRS, init_phase);
17587 if (CHIP_IS_E3B0(sc)) {
17588 if (IS_MF_AFEX(sc)) {
17589 /* configure headers for AFEX mode */
17590 REG_WR(sc, SC_PORT(sc) ?
17591 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17592 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17593 REG_WR(sc, SC_PORT(sc) ?
17594 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17595 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17596 REG_WR(sc, SC_PORT(sc) ?
17597 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17598 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17599 } else {
17600 /* Ovlan exists only if we are in multi-function +
17601              * switch-dependent mode; in switch-independent mode there
17602              * are no ovlan headers
17603 */
17604 REG_WR(sc, SC_PORT(sc) ?
17605 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17606 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17607 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17608 }
17609 }
17610
17611 ecore_init_block(sc, BLOCK_TSDM, init_phase);
17612 ecore_init_block(sc, BLOCK_CSDM, init_phase);
17613 ecore_init_block(sc, BLOCK_USDM, init_phase);
17614 ecore_init_block(sc, BLOCK_XSDM, init_phase);
17615
17616 ecore_init_block(sc, BLOCK_TSEM, init_phase);
17617 ecore_init_block(sc, BLOCK_USEM, init_phase);
17618 ecore_init_block(sc, BLOCK_CSEM, init_phase);
17619 ecore_init_block(sc, BLOCK_XSEM, init_phase);
17620
17621 ecore_init_block(sc, BLOCK_UPB, init_phase);
17622 ecore_init_block(sc, BLOCK_XPB, init_phase);
17623
17624 ecore_init_block(sc, BLOCK_PBF, init_phase);
17625
17626 if (CHIP_IS_E1x(sc)) {
17627 /* configure PBF to work without PAUSE mtu 9000 */
17628 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17629
17630 /* update threshold */
17631 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17632 /* update init credit */
17633 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17634
17635 /* probe changes */
17636 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17637 DELAY(50);
17638 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17639 }
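    /*
     * Worked example (illustrative): the PBF values above are in 16-byte
     * units, so 9040/16 = 565 credits cover one full mtu-9000 frame
     * (presumably 9000 bytes plus L2 overhead), and the init credit works
     * out to 565 + 553 - 22 = 1096.
     */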
17640
17641 if (CNIC_SUPPORT(sc)) {
17642 ecore_init_block(sc, BLOCK_SRC, init_phase);
17643 }
17644
17645 ecore_init_block(sc, BLOCK_CDU, init_phase);
17646 ecore_init_block(sc, BLOCK_CFC, init_phase);
17647
17648 if (CHIP_IS_E1(sc)) {
17649 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17650 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17651 }
17652 ecore_init_block(sc, BLOCK_HC, init_phase);
17653
17654 ecore_init_block(sc, BLOCK_IGU, init_phase);
17655
17656 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17657 /* init aeu_mask_attn_func_0/1:
17658 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17659 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17660 * bits 4-7 are used for "per vn group attention" */
17661 val = IS_MF(sc) ? 0xF7 : 0x7;
17662 /* Enable DCBX attention for all but E1 */
17663 val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17664 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17665
17666 ecore_init_block(sc, BLOCK_NIG, init_phase);
17667
17668 if (!CHIP_IS_E1x(sc)) {
17669 /* Bit-map indicating which L2 hdrs may appear after the
17670 * basic Ethernet header
17671 */
17672 if (IS_MF_AFEX(sc)) {
17673 REG_WR(sc, SC_PORT(sc) ?
17674 NIG_REG_P1_HDRS_AFTER_BASIC :
17675 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17676 } else {
17677 REG_WR(sc, SC_PORT(sc) ?
17678 NIG_REG_P1_HDRS_AFTER_BASIC :
17679 NIG_REG_P0_HDRS_AFTER_BASIC,
17680 IS_MF_SD(sc) ? 7 : 6);
17681 }
17682
17683 if (CHIP_IS_E3(sc)) {
17684 REG_WR(sc, SC_PORT(sc) ?
17685 NIG_REG_LLH1_MF_MODE :
17686 NIG_REG_LLH_MF_MODE, IS_MF(sc));
17687 }
17688 }
17689 if (!CHIP_IS_E3(sc)) {
17690 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17691 }
17692
17693 if (!CHIP_IS_E1(sc)) {
17694 /* 0x2 disable mf_ov, 0x1 enable */
17695 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17696 (IS_MF_SD(sc) ? 0x1 : 0x2));
17697
17698 if (!CHIP_IS_E1x(sc)) {
17699 val = 0;
17700 switch (sc->devinfo.mf_info.mf_mode) {
17701 case MULTI_FUNCTION_SD:
17702 val = 1;
17703 break;
17704 case MULTI_FUNCTION_SI:
17705 case MULTI_FUNCTION_AFEX:
17706 val = 2;
17707 break;
17708 }
17709
17710 REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17711 NIG_REG_LLH0_CLS_TYPE), val);
17712 }
17713 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17714 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17715 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17716 }
17717
17718 /* If SPIO5 is set to generate interrupts, enable it for this port */
17719 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17720 if (val & MISC_SPIO_SPIO5) {
17721 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17722 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17723 val = REG_RD(sc, reg_addr);
17724 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17725 REG_WR(sc, reg_addr, val);
17726 }
17727
17728 return (0);
17729 }
17730
17731 static uint32_t
17732 bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17733 uint32_t reg,
17734 uint32_t expected,
17735 uint32_t poll_count)
17736 {
17737 uint32_t cur_cnt = poll_count;
17738 uint32_t val;
17739
17740 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17741 DELAY(FLR_WAIT_INTERVAL);
17742 }
17743
17744 return (val);
17745 }
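/*
 * Note (illustrative, added for exposition): each retry above sleeps
 * FLR_WAIT_INTERVAL (50 usec), so the default FLR_POLL_CNT of 200 retries
 * spans roughly the 10 msec FLR_WAIT_USEC budget on real silicon;
 * bxe_flr_clnup_poll_count() below stretches the count by 2000x for
 * emulation and 120x for FPGA platforms.
 */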
17746
17747 static int
17748 bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17749 uint32_t reg,
17750 char *msg,
17751 uint32_t poll_cnt)
17752 {
17753 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17754
17755 if (val != 0) {
17756 BLOGE(sc, "%s usage count=%d\n", msg, val);
17757 return (1);
17758 }
17759
17760 return (0);
17761 }
17762
17763 /* Common routines with VF FLR cleanup */
17764 static uint32_t
17765 bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17766 {
17767 /* adjust polling timeout */
17768 if (CHIP_REV_IS_EMUL(sc)) {
17769 return (FLR_POLL_CNT * 2000);
17770 }
17771
17772 if (CHIP_REV_IS_FPGA(sc)) {
17773 return (FLR_POLL_CNT * 120);
17774 }
17775
17776 return (FLR_POLL_CNT);
17777 }
17778
17779 static int
17780 bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17781 uint32_t poll_cnt)
17782 {
17783 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17784 if (bxe_flr_clnup_poll_hw_counter(sc,
17785 CFC_REG_NUM_LCIDS_INSIDE_PF,
17786 "CFC PF usage counter timed out",
17787 poll_cnt)) {
17788 return (1);
17789 }
17790
17791 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17792 if (bxe_flr_clnup_poll_hw_counter(sc,
17793 DORQ_REG_PF_USAGE_CNT,
17794 "DQ PF usage counter timed out",
17795 poll_cnt)) {
17796 return (1);
17797 }
17798
17799 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17800 if (bxe_flr_clnup_poll_hw_counter(sc,
17801 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17802 "QM PF usage counter timed out",
17803 poll_cnt)) {
17804 return (1);
17805 }
17806
17807 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17808 if (bxe_flr_clnup_poll_hw_counter(sc,
17809 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17810 "Timers VNIC usage counter timed out",
17811 poll_cnt)) {
17812 return (1);
17813 }
17814
17815 if (bxe_flr_clnup_poll_hw_counter(sc,
17816 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17817 "Timers NUM_SCANS usage counter timed out",
17818 poll_cnt)) {
17819 return (1);
17820 }
17821
17822 /* Wait DMAE PF usage counter to zero */
17823 if (bxe_flr_clnup_poll_hw_counter(sc,
17824 dmae_reg_go_c[INIT_DMAE_C(sc)],
17825                                       "DMAE command register timed out",
17826 poll_cnt)) {
17827 return (1);
17828 }
17829
17830 return (0);
17831 }
17832
17833 #define OP_GEN_PARAM(param) \
17834 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17835 #define OP_GEN_TYPE(type) \
17836 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17837 #define OP_GEN_AGG_VECT(index) \
17838 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
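/*
 * Note (illustrative): each helper above shifts its argument into the
 * corresponding SDM_OP_GEN_* field and masks off any bits that overflow
 * the field, so e.g. OP_GEN_AGG_VECT(clnup_func) yields just the
 * aggregated-interrupt vector field of the command word assembled in
 * bxe_send_final_clnup() below.
 */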
17839
17840 static int
17841 bxe_send_final_clnup(struct bxe_softc *sc,
17842 uint8_t clnup_func,
17843 uint32_t poll_cnt)
17844 {
17845 uint32_t op_gen_command = 0;
17846 uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17847 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17848 int ret = 0;
17849
17850 if (REG_RD(sc, comp_addr)) {
17851 BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17852 return (1);
17853 }
17854
17855 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17856 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17857 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17858 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17859
17860 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17861 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17862
17863 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17864 BLOGE(sc, "FW final cleanup did not succeed\n");
17865 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17866 (REG_RD(sc, comp_addr)));
17867 bxe_panic(sc, ("FLR cleanup failed\n"));
17868 return (1);
17869 }
17870
17871     /* Zero completion for next FLR */
17872 REG_WR(sc, comp_addr, 0);
17873
17874 return (ret);
17875 }
17876
17877 static void
17878 bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
17879 struct pbf_pN_buf_regs *regs,
17880 uint32_t poll_count)
17881 {
17882 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17883 uint32_t cur_cnt = poll_count;
17884
17885 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17886 crd = crd_start = REG_RD(sc, regs->crd);
17887 init_crd = REG_RD(sc, regs->init_crd);
17888
17889 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17890 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
17891 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17892
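    /*
     * The cast sequence in the loop condition below computes a
     * wraparound-safe delta, so the wait still terminates correctly if the
     * crd_freed hardware counter rolls over while we poll.
     */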
17893 while ((crd != init_crd) &&
17894 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17895 (init_crd - crd_start))) {
17896 if (cur_cnt--) {
17897 DELAY(FLR_WAIT_INTERVAL);
17898 crd = REG_RD(sc, regs->crd);
17899 crd_freed = REG_RD(sc, regs->crd_freed);
17900 } else {
17901 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17902 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
17903 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17904 break;
17905 }
17906 }
17907
17908 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17909 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17910 }
17911
17912 static void
17913 bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
17914 struct pbf_pN_cmd_regs *regs,
17915 uint32_t poll_count)
17916 {
17917 uint32_t occup, to_free, freed, freed_start;
17918 uint32_t cur_cnt = poll_count;
17919
17920 occup = to_free = REG_RD(sc, regs->lines_occup);
17921 freed = freed_start = REG_RD(sc, regs->lines_freed);
17922
17923 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17924 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17925
17926 while (occup &&
17927 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17928 if (cur_cnt--) {
17929 DELAY(FLR_WAIT_INTERVAL);
17930 occup = REG_RD(sc, regs->lines_occup);
17931 freed = REG_RD(sc, regs->lines_freed);
17932 } else {
17933 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17934             BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : c:%x\n", regs->pN, occup);
17935             BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : c:%x\n", regs->pN, freed);
17936 break;
17937 }
17938 }
17939
17940 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17941 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17942 }
17943
17944 static void
17945 bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17946 {
17947 struct pbf_pN_cmd_regs cmd_regs[] = {
17948 {0, (CHIP_IS_E3B0(sc)) ?
17949 PBF_REG_TQ_OCCUPANCY_Q0 :
17950 PBF_REG_P0_TQ_OCCUPANCY,
17951 (CHIP_IS_E3B0(sc)) ?
17952 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17953 PBF_REG_P0_TQ_LINES_FREED_CNT},
17954 {1, (CHIP_IS_E3B0(sc)) ?
17955 PBF_REG_TQ_OCCUPANCY_Q1 :
17956 PBF_REG_P1_TQ_OCCUPANCY,
17957 (CHIP_IS_E3B0(sc)) ?
17958 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17959 PBF_REG_P1_TQ_LINES_FREED_CNT},
17960 {4, (CHIP_IS_E3B0(sc)) ?
17961 PBF_REG_TQ_OCCUPANCY_LB_Q :
17962 PBF_REG_P4_TQ_OCCUPANCY,
17963 (CHIP_IS_E3B0(sc)) ?
17964 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17965 PBF_REG_P4_TQ_LINES_FREED_CNT}
17966 };
17967
17968 struct pbf_pN_buf_regs buf_regs[] = {
17969 {0, (CHIP_IS_E3B0(sc)) ?
17970 PBF_REG_INIT_CRD_Q0 :
17971 PBF_REG_P0_INIT_CRD ,
17972 (CHIP_IS_E3B0(sc)) ?
17973 PBF_REG_CREDIT_Q0 :
17974 PBF_REG_P0_CREDIT,
17975 (CHIP_IS_E3B0(sc)) ?
17976 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17977 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17978 {1, (CHIP_IS_E3B0(sc)) ?
17979 PBF_REG_INIT_CRD_Q1 :
17980 PBF_REG_P1_INIT_CRD,
17981 (CHIP_IS_E3B0(sc)) ?
17982 PBF_REG_CREDIT_Q1 :
17983 PBF_REG_P1_CREDIT,
17984 (CHIP_IS_E3B0(sc)) ?
17985 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17986 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17987 {4, (CHIP_IS_E3B0(sc)) ?
17988 PBF_REG_INIT_CRD_LB_Q :
17989 PBF_REG_P4_INIT_CRD,
17990 (CHIP_IS_E3B0(sc)) ?
17991 PBF_REG_CREDIT_LB_Q :
17992 PBF_REG_P4_CREDIT,
17993 (CHIP_IS_E3B0(sc)) ?
17994 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17995 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17996 };
17997
17998 int i;
17999
18000 /* Verify the command queues are flushed P0, P1, P4 */
18001 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18002 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18003 }
18004
18005 /* Verify the transmission buffers are flushed P0, P1, P4 */
18006 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18007 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18008 }
18009 }
18010
18011 static void
18012 bxe_hw_enable_status(struct bxe_softc *sc)
18013 {
18014 uint32_t val;
18015
18016 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18017 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18018
18019 val = REG_RD(sc, PBF_REG_DISABLE_PF);
18020 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18021
18022 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18023 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18024
18025 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18026 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18027
18028 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18029 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18030
18031 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18032 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18033
18034 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18035 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18036
18037 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18038 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18039 }
18040
18041 static int
18042 bxe_pf_flr_clnup(struct bxe_softc *sc)
18043 {
18044 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18045
18046 BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18047
18048 /* Re-enable PF target read access */
18049 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18050
18051 /* Poll HW usage counters */
18052 BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18053 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18054 return (-1);
18055 }
18056
18057 /* Zero the igu 'trailing edge' and 'leading edge' */
18058
18059 /* Send the FW cleanup command */
18060 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18061 return (-1);
18062 }
18063
18064 /* ATC cleanup */
18065
18066 /* Verify TX hw is flushed */
18067 bxe_tx_hw_flushed(sc, poll_cnt);
18068
18069 /* Wait 100ms (not adjusted according to platform) */
18070 DELAY(100000);
18071
18072 /* Verify no pending pci transactions */
18073 if (bxe_is_pcie_pending(sc)) {
18074 BLOGE(sc, "PCIE Transactions still pending\n");
18075 }
18076
18077 /* Debug */
18078 bxe_hw_enable_status(sc);
18079
18080 /*
18081      * Master enable - needed because WB DMAE writes are performed before
18082      * this register is re-initialized as part of the regular function init
18083 */
18084 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18085
18086 return (0);
18087 }
18088
18089 static int
18090 bxe_init_hw_func(struct bxe_softc *sc)
18091 {
18092 int port = SC_PORT(sc);
18093 int func = SC_FUNC(sc);
18094 int init_phase = PHASE_PF0 + func;
18095 struct ecore_ilt *ilt = sc->ilt;
18096 uint16_t cdu_ilt_start;
18097 uint32_t addr, val;
18098 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18099 int i, main_mem_width, rc;
18100
18101 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18102
18103 /* FLR cleanup */
18104 if (!CHIP_IS_E1x(sc)) {
18105 rc = bxe_pf_flr_clnup(sc);
18106 if (rc) {
18107 BLOGE(sc, "FLR cleanup failed!\n");
18108 // XXX bxe_fw_dump(sc);
18109 // XXX bxe_idle_chk(sc);
18110 return (rc);
18111 }
18112 }
18113
18114 /* set MSI reconfigure capability */
18115 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18116 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18117 val = REG_RD(sc, addr);
18118 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18119 REG_WR(sc, addr, val);
18120 }
18121
18122 ecore_init_block(sc, BLOCK_PXP, init_phase);
18123 ecore_init_block(sc, BLOCK_PXP2, init_phase);
18124
18125 ilt = sc->ilt;
18126 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18127
18128 for (i = 0; i < L2_ILT_LINES(sc); i++) {
18129 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18130 ilt->lines[cdu_ilt_start + i].page_mapping =
18131 sc->context[i].vcxt_dma.paddr;
18132 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18133 }
18134 ecore_ilt_init_op(sc, INITOP_SET);
18135
18136 /* Set NIC mode */
18137 REG_WR(sc, PRS_REG_NIC_MODE, 1);
18138 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18139
18140 if (!CHIP_IS_E1x(sc)) {
18141 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18142
18143 /* Turn on a single ISR mode in IGU if driver is going to use
18144 * INT#x or MSI
18145 */
18146 if (sc->interrupt_mode != INTR_MODE_MSIX) {
18147 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18148 }
18149
18150 /*
18151          * Timers bug workaround: function init part.
18152          * We need to wait 20 msec after initializing the ILT to make
18153          * sure there are no requests in one of the PXP internal
18154          * queues with "old" ILT addresses.
18155 */
18156 DELAY(20000);
18157
18158 /*
18159          * Master enable - needed because WB DMAE writes are performed before this
18160 * register is re-initialized as part of the regular function
18161 * init
18162 */
18163 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18164 /* Enable the function in IGU */
18165 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18166 }
18167
18168 sc->dmae_ready = 1;
18169
18170 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18171
18172 if (!CHIP_IS_E1x(sc))
18173 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18174
18175 ecore_init_block(sc, BLOCK_ATC, init_phase);
18176 ecore_init_block(sc, BLOCK_DMAE, init_phase);
18177 ecore_init_block(sc, BLOCK_NIG, init_phase);
18178 ecore_init_block(sc, BLOCK_SRC, init_phase);
18179 ecore_init_block(sc, BLOCK_MISC, init_phase);
18180 ecore_init_block(sc, BLOCK_TCM, init_phase);
18181 ecore_init_block(sc, BLOCK_UCM, init_phase);
18182 ecore_init_block(sc, BLOCK_CCM, init_phase);
18183 ecore_init_block(sc, BLOCK_XCM, init_phase);
18184 ecore_init_block(sc, BLOCK_TSEM, init_phase);
18185 ecore_init_block(sc, BLOCK_USEM, init_phase);
18186 ecore_init_block(sc, BLOCK_CSEM, init_phase);
18187 ecore_init_block(sc, BLOCK_XSEM, init_phase);
18188
18189 if (!CHIP_IS_E1x(sc))
18190 REG_WR(sc, QM_REG_PF_EN, 1);
18191
18192 if (!CHIP_IS_E1x(sc)) {
18193 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18194 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18195 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18196 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18197 }
18198 ecore_init_block(sc, BLOCK_QM, init_phase);
18199
18200 ecore_init_block(sc, BLOCK_TM, init_phase);
18201 ecore_init_block(sc, BLOCK_DORQ, init_phase);
18202
18203 bxe_iov_init_dq(sc);
18204
18205 ecore_init_block(sc, BLOCK_BRB1, init_phase);
18206 ecore_init_block(sc, BLOCK_PRS, init_phase);
18207 ecore_init_block(sc, BLOCK_TSDM, init_phase);
18208 ecore_init_block(sc, BLOCK_CSDM, init_phase);
18209 ecore_init_block(sc, BLOCK_USDM, init_phase);
18210 ecore_init_block(sc, BLOCK_XSDM, init_phase);
18211 ecore_init_block(sc, BLOCK_UPB, init_phase);
18212 ecore_init_block(sc, BLOCK_XPB, init_phase);
18213 ecore_init_block(sc, BLOCK_PBF, init_phase);
18214 if (!CHIP_IS_E1x(sc))
18215 REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18216
18217 ecore_init_block(sc, BLOCK_CDU, init_phase);
18218
18219 ecore_init_block(sc, BLOCK_CFC, init_phase);
18220
18221 if (!CHIP_IS_E1x(sc))
18222 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18223
18224 if (IS_MF(sc)) {
18225 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18226 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18227 }
18228
18229 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18230
18231 /* HC init per function */
18232 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18233 if (CHIP_IS_E1H(sc)) {
18234 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18235
18236 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18237 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18238 }
18239 ecore_init_block(sc, BLOCK_HC, init_phase);
18240
18241 } else {
18242 int num_segs, sb_idx, prod_offset;
18243
18244 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18245
18246 if (!CHIP_IS_E1x(sc)) {
18247 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18248 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18249 }
18250
18251 ecore_init_block(sc, BLOCK_IGU, init_phase);
18252
18253 if (!CHIP_IS_E1x(sc)) {
18254 int dsb_idx = 0;
18255 /**
18256 * Producer memory:
18257 * E2 mode: address 0-135 match to the mapping memory;
18258 * 136 - PF0 default prod; 137 - PF1 default prod;
18259 * 138 - PF2 default prod; 139 - PF3 default prod;
18260 * 140 - PF0 attn prod; 141 - PF1 attn prod;
18261 * 142 - PF2 attn prod; 143 - PF3 attn prod;
18262 * 144-147 reserved.
18263 *
18264              * E1.5 mode - in backward compatible mode,
18265              * for non-default SBs, each even line in the memory
18266              * holds the U producer and each odd line holds
18267 * the C producer. The first 128 producers are for
18268 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18269 * producers are for the DSB for each PF.
18270 * Each PF has five segments: (the order inside each
18271 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18272 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18273 * 144-147 attn prods;
18274 */
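            /*
             * Worked example (illustrative): with the layout above,
             * non-default SB k owns num_segs consecutive producer words
             * starting at word (igu_base_sb + k) * num_segs; the loop below
             * zeroes each of them before sending the initial consumer
             * update.
             */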
18275 /* non-default-status-blocks */
18276 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18277 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18278 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18279 prod_offset = (sc->igu_base_sb + sb_idx) *
18280 num_segs;
18281
18282 for (i = 0; i < num_segs; i++) {
18283 addr = IGU_REG_PROD_CONS_MEMORY +
18284 (prod_offset + i) * 4;
18285 REG_WR(sc, addr, 0);
18286 }
18287 /* send consumer update with value 0 */
18288 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18289 USTORM_ID, 0, IGU_INT_NOP, 1);
18290 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18291 }
18292
18293 /* default-status-blocks */
18294 num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18295 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18296
18297 if (CHIP_IS_MODE_4_PORT(sc))
18298 dsb_idx = SC_FUNC(sc);
18299 else
18300 dsb_idx = SC_VN(sc);
18301
18302 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18303 IGU_BC_BASE_DSB_PROD + dsb_idx :
18304 IGU_NORM_BASE_DSB_PROD + dsb_idx);
18305
18306 /*
18307 * igu prods come in chunks of E1HVN_MAX (4) -
18308              * it does not matter what the current chip mode is
18309 */
18310 for (i = 0; i < (num_segs * E1HVN_MAX);
18311 i += E1HVN_MAX) {
18312 addr = IGU_REG_PROD_CONS_MEMORY +
18313 (prod_offset + i)*4;
18314 REG_WR(sc, addr, 0);
18315 }
18316 /* send consumer update with 0 */
18317 if (CHIP_INT_MODE_IS_BC(sc)) {
18318 bxe_ack_sb(sc, sc->igu_dsb_id,
18319 USTORM_ID, 0, IGU_INT_NOP, 1);
18320 bxe_ack_sb(sc, sc->igu_dsb_id,
18321 CSTORM_ID, 0, IGU_INT_NOP, 1);
18322 bxe_ack_sb(sc, sc->igu_dsb_id,
18323 XSTORM_ID, 0, IGU_INT_NOP, 1);
18324 bxe_ack_sb(sc, sc->igu_dsb_id,
18325 TSTORM_ID, 0, IGU_INT_NOP, 1);
18326 bxe_ack_sb(sc, sc->igu_dsb_id,
18327 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18328 } else {
18329 bxe_ack_sb(sc, sc->igu_dsb_id,
18330 USTORM_ID, 0, IGU_INT_NOP, 1);
18331 bxe_ack_sb(sc, sc->igu_dsb_id,
18332 ATTENTION_ID, 0, IGU_INT_NOP, 1);
18333 }
18334 bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18335
18336 /* !!! these should become driver const once
18337 rf-tool supports split-68 const */
18338 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18339 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18340 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18341 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18342 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18343 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18344 }
18345 }
18346
18347 /* Reset PCIE errors for debug */
18348 REG_WR(sc, 0x2114, 0xffffffff);
18349 REG_WR(sc, 0x2120, 0xffffffff);
18350
18351 if (CHIP_IS_E1x(sc)) {
18352 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18353 main_mem_base = HC_REG_MAIN_MEMORY +
18354 SC_PORT(sc) * (main_mem_size * 4);
18355 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18356 main_mem_width = 8;
18357
18358 val = REG_RD(sc, main_mem_prty_clr);
18359 if (val) {
18360 BLOGD(sc, DBG_LOAD,
18361 "Parity errors in HC block during function init (0x%x)!\n",
18362 val);
18363 }
18364
18365 /* Clear "false" parity errors in MSI-X table */
18366 for (i = main_mem_base;
18367 i < main_mem_base + main_mem_size * 4;
18368 i += main_mem_width) {
18369 bxe_read_dmae(sc, i, main_mem_width / 4);
18370 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18371 i, main_mem_width / 4);
18372 }
18373 /* Clear HC parity attention */
18374 REG_RD(sc, main_mem_prty_clr);
18375 }
18376
18377 #if 1
18378 /* Enable STORMs SP logging */
18379 REG_WR8(sc, BAR_USTRORM_INTMEM +
18380 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18381 REG_WR8(sc, BAR_TSTRORM_INTMEM +
18382 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18383 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18384 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18385 REG_WR8(sc, BAR_XSTRORM_INTMEM +
18386 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18387 #endif
18388
18389 elink_phy_probe(&sc->link_params);
18390
18391 return (0);
18392 }
18393
18394 static void
18395 bxe_link_reset(struct bxe_softc *sc)
18396 {
18397 if (!BXE_NOMCP(sc)) {
18398 bxe_acquire_phy_lock(sc);
18399 elink_lfa_reset(&sc->link_params, &sc->link_vars);
18400 bxe_release_phy_lock(sc);
18401 } else {
18402 if (!CHIP_REV_IS_SLOW(sc)) {
18403 BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18404 }
18405 }
18406 }
18407
18408 static void
18409 bxe_reset_port(struct bxe_softc *sc)
18410 {
18411 int port = SC_PORT(sc);
18412 uint32_t val;
18413
18414 ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18415 /* reset physical Link */
18416 bxe_link_reset(sc);
18417
18418 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18419
18420 /* Do not rcv packets to BRB */
18421 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18422 /* Do not direct rcv packets that are not for MCP to the BRB */
18423 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18424 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18425
18426 /* Configure AEU */
18427 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18428
18429 DELAY(100000);
18430
18431 /* Check for BRB port occupancy */
18432 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18433 if (val) {
18434 BLOGD(sc, DBG_LOAD,
18435 "BRB1 is not empty, %d blocks are occupied\n", val);
18436 }
18437
18438 /* TODO: Close Doorbell port? */
18439 }
18440
18441 static void
18442 bxe_ilt_wr(struct bxe_softc *sc,
18443 uint32_t index,
18444 bus_addr_t addr)
18445 {
18446 int reg;
18447 uint32_t wb_write[2];
18448
18449 if (CHIP_IS_E1(sc)) {
18450 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18451 } else {
18452 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18453 }
18454
18455 wb_write[0] = ONCHIP_ADDR1(addr);
18456 wb_write[1] = ONCHIP_ADDR2(addr);
18457 REG_WR_DMAE(sc, reg, wb_write, 2);
18458 }
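/*
 * Note (illustrative): an ILT entry occupies two 32-bit words, so the
 * write above encodes the bus address via ONCHIP_ADDR1/ONCHIP_ADDR2 and
 * issues both words in a single DMAE transaction; bxe_clear_func_ilt()
 * below uses this to zero a function's entire ILT range.
 */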
18459
18460 static void
18461 bxe_clear_func_ilt(struct bxe_softc *sc,
18462 uint32_t func)
18463 {
18464 uint32_t i, base = FUNC_ILT_BASE(func);
18465 for (i = base; i < base + ILT_PER_FUNC; i++) {
18466 bxe_ilt_wr(sc, i, 0);
18467 }
18468 }
18469
18470 static void
18471 bxe_reset_func(struct bxe_softc *sc)
18472 {
18473 struct bxe_fastpath *fp;
18474 int port = SC_PORT(sc);
18475 int func = SC_FUNC(sc);
18476 int i;
18477
18478 /* Disable the function in the FW */
18479 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18480 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18481 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18482 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18483
18484 /* FP SBs */
18485 FOR_EACH_ETH_QUEUE(sc, i) {
18486 fp = &sc->fp[i];
18487 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18488 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18489 SB_DISABLED);
18490 }
18491
18492 /* SP SB */
18493 REG_WR8(sc, BAR_CSTRORM_INTMEM +
18494 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18495 SB_DISABLED);
18496
18497 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18498         REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) + i*4, 0);
18499 }
18500
18501 /* Configure IGU */
18502 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18503 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18504 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18505 } else {
18506 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18507 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18508 }
18509
18510 if (CNIC_LOADED(sc)) {
18511 /* Disable Timer scan */
18512 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18513 /*
18514 * Wait for at least 10ms and up to 2 second for the timers
18515 * scan to complete
18516 */
18517 for (i = 0; i < 200; i++) {
18518 DELAY(10000);
18519 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18520 break;
18521 }
18522 }
18523
18524 /* Clear ILT */
18525 bxe_clear_func_ilt(sc, func);
18526
18527 /*
18528      * Timers bug workaround for E2: if this is vnic-3,
18529      * we need to set the entire ILT range for the timers.
18530 */
18531 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18532 struct ilt_client_info ilt_cli;
18533 /* use dummy TM client */
18534 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18535 ilt_cli.start = 0;
18536 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18537 ilt_cli.client_num = ILT_CLIENT_TM;
18538
18539 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18540 }
18541
18542     /* this assumes that reset_port() is called before reset_func() */
18543 if (!CHIP_IS_E1x(sc)) {
18544 bxe_pf_disable(sc);
18545 }
18546
18547 sc->dmae_ready = 0;
18548 }
18549
18550 static int
18551 bxe_gunzip_init(struct bxe_softc *sc)
18552 {
18553 return (0);
18554 }
18555
18556 static void
18557 bxe_gunzip_end(struct bxe_softc *sc)
18558 {
18559 return;
18560 }
18561
18562 static int
18563 bxe_init_firmware(struct bxe_softc *sc)
18564 {
18565 if (CHIP_IS_E1(sc)) {
18566 ecore_init_e1_firmware(sc);
18567 sc->iro_array = e1_iro_arr;
18568 } else if (CHIP_IS_E1H(sc)) {
18569 ecore_init_e1h_firmware(sc);
18570 sc->iro_array = e1h_iro_arr;
18571 } else if (!CHIP_IS_E1x(sc)) {
18572 ecore_init_e2_firmware(sc);
18573 sc->iro_array = e2_iro_arr;
18574 } else {
18575 BLOGE(sc, "Unsupported chip revision\n");
18576 return (-1);
18577 }
18578
18579 return (0);
18580 }
18581
18582 static void
18583 bxe_release_firmware(struct bxe_softc *sc)
18584 {
18585 /* Do nothing */
18586 return;
18587 }
18588
18589 static int
18590 ecore_gunzip(struct bxe_softc *sc,
18591 const uint8_t *zbuf,
18592 int len)
18593 {
18594 /* XXX : Implement... */
18595 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18596 return (FALSE);
18597 }
18598
18599 static void
18600 ecore_reg_wr_ind(struct bxe_softc *sc,
18601 uint32_t addr,
18602 uint32_t val)
18603 {
18604 bxe_reg_wr_ind(sc, addr, val);
18605 }
18606
18607 static void
18608 ecore_write_dmae_phys_len(struct bxe_softc *sc,
18609 bus_addr_t phys_addr,
18610 uint32_t addr,
18611 uint32_t len)
18612 {
18613 bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18614 }
18615
18616 void
18617 ecore_storm_memset_struct(struct bxe_softc *sc,
18618 uint32_t addr,
18619 size_t size,
18620 uint32_t *data)
18621 {
18622 uint8_t i;
18623 for (i = 0; i < size/4; i++) {
18624 REG_WR(sc, addr + (i * 4), data[i]);
18625 }
18626 }
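/*
 * Hypothetical usage sketch (not an actual caller in this file): callers
 * hand this a dword-aligned shadow structure, e.g.
 *
 *     uint32_t shadow[4] = { 0 };   // some slow-path structure image
 *     ecore_storm_memset_struct(sc, addr, sizeof(shadow), shadow);
 *
 * which the loop above unrolls into size/4 consecutive 32-bit REG_WRs
 * starting at addr.
 */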
18627
18628
18629 /*
18630 * character device - ioctl interface definitions
18631 */
18632
18633
18634 #include "bxe_dump.h"
18635 #include "bxe_ioctl.h"
18636 #include <sys/conf.h>
18637
18638 static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18639 struct thread *td);
18640
18641 static struct cdevsw bxe_cdevsw = {
18642 .d_version = D_VERSION,
18643 .d_ioctl = bxe_eioctl,
18644 .d_name = "bxecnic",
18645 };
18646
18647 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18648
18649
18650 #define DUMP_ALL_PRESETS 0x1FFF
18651 #define DUMP_MAX_PRESETS 13
18652 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18653 #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18654 #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18655 #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18656 #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18657
18658 #define IS_REG_IN_PRESET(presets, idx) \
18659 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
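/*
 * Note (illustrative): presets are numbered from 1, so preset idx maps to
 * bit (idx-1); e.g. a presets mask of 0x5 contains presets 1 and 3 but
 * not preset 2.
 */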
18660
18661
18662 static int
18663 bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18664 {
18665 if (CHIP_IS_E1(sc))
18666 return dump_num_registers[0][preset-1];
18667 else if (CHIP_IS_E1H(sc))
18668 return dump_num_registers[1][preset-1];
18669 else if (CHIP_IS_E2(sc))
18670 return dump_num_registers[2][preset-1];
18671 else if (CHIP_IS_E3A0(sc))
18672 return dump_num_registers[3][preset-1];
18673 else if (CHIP_IS_E3B0(sc))
18674 return dump_num_registers[4][preset-1];
18675 else
18676 return 0;
18677 }
18678
18679 static int
18680 bxe_get_total_regs_len32(struct bxe_softc *sc)
18681 {
18682 uint32_t preset_idx;
18683 int regdump_len32 = 0;
18684
18685
18686 /* Calculate the total preset regs length */
18687 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18688 regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18689 }
18690
18691 return regdump_len32;
18692 }
18693
18694 static const uint32_t *
18695 __bxe_get_page_addr_ar(struct bxe_softc *sc)
18696 {
18697 if (CHIP_IS_E2(sc))
18698 return page_vals_e2;
18699 else if (CHIP_IS_E3(sc))
18700 return page_vals_e3;
18701 else
18702 return NULL;
18703 }
18704
18705 static uint32_t
18706 __bxe_get_page_reg_num(struct bxe_softc *sc)
18707 {
18708 if (CHIP_IS_E2(sc))
18709 return PAGE_MODE_VALUES_E2;
18710 else if (CHIP_IS_E3(sc))
18711 return PAGE_MODE_VALUES_E3;
18712 else
18713 return 0;
18714 }
18715
18716 static const uint32_t *
18717 __bxe_get_page_write_ar(struct bxe_softc *sc)
18718 {
18719 if (CHIP_IS_E2(sc))
18720 return page_write_regs_e2;
18721 else if (CHIP_IS_E3(sc))
18722 return page_write_regs_e3;
18723 else
18724 return NULL;
18725 }
18726
18727 static uint32_t
18728 __bxe_get_page_write_num(struct bxe_softc *sc)
18729 {
18730 if (CHIP_IS_E2(sc))
18731 return PAGE_WRITE_REGS_E2;
18732 else if (CHIP_IS_E3(sc))
18733 return PAGE_WRITE_REGS_E3;
18734 else
18735 return 0;
18736 }
18737
18738 static const struct reg_addr *
18739 __bxe_get_page_read_ar(struct bxe_softc *sc)
18740 {
18741 if (CHIP_IS_E2(sc))
18742 return page_read_regs_e2;
18743 else if (CHIP_IS_E3(sc))
18744 return page_read_regs_e3;
18745 else
18746 return NULL;
18747 }
18748
18749 static uint32_t
18750 __bxe_get_page_read_num(struct bxe_softc *sc)
18751 {
18752 if (CHIP_IS_E2(sc))
18753 return PAGE_READ_REGS_E2;
18754 else if (CHIP_IS_E3(sc))
18755 return PAGE_READ_REGS_E3;
18756 else
18757 return 0;
18758 }
18759
18760 static bool
18761 bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18762 {
18763 if (CHIP_IS_E1(sc))
18764 return IS_E1_REG(reg_info->chips);
18765 else if (CHIP_IS_E1H(sc))
18766 return IS_E1H_REG(reg_info->chips);
18767 else if (CHIP_IS_E2(sc))
18768 return IS_E2_REG(reg_info->chips);
18769 else if (CHIP_IS_E3A0(sc))
18770 return IS_E3A0_REG(reg_info->chips);
18771 else if (CHIP_IS_E3B0(sc))
18772 return IS_E3B0_REG(reg_info->chips);
18773 else
18774 return 0;
18775 }
18776
18777 static bool
18778 bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18779 {
18780 if (CHIP_IS_E1(sc))
18781 return IS_E1_REG(wreg_info->chips);
18782 else if (CHIP_IS_E1H(sc))
18783 return IS_E1H_REG(wreg_info->chips);
18784 else if (CHIP_IS_E2(sc))
18785 return IS_E2_REG(wreg_info->chips);
18786 else if (CHIP_IS_E3A0(sc))
18787 return IS_E3A0_REG(wreg_info->chips);
18788 else if (CHIP_IS_E3B0(sc))
18789 return IS_E3B0_REG(wreg_info->chips);
18790 else
18791 return 0;
18792 }
18793
18794 /**
18795 * bxe_read_pages_regs - read "paged" registers
18796 *
18797  * @sc: driver handle
18798  * @p:  output buffer
18799 *
18800 * Reads "paged" memories: memories that may only be read by first writing to a
18801 * specific address ("write address") and then reading from a specific address
18802 * ("read address"). There may be more than one write address per "page" and
18803 * more than one read address per write address.
18804 */
18805 static void
18806 bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18807 {
18808 uint32_t i, j, k, n;
18809
18810 /* addresses of the paged registers */
18811 const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18812 /* number of paged registers */
18813 int num_pages = __bxe_get_page_reg_num(sc);
18814 /* write addresses */
18815 const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18816 /* number of write addresses */
18817 int write_num = __bxe_get_page_write_num(sc);
18818 /* read addresses info */
18819 const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18820 /* number of read addresses */
18821 int read_num = __bxe_get_page_read_num(sc);
18822 uint32_t addr, size;
18823
18824 for (i = 0; i < num_pages; i++) {
18825 for (j = 0; j < write_num; j++) {
18826 REG_WR(sc, write_addr[j], page_addr[i]);
18827
18828 for (k = 0; k < read_num; k++) {
18829 if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18830 size = read_addr[k].size;
18831 for (n = 0; n < size; n++) {
18832 addr = read_addr[k].addr + n*4;
18833 *p++ = REG_RD(sc, addr);
18834 }
18835 }
18836 }
18837 }
18838 }
18839 return;
18840 }
18841
18842
18843 static int
18844 bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18845 {
18846 uint32_t i, j, addr;
18847 const struct wreg_addr *wreg_addr_p = NULL;
18848
18849 if (CHIP_IS_E1(sc))
18850 wreg_addr_p = &wreg_addr_e1;
18851 else if (CHIP_IS_E1H(sc))
18852 wreg_addr_p = &wreg_addr_e1h;
18853 else if (CHIP_IS_E2(sc))
18854 wreg_addr_p = &wreg_addr_e2;
18855 else if (CHIP_IS_E3A0(sc))
18856 wreg_addr_p = &wreg_addr_e3;
18857 else if (CHIP_IS_E3B0(sc))
18858 wreg_addr_p = &wreg_addr_e3b0;
18859 else
18860 return (-1);
18861
18862 /* Read the idle_chk registers */
18863 for (i = 0; i < IDLE_REGS_COUNT; i++) {
18864 if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18865 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18866 for (j = 0; j < idle_reg_addrs[i].size; j++)
18867 *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18868 }
18869 }
18870
18871 /* Read the regular registers */
18872 for (i = 0; i < REGS_COUNT; i++) {
18873         if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18874 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18875 for (j = 0; j < reg_addrs[i].size; j++)
18876 *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18877 }
18878 }
18879
18880 /* Read the CAM registers */
18881 if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18882 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18883 for (i = 0; i < wreg_addr_p->size; i++) {
18884 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18885
18886 /* In case of a wreg_addr register, read additional
18887 * registers from the read_regs array.
18888 */
18889 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18890 addr = *(wreg_addr_p->read_regs);
18891 *p++ = REG_RD(sc, addr + j*4);
18892 }
18893 }
18894 }
18895
18896 /* Paged registers are supported in E2 & E3 only */
18897 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18898 /* Read "paged" registers */
18899 bxe_read_pages_regs(sc, p, preset);
18900 }
18901
18902 return 0;
18903 }
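/*
 * Usage note (sketch based on the caller below): bxe_get_preset_regs()
 * does not report how many words it wrote. bxe_grc_dump() instead
 * advances its cursor by bxe_get_preset_regs_len(), so the two helpers
 * must agree on each preset's footprint:
 *
 *     rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
 *     buf += bxe_get_preset_regs_len(sc, preset_idx) * sizeof(uint32_t);
 */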
18904
18905 int
18906 bxe_grc_dump(struct bxe_softc *sc)
18907 {
18908 int rval = 0;
18909 uint32_t preset_idx;
18910 uint8_t *buf;
18911 uint32_t size;
18912 struct dump_header *d_hdr;
18913 uint32_t i;
18914 uint32_t reg_val;
18915 uint32_t reg_addr;
18916 uint32_t cmd_offset;
18917 struct ecore_ilt *ilt = SC_ILT(sc);
18918 struct bxe_fastpath *fp;
18919 struct ilt_client_info *ilt_cli;
18920 int grc_dump_size;
18921
18922
18923 if (sc->grcdump_done || sc->grcdump_started)
18924 return (rval);
18925
18926 sc->grcdump_started = 1;
18927 BLOGI(sc, "Started collecting grcdump\n");
18928
18929 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18930 sizeof(struct dump_header);
18931
18932 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18933
18934 if (sc->grc_dump == NULL) {
18935 BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
sc->grcdump_started = 0; /* do not wedge later dump attempts */
18936 return (ENOMEM);
18937 }
18938
18939
18940
18941 /* Disable parity attentions for the duration of the dump, since
18942 * reading never-written registers below may raise false alarms.
18943 * Parity attentions are re-enabled right after the dump.
18944 */
18945
18946 /* Disable parity on path 0 */
18947 bxe_pretend_func(sc, 0);
18948
18949 ecore_disable_blocks_parity(sc);
18950
18951 /* Disable parity on path 1 */
18952 bxe_pretend_func(sc, 1);
18953 ecore_disable_blocks_parity(sc);
18954
18955 /* Return to current function */
18956 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18957
18958 buf = sc->grc_dump;
18959 d_hdr = sc->grc_dump;
18960
18961 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
18962 d_hdr->version = BNX2X_DUMP_VERSION;
18963 d_hdr->preset = DUMP_ALL_PRESETS;
18964
18965 if (CHIP_IS_E1(sc)) {
18966 d_hdr->dump_meta_data = DUMP_CHIP_E1;
18967 } else if (CHIP_IS_E1H(sc)) {
18968 d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18969 } else if (CHIP_IS_E2(sc)) {
18970 d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18971 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18972 } else if (CHIP_IS_E3A0(sc)) {
18973 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18974 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18975 } else if (CHIP_IS_E3B0(sc)) {
18976 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18977 (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18978 }
18979
18980 buf += sizeof(struct dump_header);
18981
18982 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18983
18984 /* Skip presets with IOR */
18985 if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18986 (preset_idx == 11))
18987 continue;
18988
18989 rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18990
18991 if (rval)
18992 break;
18993
18994 size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18995
18996 buf += size;
18997 }
18998
18999 bxe_pretend_func(sc, 0);
19000 ecore_clear_blocks_parity(sc);
19001 ecore_enable_blocks_parity(sc);
19002
19003 bxe_pretend_func(sc, 1);
19004 ecore_clear_blocks_parity(sc);
19005 ecore_enable_blocks_parity(sc);
19006
19007 /* Return to current function */
19008 bxe_pretend_func(sc, SC_ABS_FUNC(sc));
19009
19010
19011
19012 if(sc->state == BXE_STATE_OPEN) {
19013 if(sc->fw_stats_req != NULL) {
19014 BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
19015 (uintmax_t)sc->fw_stats_req_mapping,
19016 (uintmax_t)sc->fw_stats_data_mapping,
19017 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
19018 }
19019 if(sc->def_sb != NULL) {
19020 BLOGI(sc, "def_status_block paddr %#jx vaddr %p size 0x%zx\n",
19021 (uintmax_t)sc->def_sb_dma.paddr, sc->def_sb,
19022 sizeof(struct host_sp_status_block));
19023 }
19024 if(sc->eq_dma.vaddr != NULL) {
19025 BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
19026 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19027 }
19028 if(sc->sp_dma.vaddr != NULL) {
19029 BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
19030 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19031 sizeof(struct bxe_slowpath));
19032 }
19033 if(sc->spq_dma.vaddr != NULL) {
19034 BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
19035 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19036 }
19037 if(sc->gz_buf_dma.vaddr != NULL) {
19038 BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
19039 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19040 FW_BUF_SIZE);
19041 }
19042 for (i = 0; i < sc->num_queues; i++) {
19043 fp = &sc->fp[i];
19044 if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19045 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19046 fp->rx_sge_dma.vaddr != NULL) {
19047
19048 BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19049 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19050 sizeof(union bxe_host_hc_status_block));
19051 BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19052 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19053 (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
19054 BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19055 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19056 (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
19057 BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
19058 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19059 (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
19060 BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
19061 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19062 (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
19063 }
19064 }
19065 if(ilt != NULL) {
19066 ilt_cli = &ilt->clients[1];
19067 if(ilt->lines != NULL) {
19068 for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19069 BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
19070 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19071 ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19072 }
19073 }
19074 }
19075
19076
19077 cmd_offset = DMAE_REG_CMD_MEM;
19078 for (i = 0; i < 224; i++) {
19079 reg_addr = (cmd_offset + (i * 4));
19080 reg_val = REG_RD(sc, reg_addr);
19081 BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
19082 reg_addr, reg_val);
19083 }
19084 }
19085
19086 BLOGI(sc, "Collection of grcdump done\n");
19087 sc->grcdump_done = 1;
19088 return (rval);
19089 }
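/*
 * Consumer-side sketch (hypothetical tool code, assuming the tool shares
 * struct dump_header with the driver): the preset register words start
 * right after the header written above, and header_size holds the header
 * length in dwords minus one:
 *
 *     const struct dump_header *hdr = (const struct dump_header *)buf;
 *     const uint32_t *regs = (const uint32_t *)
 *         (buf + (hdr->header_size + 1) * sizeof(uint32_t));
 *     printf("dump version %u preset %u\n", hdr->version, hdr->preset);
 */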
19090
19091 static int
19092 bxe_add_cdev(struct bxe_softc *sc)
19093 {
19094 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19095
19096 if (sc->eeprom == NULL) {
19097 BLOGW(sc, "Unable to allocate eeprom buffer\n");
19098 return (-1);
19099 }
19100
19101 sc->ioctl_dev = make_dev(&bxe_cdevsw,
19102 if_getdunit(sc->ifp),
19103 UID_ROOT,
19104 GID_WHEEL,
19105 0600,
19106 "%s",
19107 if_name(sc->ifp));
19108
19109 if (sc->ioctl_dev == NULL) {
19110 free(sc->eeprom, M_DEVBUF);
19111 sc->eeprom = NULL;
19112 return (-1);
19113 }
19114
19115 sc->ioctl_dev->si_drv1 = sc;
19116
19117 return (0);
19118 }
19119
19120 static void
19121 bxe_del_cdev(struct bxe_softc *sc)
19122 {
19123 if (sc->ioctl_dev != NULL)
19124 destroy_dev(sc->ioctl_dev);
19125
19126 if (sc->eeprom != NULL) {
19127 free(sc->eeprom, M_DEVBUF);
19128 sc->eeprom = NULL;
19129 }
19130 sc->ioctl_dev = NULL;
19131
19132 return;
19133 }
19134
19135 static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
19136 {
19137
19138 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19139 return FALSE;
19140
19141 return TRUE;
19142 }
19143
19144
19145 static int
19146 bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19147 {
19148 int rval = 0;
19149
19150 if(!bxe_is_nvram_accessible(sc)) {
19151 BLOGW(sc, "Cannot access eeprom when interface is down\n");
19152 return (-EAGAIN);
19153 }
19154 rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
19155
19156
19157 return (rval);
19158 }
19159
19160 static int
19161 bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19162 {
19163 int rval = 0;
19164
19165 if(!bxe_is_nvram_accessible(sc)) {
19166 BLOGW(sc, "Cannot access eeprom when interface is down\n");
19167 return (-EAGAIN);
19168 }
19169 rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
19170
19171 return (rval);
19172 }
19173
19174 static int
19175 bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19176 {
19177 int rval = 0;
19178
19179 switch (eeprom->eeprom_cmd) {
19180
19181 case BXE_EEPROM_CMD_SET_EEPROM:
19182
19183 rval = copyin(eeprom->eeprom_data, sc->eeprom,
19184 eeprom->eeprom_data_len);
19185
19186 if (rval)
19187 break;
19188
19189 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19190 eeprom->eeprom_data_len);
19191 break;
19192
19193 case BXE_EEPROM_CMD_GET_EEPROM:
19194
19195 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19196 eeprom->eeprom_data_len);
19197
19198 if (rval) {
19199 break;
19200 }
19201
19202 rval = copyout(sc->eeprom, eeprom->eeprom_data,
19203 eeprom->eeprom_data_len);
19204 break;
19205
19206 default:
19207 rval = EINVAL;
19208 break;
19209 }
19210
19211 if (rval) {
19212 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
19213 }
19214
19215 return (rval);
19216 }
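/*
 * Userland sketch of driving the handler above via the BXE_EEPROM ioctl
 * (hypothetical example; assumes bxe_eeprom_t and the ioctl command come
 * from the driver's ioctl header, and that the device node created in
 * bxe_add_cdev() above is /dev/bxe0):
 *
 *     bxe_eeprom_t e = { 0 };
 *     uint8_t data[256];
 *
 *     e.eeprom_cmd      = BXE_EEPROM_CMD_GET_EEPROM;
 *     e.eeprom_offset   = 0;
 *     e.eeprom_data     = data;
 *     e.eeprom_data_len = sizeof(data);
 *     if (ioctl(fd, BXE_EEPROM, &e) == -1)
 *         err(1, "BXE_EEPROM");
 */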
19217
19218 static int
19219 bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19220 {
19221 uint32_t ext_phy_config;
19222 int port = SC_PORT(sc);
19223 int cfg_idx = bxe_get_link_cfg_idx(sc);
19224
19225 dev_p->supported = sc->port.supported[cfg_idx] |
19226 (sc->port.supported[cfg_idx ^ 1] &
19227 (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19228 dev_p->advertising = sc->port.advertising[cfg_idx];
19229 if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19230 ELINK_ETH_PHY_SFP_1G_FIBER) {
19231 dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
19232 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19233 }
19234 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19235 !(sc->flags & BXE_MF_FUNC_DIS)) {
19236 dev_p->duplex = sc->link_vars.duplex;
19237 if (IS_MF(sc) && !BXE_NOMCP(sc))
19238 dev_p->speed = bxe_get_mf_speed(sc);
19239 else
19240 dev_p->speed = sc->link_vars.line_speed;
19241 } else {
19242 dev_p->duplex = DUPLEX_UNKNOWN;
19243 dev_p->speed = SPEED_UNKNOWN;
19244 }
19245
19246 dev_p->port = bxe_media_detect(sc);
19247
19248 ext_phy_config = SHMEM_RD(sc,
19249 dev_info.port_hw_config[port].external_phy_config);
19250 if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
19251 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
19252 dev_p->phy_address = sc->port.phy_addr;
19253 else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19254 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
19255 ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
19256 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
19257 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19258 else
19259 dev_p->phy_address = 0;
19260
19261 if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19262 dev_p->autoneg = AUTONEG_ENABLE;
19263 else
19264 dev_p->autoneg = AUTONEG_DISABLE;
19265
19266
19267 return 0;
19268 }
19269
19270 static int
19271 bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19272 struct thread *td)
19273 {
19274 struct bxe_softc *sc;
19275 int rval = 0;
19276 bxe_grcdump_t *dump = NULL;
19277 int grc_dump_size;
19278 bxe_drvinfo_t *drv_infop = NULL;
19279 bxe_dev_setting_t *dev_p;
19280 bxe_dev_setting_t dev_set;
19281 bxe_get_regs_t *reg_p;
19282 bxe_reg_rdw_t *reg_rdw_p;
19283 bxe_pcicfg_rdw_t *cfg_rdw_p;
19284 bxe_perm_mac_addr_t *mac_addr_p;
19285
19286
19287 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19288 return ENXIO;
19289
19290 dump = (bxe_grcdump_t *)data;
19291
19292 switch(cmd) {
19293
19294 case BXE_GRC_DUMP_SIZE:
19295 dump->pci_func = sc->pcie_func;
19296 dump->grcdump_size =
19297 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19298 sizeof(struct dump_header);
19299 break;
19300
19301 case BXE_GRC_DUMP:
19302
19303 grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19304 sizeof(struct dump_header);
19305 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19306 (dump->grcdump_size < grc_dump_size)) {
19307 rval = EINVAL;
19308 break;
19309 }
19310
19311 if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19312 (!sc->grcdump_started)) {
19313 rval = bxe_grc_dump(sc);
19314 }
19315
19316 if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19317 (sc->grc_dump != NULL)) {
19318 dump->grcdump_dwords = grc_dump_size >> 2;
19319 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19320 free(sc->grc_dump, M_DEVBUF);
19321 sc->grc_dump = NULL;
19322 sc->grcdump_started = 0;
19323 sc->grcdump_done = 0;
19324 }
19325
19326 break;
19327
19328 case BXE_DRV_INFO:
19329 drv_infop = (bxe_drvinfo_t *)data;
19330 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19331 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19332 BXE_DRIVER_VERSION);
19333 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19334 sc->devinfo.bc_ver_str);
19335 snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19336 "%s", sc->fw_ver_str);
19337 drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19338 drv_infop->reg_dump_len =
19339 (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19340 + sizeof(struct dump_header);
19341 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19342 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19343 break;
19344
19345 case BXE_DEV_SETTING:
19346 dev_p = (bxe_dev_setting_t *)data;
19347 bxe_get_settings(sc, &dev_set);
19348 dev_p->supported = dev_set.supported;
19349 dev_p->advertising = dev_set.advertising;
19350 dev_p->speed = dev_set.speed;
19351 dev_p->duplex = dev_set.duplex;
19352 dev_p->port = dev_set.port;
19353 dev_p->phy_address = dev_set.phy_address;
19354 dev_p->autoneg = dev_set.autoneg;
19355
19356 break;
19357
19358 case BXE_GET_REGS:
19359
19360 reg_p = (bxe_get_regs_t *)data;
19361 grc_dump_size = reg_p->reg_buf_len;
19362
19363 if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19364 bxe_grc_dump(sc);
19365 }
19366 if((sc->grcdump_done) && (sc->grcdump_started) &&
19367 (sc->grc_dump != NULL)) {
19368 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19369 free(sc->grc_dump, M_DEVBUF);
19370 sc->grc_dump = NULL;
19371 sc->grcdump_started = 0;
19372 sc->grcdump_done = 0;
19373 }
19374
19375 break;
19376
19377 case BXE_RDW_REG:
19378 reg_rdw_p = (bxe_reg_rdw_t *)data;
19379 if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19380 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19381 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19382
19383 if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19384 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19385 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19386
19387 break;
19388
19389 case BXE_RDW_PCICFG:
19390 cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19391 if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19392
19393 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19394 cfg_rdw_p->cfg_width);
19395
19396 } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19397 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19398 cfg_rdw_p->cfg_width);
19399 } else {
19400 BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19401 }
19402 break;
19403
19404 case BXE_MAC_ADDR:
19405 mac_addr_p = (bxe_perm_mac_addr_t *)data;
19406 snprintf(mac_addr_p->mac_addr_str, sizeof(mac_addr_p->mac_addr_str),
19407 "%s", sc->mac_addr_str);
19408 break;
19409
19410 case BXE_EEPROM:
19411 rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19412 break;
19413
19414
19415 default:
19416 break;
19417 }
19418
19419 return (rval);
19420 }
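/*
 * Userland sketch of the two-step grcdump retrieval handled above
 * (hypothetical example; assumes the ioctl definitions come from the
 * driver's ioctl header and fd is an open descriptor for the node from
 * bxe_add_cdev()). Note the handler requires sc->trigger_grcdump to be
 * set and rejects undersized buffers with EINVAL:
 *
 *     bxe_grcdump_t d = { 0 };
 *
 *     if (ioctl(fd, BXE_GRC_DUMP_SIZE, &d) == -1)   step 1: query size
 *         err(1, "BXE_GRC_DUMP_SIZE");
 *     d.grcdump = malloc(d.grcdump_size);
 *     if (ioctl(fd, BXE_GRC_DUMP, &d) == -1)        step 2: collect dump
 *         err(1, "BXE_GRC_DUMP");
 */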
19421
19422 #ifdef DEBUGNET
19423 static void
19424 bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
19425 {
19426 struct bxe_softc *sc;
19427
19428 sc = if_getsoftc(ifp);
19429 BXE_CORE_LOCK(sc);
19430 *nrxr = sc->num_queues;
19431 *ncl = DEBUGNET_MAX_IN_FLIGHT;
19432 *clsize = sc->fp[0].mbuf_alloc_size;
19433 BXE_CORE_UNLOCK(sc);
19434 }
19435
19436 static void
19437 bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
19438 {
19439 }
19440
19441 static int
19442 bxe_debugnet_transmit(if_t ifp, struct mbuf *m)
19443 {
19444 struct bxe_softc *sc;
19445 int error;
19446
19447 sc = if_getsoftc(ifp);
19448 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19449 IFF_DRV_RUNNING || !sc->link_vars.link_up)
19450 return (ENOENT);
19451
19452 error = bxe_tx_encap(&sc->fp[0], &m);
19453 if (error != 0 && m != NULL)
19454 m_freem(m);
19455 return (error);
19456 }
19457
19458 static int
19459 bxe_debugnet_poll(if_t ifp, int count)
19460 {
19461 struct bxe_softc *sc;
19462 int i;
19463
19464 sc = if_getsoftc(ifp);
19465 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19466 !sc->link_vars.link_up)
19467 return (ENOENT);
19468
19469 for (i = 0; i < sc->num_queues; i++)
19470 (void)bxe_rxeof(sc, &sc->fp[i]);
19471 (void)bxe_txeof(sc, &sc->fp[0]);
19472 return (0);
19473 }
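/*
 * Sketch of how these callbacks are typically registered (assumption:
 * bxe follows the standard debugnet(4) driver pattern; the actual
 * registration site is outside this excerpt):
 *
 *     DEBUGNET_DEFINE(bxe);     builds the bxe_debugnet_* method table
 *     ...
 *     DEBUGNET_SET(ifp, bxe);   at attach time, after ether_ifattach()
 */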
19474 #endif /* DEBUGNET */