FreeBSD/Linux Kernel Cross Reference
sys/dev/bce/if_bce.c
1 /*-
2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 /*
35 * The following controllers are supported by this driver:
36 * BCM5706C A2, A3
37 * BCM5708C B1, B2
38 *
39 * The following controllers are not supported by this driver:
40 * BCM5706C A0, A1
41 * BCM5706S A0, A1, A2, A3
42 * BCM5708C A0, B0
43 * BCM5708S A0, B0, B1, B2
44 */
45
46 #include "opt_bce.h"
47
48 #include <dev/bce/if_bcereg.h>
49 #include <dev/bce/if_bcefw.h>
50
51 /****************************************************************************/
52 /* BCE Debug Options */
53 /****************************************************************************/
#ifdef BCE_DEBUG
/* Bitmask of debug message categories to print (see if_bcereg.h). */
u32 bce_debug = BCE_WARN;

/*
 * Fault-injection thresholds.  Each value below is compared against a
 * random number to decide whether to simulate the named failure; the
 * table maps threshold values to approximate failure rates.
 */
/* 0 = Never */
/* 1 = 1 in 2,147,483,648 */
/* 256 = 1 in 8,388,608 */
/* 2048 = 1 in 1,048,576 */
/* 65536 = 1 in 32,768 */
/* 1048576 = 1 in 2,048 */
/* 268435456 = 1 in 8 */
/* 536870912 = 1 in 4 */
/* 1073741824 = 1 in 2 */

/* Controls how often the l2_fhdr frame error check will fail. */
int bce_debug_l2fhdr_status_check = 0;

/* Controls how often the unexpected attention check will fail. */
int bce_debug_unexpected_attention = 0;

/* Controls how often to simulate an mbuf allocation failure. */
int bce_debug_mbuf_allocation_failure = 0;

/* Controls how often to simulate a DMA mapping failure. */
int bce_debug_dma_map_addr_failure = 0;

/* Controls how often to simulate a bootcode failure. */
int bce_debug_bootcode_running_failure = 0;
#endif
82
83
84 /****************************************************************************/
85 /* PCI Device ID Table */
86 /* */
87 /* Used by bce_probe() to identify the devices supported by this driver. */
88 /****************************************************************************/
89 #define BCE_DEVDESC_MAX 64
90
/*
 * Each row is { PCI vendor, PCI device, PCI subvendor, PCI subdevice,
 * description }.  bce_probe() scans this table in order, so OEM-specific
 * (subvendor/subdevice) rows must precede the PCI_ANY_ID wildcard rows
 * for the same device ID.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-SX" },

	/* Terminator entry: bce_probe() stops at the NULL name. */
	{ 0, 0, 0, 0, NULL }
};
115
116
117 /****************************************************************************/
118 /* Supported Flash NVRAM device data. */
119 /****************************************************************************/
/*
 * One entry per NVRAM strapping option decoded by bce_init_nvram().
 * NOTE(review): the five leading hex words appear to be the strapping
 * value followed by controller flash configuration/command words
 * (the "strap, cfg1, & write1 need updates" notes below suggest this
 * ordering) — confirm against struct flash_spec in if_bcereg.h.  The
 * remaining fields are: buffered-flash flag, page bits, page size,
 * byte address mask, total size in bytes, and a printable name.
 * "Expansion" entries are placeholders for straps with no known device
 * (total size 0).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
206
207
208 /****************************************************************************/
209 /* FreeBSD device entry points. */
210 /****************************************************************************/
static int bce_probe (device_t);
static int bce_attach (device_t);
static int bce_detach (device_t);
static int bce_shutdown (device_t);


/****************************************************************************/
/* BCE Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BCE_DEBUG
static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
static void bce_dump_tx_mbuf_chain (struct bce_softc *, int, int);
static void bce_dump_rx_mbuf_chain (struct bce_softc *, int, int);
static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *);
static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *);
static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *);
static void bce_dump_tx_chain (struct bce_softc *, int, int);
static void bce_dump_rx_chain (struct bce_softc *, int, int);
static void bce_dump_status_block (struct bce_softc *);
static void bce_dump_stats_block (struct bce_softc *);
static void bce_dump_driver_state (struct bce_softc *);
static void bce_dump_hw_state (struct bce_softc *);
static void bce_dump_bc_state (struct bce_softc *);
static void bce_breakpoint (struct bce_softc *);
#endif


/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static u32 bce_reg_rd_ind (struct bce_softc *, u32);
static void bce_reg_wr_ind (struct bce_softc *, u32, u32);
static void bce_ctx_wr (struct bce_softc *, u32, u32, u32);
static int bce_miibus_read_reg (device_t, int, int);
static int bce_miibus_write_reg (device_t, int, int, int);
static void bce_miibus_statchg (device_t);


/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int bce_acquire_nvram_lock (struct bce_softc *);
static int bce_release_nvram_lock (struct bce_softc *);
static void bce_enable_nvram_access (struct bce_softc *);
static void bce_disable_nvram_access(struct bce_softc *);
static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
static int bce_init_nvram (struct bce_softc *);
static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
static int bce_nvram_test (struct bce_softc *);
/* NVRAM write support is compiled out unless explicitly requested. */
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int bce_enable_nvram_write (struct bce_softc *);
static void bce_disable_nvram_write (struct bce_softc *);
static int bce_nvram_erase_page (struct bce_softc *, u32);
static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
#endif

/****************************************************************************/
/* BCE DMA Memory Allocation and Resource Release Routines                  */
/****************************************************************************/
static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
static int bce_dma_alloc (device_t);
static void bce_dma_free (struct bce_softc *);
static void bce_release_resources (struct bce_softc *);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int bce_fw_sync (struct bce_softc *, u32);
static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *);
static void bce_init_cpus (struct bce_softc *);

/* Controller reset/initialization and buffer management. */
static void bce_stop (struct bce_softc *);
static int bce_reset (struct bce_softc *, u32);
static int bce_chipinit (struct bce_softc *);
static int bce_blockinit (struct bce_softc *);
static int bce_get_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);

static int bce_init_tx_chain (struct bce_softc *);
static void bce_fill_rx_chain (struct bce_softc *);
static int bce_init_rx_chain (struct bce_softc *);
static void bce_free_rx_chain (struct bce_softc *);
static void bce_free_tx_chain (struct bce_softc *);

/* ifnet entry points and link management. */
static int bce_tx_encap (struct bce_softc *, struct mbuf **);
static void bce_start_locked (struct ifnet *);
static void bce_start (struct ifnet *);
static int bce_ioctl (struct ifnet *, u_long, caddr_t);
static void bce_watchdog (struct bce_softc *);
static int bce_ifmedia_upd (struct ifnet *);
static void bce_ifmedia_upd_locked (struct ifnet *);
static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static void bce_init_locked (struct bce_softc *);
static void bce_init (void *);
static void bce_mgmt_init_locked (struct bce_softc *sc);

/* Interrupt and periodic service routines. */
static void bce_init_context (struct bce_softc *);
static void bce_get_mac_addr (struct bce_softc *);
static void bce_set_mac_addr (struct bce_softc *);
static void bce_phy_intr (struct bce_softc *);
static void bce_rx_intr (struct bce_softc *);
static void bce_tx_intr (struct bce_softc *);
static void bce_disable_intr (struct bce_softc *);
static void bce_enable_intr (struct bce_softc *);

#ifdef DEVICE_POLLING
static void bce_poll_locked (struct ifnet *, enum poll_cmd, int);
static void bce_poll (struct ifnet *, enum poll_cmd, int);
#endif
static void bce_intr (void *);
static void bce_set_rx_mode (struct bce_softc *);
static void bce_stats_update (struct bce_softc *);
static void bce_tick (void *);
static void bce_pulse (void *);
static void bce_add_sysctls (struct bce_softc *);
327
328
329 /****************************************************************************/
330 /* FreeBSD device dispatch table. */
331 /****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, bce_probe),
	DEVMETHOD(device_attach, bce_attach),
	DEVMETHOD(device_detach, bce_detach),
	DEVMETHOD(device_shutdown, bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface (used by the miibus child attached below) */
	DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg, bce_miibus_statchg),

	/* Terminator entry required by the DEVMETHOD table format. */
	{ 0, 0 }
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

/* Module load-order dependencies. */
MODULE_DEPEND(bce, pci, 1, 1, 1);
MODULE_DEPEND(bce, ether, 1, 1, 1);
MODULE_DEPEND(bce, miibus, 1, 1, 1);

/* Register bce on the pci bus, and miibus as a child of bce. */
DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
365
366
367 /****************************************************************************/
368 /* Tunable device values */
369 /****************************************************************************/
370 static int bce_tso_enable = TRUE;
371 static int bce_msi_enable = 1;
372
373 /* Allowable values are TRUE or FALSE */
374 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
375 /* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
376 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
377
378 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
379 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
380 "TSO Enable/Disable");
381 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
382 "MSI | INTx selector");
383
384 /****************************************************************************/
385 /* Device probe function. */
386 /* */
387 /* Compares the device to the driver's list of supported devices and */
388 /* reports back to the OS whether this is the right driver for the device. */
389 /* */
390 /* Returns: */
391 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
392 /****************************************************************************/
393 static int
394 bce_probe(device_t dev)
395 {
396 struct bce_type *t;
397 struct bce_softc *sc;
398 char *descbuf;
399 u16 vid = 0, did = 0, svid = 0, sdid = 0;
400
401 t = bce_devs;
402
403 sc = device_get_softc(dev);
404 bzero(sc, sizeof(struct bce_softc));
405 sc->bce_unit = device_get_unit(dev);
406 sc->bce_dev = dev;
407
408 /* Get the data for the device to be probed. */
409 vid = pci_get_vendor(dev);
410 did = pci_get_device(dev);
411 svid = pci_get_subvendor(dev);
412 sdid = pci_get_subdevice(dev);
413
414 DBPRINT(sc, BCE_VERBOSE_LOAD,
415 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
416 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
417
418 /* Look through the list of known devices for a match. */
419 while(t->bce_name != NULL) {
420
421 if ((vid == t->bce_vid) && (did == t->bce_did) &&
422 ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
423 ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
424
425 descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
426
427 if (descbuf == NULL)
428 return(ENOMEM);
429
430 /* Print out the device identity. */
431 snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
432 t->bce_name,
433 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
434 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
435
436 device_set_desc_copy(dev, descbuf);
437 free(descbuf, M_TEMP);
438 return(BUS_PROBE_DEFAULT);
439 }
440 t++;
441 }
442
443 return(ENXIO);
444 }
445
446
447 /****************************************************************************/
448 /* Device attach function. */
449 /* */
450 /* Allocates device resources, performs secondary chip identification, */
451 /* resets and initializes the hardware, and initializes driver instance */
452 /* variables. */
453 /* */
454 /* Returns: */
455 /* 0 on success, positive value on failure. */
456 /****************************************************************************/
457 static int
458 bce_attach(device_t dev)
459 {
460 struct bce_softc *sc;
461 struct ifnet *ifp;
462 u32 val;
463 int count, mbuf, rid, rc = 0;
464
465 sc = device_get_softc(dev);
466 sc->bce_dev = dev;
467
468 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
469
470 mbuf = device_get_unit(dev);
471
472 /* Set initial device and PHY flags */
473 sc->bce_flags = 0;
474 sc->bce_phy_flags = 0;
475
476 sc->bce_unit = mbuf;
477
478 pci_enable_busmaster(dev);
479
480 /* Allocate PCI memory resources. */
481 rid = PCIR_BAR(0);
482 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
483 &rid, RF_ACTIVE | PCI_RF_DENSE);
484
485 if (sc->bce_res_mem == NULL) {
486 BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
487 __FILE__, __LINE__);
488 rc = ENXIO;
489 goto bce_attach_fail;
490 }
491
492 /* Get various resource handles. */
493 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
494 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
495 sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
496
497 /* If MSI is enabled in the driver, get the vector count. */
498 count = bce_msi_enable ? pci_msi_count(dev) : 0;
499
500 /* Allocate PCI IRQ resources. */
501 if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) {
502 rid = 1;
503 sc->bce_flags |= BCE_USING_MSI_FLAG;
504 DBPRINT(sc, BCE_VERBOSE_LOAD,
505 "Allocating %d MSI interrupt(s)\n", count);
506 } else {
507 rid = 0;
508 DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n");
509 }
510
511 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
512 RF_SHAREABLE | RF_ACTIVE);
513
514 if (sc->bce_res_irq == NULL) {
515 BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
516 __FILE__, __LINE__);
517 rc = ENXIO;
518 goto bce_attach_fail;
519 }
520
521 /* Initialize mutex for the current device instance. */
522 BCE_LOCK_INIT(sc, device_get_nameunit(dev));
523
524 /*
525 * Configure byte swap and enable indirect register access.
526 * Rely on CPU to do target byte swapping on big endian systems.
527 * Access to registers outside of PCI configurtion space are not
528 * valid until this is done.
529 */
530 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
531 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
532 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
533
534 /* Save ASIC revsion info. */
535 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
536
537 /* Weed out any non-production controller revisions. */
538 switch(BCE_CHIP_ID(sc)) {
539 case BCE_CHIP_ID_5706_A0:
540 case BCE_CHIP_ID_5706_A1:
541 case BCE_CHIP_ID_5708_A0:
542 case BCE_CHIP_ID_5708_B0:
543 BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
544 __FILE__, __LINE__,
545 (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
546 (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
547 rc = ENODEV;
548 goto bce_attach_fail;
549 }
550
551 /*
552 * The embedded PCIe to PCI-X bridge (EPB)
553 * in the 5708 cannot address memory above
554 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
555 */
556 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
557 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
558 else
559 sc->max_bus_addr = BUS_SPACE_MAXADDR;
560
561 /*
562 * Find the base address for shared memory access.
563 * Newer versions of bootcode use a signature and offset
564 * while older versions use a fixed address.
565 */
566 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
567 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
568 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
569 else
570 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
571
572 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
573 __FUNCTION__, sc->bce_shmem_base);
574
575 sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
576 BCE_DEV_INFO_BC_REV);
577 DBPRINT(sc, BCE_INFO_FIRMWARE, "%s(): bce_fw_ver = 0x%08X\n",
578 __FUNCTION__, sc->bce_fw_ver);
579
580 /* Check if any management firmware is running. */
581 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
582 if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
583 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
584 DBPRINT(sc, BCE_INFO_LOAD, "%s(): BCE_MFW_ENABLE_FLAG\n",
585 __FUNCTION__);
586 }
587
588 /* Get PCI bus information (speed and type). */
589 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
590 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
591 u32 clkreg;
592
593 sc->bce_flags |= BCE_PCIX_FLAG;
594
595 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
596
597 clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
598 switch (clkreg) {
599 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
600 sc->bus_speed_mhz = 133;
601 break;
602
603 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
604 sc->bus_speed_mhz = 100;
605 break;
606
607 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
608 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
609 sc->bus_speed_mhz = 66;
610 break;
611
612 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
613 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
614 sc->bus_speed_mhz = 50;
615 break;
616
617 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
618 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
619 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
620 sc->bus_speed_mhz = 33;
621 break;
622 }
623 } else {
624 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
625 sc->bus_speed_mhz = 66;
626 else
627 sc->bus_speed_mhz = 33;
628 }
629
630 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
631 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
632
633 /* Reset the controller and announce to bootcode that driver is present. */
634 if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
635 BCE_PRINTF("%s(%d): Controller reset failed!\n",
636 __FILE__, __LINE__);
637 rc = ENXIO;
638 goto bce_attach_fail;
639 }
640
641 /* Initialize the controller. */
642 if (bce_chipinit(sc)) {
643 BCE_PRINTF("%s(%d): Controller initialization failed!\n",
644 __FILE__, __LINE__);
645 rc = ENXIO;
646 goto bce_attach_fail;
647 }
648
649 /* Perform NVRAM test. */
650 if (bce_nvram_test(sc)) {
651 BCE_PRINTF("%s(%d): NVRAM test failed!\n",
652 __FILE__, __LINE__);
653 rc = ENXIO;
654 goto bce_attach_fail;
655 }
656
657 /* Fetch the permanent Ethernet MAC address. */
658 bce_get_mac_addr(sc);
659
660 /*
661 * Trip points control how many BDs
662 * should be ready before generating an
663 * interrupt while ticks control how long
664 * a BD can sit in the chain before
665 * generating an interrupt. Set the default
666 * values for the RX and TX chains.
667 */
668
669 #ifdef BCE_DEBUG
670 /* Force more frequent interrupts. */
671 sc->bce_tx_quick_cons_trip_int = 1;
672 sc->bce_tx_quick_cons_trip = 1;
673 sc->bce_tx_ticks_int = 0;
674 sc->bce_tx_ticks = 0;
675
676 sc->bce_rx_quick_cons_trip_int = 1;
677 sc->bce_rx_quick_cons_trip = 1;
678 sc->bce_rx_ticks_int = 0;
679 sc->bce_rx_ticks = 0;
680 #else
681 /* Improve throughput at the expense of increased latency. */
682 sc->bce_tx_quick_cons_trip_int = 20;
683 sc->bce_tx_quick_cons_trip = 20;
684 sc->bce_tx_ticks_int = 80;
685 sc->bce_tx_ticks = 80;
686
687 sc->bce_rx_quick_cons_trip_int = 6;
688 sc->bce_rx_quick_cons_trip = 6;
689 sc->bce_rx_ticks_int = 18;
690 sc->bce_rx_ticks = 18;
691 #endif
692
693 /* Update statistics once every second. */
694 sc->bce_stats_ticks = 1000000 & 0xffff00;
695
696 /*
697 * The SerDes based NetXtreme II controllers
698 * that support 2.5Gb operation (currently
699 * 5708S) use a PHY at address 2, otherwise
700 * the PHY is present at address 1.
701 */
702 sc->bce_phy_addr = 1;
703
704 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
705 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
706 sc->bce_flags |= BCE_NO_WOL_FLAG;
707 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
708 sc->bce_phy_addr = 2;
709 val = REG_RD_IND(sc, sc->bce_shmem_base +
710 BCE_SHARED_HW_CFG_CONFIG);
711 if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
712 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
713 DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
714 }
715 }
716 }
717
718 /* Store data needed by PHY driver for backplane applications */
719 sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
720 BCE_SHARED_HW_CFG_CONFIG);
721 sc->bce_port_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
722 BCE_SHARED_HW_CFG_CONFIG);
723
724 /* Allocate DMA memory resources. */
725 if (bce_dma_alloc(dev)) {
726 BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
727 __FILE__, __LINE__);
728 rc = ENXIO;
729 goto bce_attach_fail;
730 }
731
732 /* Allocate an ifnet structure. */
733 ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
734 if (ifp == NULL) {
735 BCE_PRINTF("%s(%d): Interface allocation failed!\n",
736 __FILE__, __LINE__);
737 rc = ENXIO;
738 goto bce_attach_fail;
739 }
740
741 /* Initialize the ifnet interface. */
742 ifp->if_softc = sc;
743 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
744 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
745 ifp->if_ioctl = bce_ioctl;
746 ifp->if_start = bce_start;
747 ifp->if_init = bce_init;
748 ifp->if_mtu = ETHERMTU;
749
750 if (bce_tso_enable) {
751 ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
752 ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
753 } else {
754 ifp->if_hwassist = BCE_IF_HWASSIST;
755 ifp->if_capabilities = BCE_IF_CAPABILITIES;
756 }
757
758 ifp->if_capenable = ifp->if_capabilities;
759
760 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
761 sc->mbuf_alloc_size = MCLBYTES;
762 #ifdef DEVICE_POLLING
763 ifp->if_capabilities |= IFCAP_POLLING;
764 #endif
765
766 ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
767 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
768 IFQ_SET_READY(&ifp->if_snd);
769
770 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
771 ifp->if_baudrate = IF_Mbps(2500ULL);
772 else
773 ifp->if_baudrate = IF_Mbps(1000);
774
775 /* Check for an MII child bus by probing the PHY. */
776 if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
777 bce_ifmedia_sts)) {
778 BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
779 __FILE__, __LINE__);
780 rc = ENXIO;
781 goto bce_attach_fail;
782 }
783
784 /* Attach to the Ethernet interface list. */
785 ether_ifattach(ifp, sc->eaddr);
786
787 #if __FreeBSD_version < 500000
788 callout_init(&sc->bce_tick_callout);
789 callout_init(&sc->bce_pulse_callout);
790 #else
791 callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
792 callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
793 #endif
794
795 /* Hookup IRQ last. */
796 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL,
797 bce_intr, sc, &sc->bce_intrhand);
798
799 if (rc) {
800 BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
801 __FILE__, __LINE__);
802 bce_detach(dev);
803 goto bce_attach_exit;
804 }
805
806 /*
807 * At this point we've acquired all the resources
808 * we need to run so there's no turning back, we're
809 * cleared for launch.
810 */
811
812 /* Print some important debugging info. */
813 DBRUN(BCE_INFO, bce_dump_driver_state(sc));
814
815 /* Add the supported sysctls to the kernel. */
816 bce_add_sysctls(sc);
817
818 BCE_LOCK(sc);
819 /*
820 * The chip reset earlier notified the bootcode that
821 * a driver is present. We now need to start our pulse
822 * routine so that the bootcode is reminded that we're
823 * still running.
824 */
825 bce_pulse(sc);
826
827 bce_mgmt_init_locked(sc);
828 BCE_UNLOCK(sc);
829
830 /* Finally, print some useful adapter info */
831 BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
832 printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
833 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
834 printf("Bus (PCI%s, %s, %dMHz); ",
835 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
836 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
837 sc->bus_speed_mhz);
838 printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
839 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
840 printf("MFW ");
841 if (sc->bce_flags & BCE_USING_MSI_FLAG)
842 printf("MSI ");
843 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
844 printf("2.5G ");
845 printf(")\n");
846
847 goto bce_attach_exit;
848
849 bce_attach_fail:
850 bce_release_resources(sc);
851
852 bce_attach_exit:
853
854 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
855
856 return(rc);
857 }
858
859
860 /****************************************************************************/
861 /* Device detach function. */
862 /* */
863 /* Stops the controller, resets the controller, and releases resources. */
864 /* */
865 /* Returns: */
866 /* 0 on success, positive value on failure. */
867 /****************************************************************************/
868 static int
869 bce_detach(device_t dev)
870 {
871 struct bce_softc *sc = device_get_softc(dev);
872 struct ifnet *ifp;
873 u32 msg;
874
875 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
876
877 ifp = sc->bce_ifp;
878
879 #ifdef DEVICE_POLLING
880 if (ifp->if_capenable & IFCAP_POLLING)
881 ether_poll_deregister(ifp);
882 #endif
883
884 /* Stop the pulse so the bootcode can go to driver absent state. */
885 callout_stop(&sc->bce_pulse_callout);
886
887 /* Stop and reset the controller. */
888 BCE_LOCK(sc);
889 bce_stop(sc);
890 if (sc->bce_flags & BCE_NO_WOL_FLAG)
891 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
892 else
893 msg = BCE_DRV_MSG_CODE_UNLOAD;
894 bce_reset(sc, msg);
895 BCE_UNLOCK(sc);
896
897 ether_ifdetach(ifp);
898
899 /* If we have a child device on the MII bus remove it too. */
900 bus_generic_detach(dev);
901 device_delete_child(dev, sc->bce_miibus);
902
903 /* Release all remaining resources. */
904 bce_release_resources(sc);
905
906 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
907
908 return(0);
909 }
910
911
912 /****************************************************************************/
913 /* Device shutdown function. */
914 /* */
915 /* Stops and resets the controller. */
916 /* */
917 /* Returns: */
918 /* 0 on success, positive value on failure. */
919 /****************************************************************************/
920 static int
921 bce_shutdown(device_t dev)
922 {
923 struct bce_softc *sc = device_get_softc(dev);
924 u32 msg;
925
926 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);
927
928 BCE_LOCK(sc);
929 bce_stop(sc);
930 if (sc->bce_flags & BCE_NO_WOL_FLAG)
931 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
932 else
933 msg = BCE_DRV_MSG_CODE_UNLOAD;
934 bce_reset(sc, msg);
935 BCE_UNLOCK(sc);
936
937 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);
938
939 return (0);
940 }
941
942
943 /****************************************************************************/
944 /* Indirect register read. */
945 /* */
946 /* Reads NetXtreme II registers using an index/data register pair in PCI */
947 /* configuration space. Using this mechanism avoids issues with posted */
948 /* reads but is much slower than memory-mapped I/O. */
949 /* */
950 /* Returns: */
951 /* The value of the register. */
952 /****************************************************************************/
953 static u32
954 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
955 {
956 device_t dev;
957 dev = sc->bce_dev;
958
959 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
960 #ifdef BCE_DEBUG
961 {
962 u32 val;
963 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
964 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
965 __FUNCTION__, offset, val);
966 return val;
967 }
968 #else
969 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
970 #endif
971 }
972
973
974 /****************************************************************************/
975 /* Indirect register write. */
976 /* */
977 /* Writes NetXtreme II registers using an index/data register pair in PCI */
978 /* configuration space. Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O.                        */
980 /* */
981 /* Returns: */
982 /* Nothing. */
983 /****************************************************************************/
984 static void
985 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
986 {
987 device_t dev;
988 dev = sc->bce_dev;
989
990 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
991 __FUNCTION__, offset, val);
992
993 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
994 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
995 }
996
997
998 /****************************************************************************/
999 /* Context memory write. */
1000 /* */
1001 /* The NetXtreme II controller uses context memory to track connection */
1002 /* information for L2 and higher network protocols. */
1003 /* */
1004 /* Returns: */
1005 /* Nothing. */
1006 /****************************************************************************/
1007 static void
1008 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
1009 {
1010
1011 DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1012 "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1013
1014 offset += cid_addr;
1015 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1016 REG_WR(sc, BCE_CTX_DATA, val);
1017 }
1018
1019
1020 /****************************************************************************/
1021 /* PHY register read. */
1022 /* */
1023 /* Implements register reads on the MII bus. */
1024 /* */
1025 /* Returns: */
1026 /* The value of the register. */
1027 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc;
	u32 val;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
		return(0);
	}

	/*
	 * When hardware auto-polling of the PHY is active it owns the
	 * MDIO interface; temporarily disable it so this manual access
	 * doesn't collide.  The read-back flushes the write before the
	 * settle delay.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO read command (START_BUSY kicks it off). */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
		BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the controller clears START_BUSY or we time out. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			/* Re-read and keep only the 16-bit data field. */
			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/*
	 * If START_BUSY is still set the poll loop above timed out (the
	 * success path masked that bit off before breaking).
	 */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			__FILE__, __LINE__, phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Return the 16-bit register value (0 on timeout). */
	return (val & 0xffff);

}
1096
1097
1098 /****************************************************************************/
1099 /* PHY register write. */
1100 /* */
1101 /* Implements register writes on the MII bus. */
1102 /* */
/* Returns:                                                                 */
/*   0 on completion (write errors are reported via console message).      */
1105 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc;
	u32 val1;
	int i;

	sc = device_get_softc(dev);

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);

	/*
	 * Temporarily disable hardware auto-polling so this manual MDIO
	 * access doesn't collide with it; the read-back flushes the
	 * write before the settle delay.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Build and issue the MDIO write command (START_BUSY kicks it off). */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the controller clears START_BUSY or we time out. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is only reported; the write still returns success. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		BCE_PRINTF("%s(%d): PHY write timeout!\n",
			__FILE__, __LINE__);

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}
1165
1166
1167 /****************************************************************************/
1168 /* MII bus status change. */
1169 /* */
1170 /* Called by the MII bus driver when the PHY establishes link to set the */
1171 /* MAC interface registers. */
1172 /* */
1173 /* Returns: */
1174 /* Nothing. */
1175 /****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc;
	struct mii_data *mii;
	int val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->bce_miibus);

	/* Start from the current EMAC mode with all link-shape bits cleared. */
	val = REG_RD(sc, BCE_EMAC_MODE);
	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
		BCE_EMAC_MODE_25G);

	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		/* Only non-5706 chips have a dedicated 10Mb port mode. */
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
			val |= BCE_EMAC_MODE_PORT_MII_10;
			break;
		}
		/* fall-through */
	case IFM_100_TX:
		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
		val |= BCE_EMAC_MODE_PORT_MII;
		break;
	case IFM_2500_SX:
		/* 2.5G adds the 25G bit on top of GMII port mode below. */
		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
		val |= BCE_EMAC_MODE_25G;
		/* fall-through */
	case IFM_1000_T:
	case IFM_1000_SX:
		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
		break;
	default:
		DBPRINT(sc, BCE_INFO, "Enabling default GMII interface.\n");
		val |= BCE_EMAC_MODE_PORT_GMII;
	}

	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		val |= BCE_EMAC_MODE_HALF_DUPLEX;
	} else
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");

	REG_WR(sc, BCE_EMAC_MODE, val);

#if 0
	/* Todo: Enable flow control support in brgphy and bge. */
	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
	/*
	 * NOTE(review): the second SETBIT targets BCE_EMAC_RX_MODE with a
	 * TX_MODE flow-enable bit -- looks like it should be
	 * BCE_EMAC_TX_MODE; verify before enabling this block.
	 */
	if (mii->mii_media_active & IFM_FLAG0)
		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
	if (mii->mii_media_active & IFM_FLAG1)
		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
#endif

}
1238
1239
1240 /****************************************************************************/
1241 /* Acquire NVRAM lock. */
1242 /* */
1243 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 1 is used by firmware and lock 2 is reserved for use by the driver  */
/* (the code below requests arbitration lock 2).                            */
1246 /* */
1247 /* Returns: */
1248 /* 0 on success, positive value on failure. */
1249 /****************************************************************************/
1250 static int
1251 bce_acquire_nvram_lock(struct bce_softc *sc)
1252 {
1253 u32 val;
1254 int j;
1255
1256 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");
1257
1258 /* Request access to the flash interface. */
1259 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1261 val = REG_RD(sc, BCE_NVM_SW_ARB);
1262 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1263 break;
1264
1265 DELAY(5);
1266 }
1267
1268 if (j >= NVRAM_TIMEOUT_COUNT) {
1269 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1270 return EBUSY;
1271 }
1272
1273 return 0;
1274 }
1275
1276
1277 /****************************************************************************/
1278 /* Release NVRAM lock. */
1279 /* */
1280 /* When the caller is finished accessing NVRAM the lock must be released. */
/* Lock 1 is used by firmware and lock 2 is reserved for use by the driver  */
/* (the code below releases arbitration lock 2).                            */
1283 /* */
1284 /* Returns: */
1285 /* 0 on success, positive value on failure. */
1286 /****************************************************************************/
1287 static int
1288 bce_release_nvram_lock(struct bce_softc *sc)
1289 {
1290 int j;
1291 u32 val;
1292
1293 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n");
1294
1295 /*
1296 * Relinquish nvram interface.
1297 */
1298 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1299
1300 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1301 val = REG_RD(sc, BCE_NVM_SW_ARB);
1302 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1303 break;
1304
1305 DELAY(5);
1306 }
1307
1308 if (j >= NVRAM_TIMEOUT_COUNT) {
1309 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1310 return EBUSY;
1311 }
1312
1313 return 0;
1314 }
1315
1316
1317 #ifdef BCE_NVRAM_WRITE_SUPPORT
1318 /****************************************************************************/
1319 /* Enable NVRAM write access. */
1320 /* */
1321 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1322 /* */
1323 /* Returns: */
1324 /* 0 on success, positive value on failure. */
1325 /****************************************************************************/
1326 static int
1327 bce_enable_nvram_write(struct bce_softc *sc)
1328 {
1329 u32 val;
1330
1331 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");
1332
1333 val = REG_RD(sc, BCE_MISC_CFG);
1334 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1335
1336 if (!sc->bce_flash_info->buffered) {
1337 int j;
1338
1339 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1340 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1341
1342 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1343 DELAY(5);
1344
1345 val = REG_RD(sc, BCE_NVM_COMMAND);
1346 if (val & BCE_NVM_COMMAND_DONE)
1347 break;
1348 }
1349
1350 if (j >= NVRAM_TIMEOUT_COUNT) {
1351 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1352 return EBUSY;
1353 }
1354 }
1355 return 0;
1356 }
1357
1358
1359 /****************************************************************************/
1360 /* Disable NVRAM write access. */
1361 /* */
1362 /* When the caller is finished writing to NVRAM write access must be */
1363 /* disabled. */
1364 /* */
1365 /* Returns: */
1366 /* Nothing. */
1367 /****************************************************************************/
1368 static void
1369 bce_disable_nvram_write(struct bce_softc *sc)
1370 {
1371 u32 val;
1372
1373 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM write.\n");
1374
1375 val = REG_RD(sc, BCE_MISC_CFG);
1376 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1377 }
1378 #endif
1379
1380
1381 /****************************************************************************/
1382 /* Enable NVRAM access. */
1383 /* */
1384 /* Before accessing NVRAM for read or write operations the caller must */
/* enable NVRAM access.                                                     */
1386 /* */
1387 /* Returns: */
1388 /* Nothing. */
1389 /****************************************************************************/
1390 static void
1391 bce_enable_nvram_access(struct bce_softc *sc)
1392 {
1393 u32 val;
1394
1395 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");
1396
1397 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1398 /* Enable both bits, even on read. */
1399 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1400 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1401 }
1402
1403
1404 /****************************************************************************/
1405 /* Disable NVRAM access. */
1406 /* */
1407 /* When the caller is finished accessing NVRAM access must be disabled. */
1408 /* */
1409 /* Returns: */
1410 /* Nothing. */
1411 /****************************************************************************/
1412 static void
1413 bce_disable_nvram_access(struct bce_softc *sc)
1414 {
1415 u32 val;
1416
1417 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");
1418
1419 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1420
1421 /* Disable both bits, even after read. */
1422 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1423 val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1424 BCE_NVM_ACCESS_ENABLE_WR_EN));
1425 }
1426
1427
1428 #ifdef BCE_NVRAM_WRITE_SUPPORT
1429 /****************************************************************************/
1430 /* Erase NVRAM page before writing. */
1431 /* */
1432 /* Non-buffered flash parts require that a page be erased before it is */
1433 /* written. */
1434 /* */
1435 /* Returns: */
1436 /* 0 on success, positive value on failure. */
1437 /****************************************************************************/
static int
bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
{
	u32 cmd;
	int j;

	/* Buffered flash doesn't require an erase. */
	if (sc->bce_flash_info->buffered)
		return 0;

	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");

	/* Build an erase command. */
	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
		BCE_NVM_COMMAND_DOIT;

	/*
	 * Clear the DONE bit separately, set the NVRAM address to erase,
	 * and issue the erase command.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE)
			break;
	}

	/* DONE never set: report the erase timeout to the caller. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
		return EBUSY;
	}

	return 0;
}
1480 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1481
1482
1483 /****************************************************************************/
1484 /* Read a dword (32 bits) from NVRAM. */
1485 /* */
1486 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1487 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1488 /* */
1489 /* Returns: */
1490 /* 0 on success and the 32 bit value read, positive value on failure. */
1491 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
	u32 cmd_flags)
{
	u32 cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		/* Buffered parts address by (page number << page_bits) + byte. */
		offset = ((offset / sc->bce_flash_info->page_size) <<
			sc->bce_flash_info->page_bits) +
			(offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		u32 val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* Convert from big-endian and hand 4 bytes to caller. */
			val = bce_be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
			__FILE__, __LINE__, offset);
		rc = EBUSY;
	}

	return(rc);
}
1542
1543
1544 #ifdef BCE_NVRAM_WRITE_SUPPORT
1545 /****************************************************************************/
1546 /* Write a dword (32 bits) to NVRAM. */
1547 /* */
1548 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1549 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1550 /* enabled NVRAM write access. */
1551 /* */
1552 /* Returns: */
1553 /* 0 on success, positive value on failure. */
1554 /****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
	u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->buffered) {
		/* Buffered parts address by (page number << page_bits) + byte. */
		offset = ((offset / sc->bce_flash_info->page_size) <<
			sc->bce_flash_info->page_bits) +
			(offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	/* DONE never set: report the write timeout to the caller. */
	if (j >= NVRAM_TIMEOUT_COUNT) {
		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
			__FILE__, __LINE__, offset);
		return EBUSY;
	}

	return 0;
}
1598 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1599
1600
1601 /****************************************************************************/
1602 /* Initialize NVRAM access. */
1603 /* */
1604 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1605 /* access that device. */
1606 /* */
1607 /* Returns: */
1608 /* 0 on success, positive value on failure. */
1609 /****************************************************************************/
1610 static int
1611 bce_init_nvram(struct bce_softc *sc)
1612 {
1613 u32 val;
1614 int j, entry_count, rc;
1615 struct flash_spec *flash;
1616
1617 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);
1618
1619 /* Determine the selected interface. */
1620 val = REG_RD(sc, BCE_NVM_CFG1);
1621
1622 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1623
1624 rc = 0;
1625
1626 /*
1627 * Flash reconfiguration is required to support additional
1628 * NVRAM devices not directly supported in hardware.
1629 * Check if the flash interface was reconfigured
1630 * by the bootcode.
1631 */
1632
1633 if (val & 0x40000000) {
1634 /* Flash interface reconfigured by bootcode. */
1635
1636 DBPRINT(sc,BCE_INFO_LOAD,
1637 "bce_init_nvram(): Flash WAS reconfigured.\n");
1638
1639 for (j = 0, flash = &flash_table[0]; j < entry_count;
1640 j++, flash++) {
1641 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1642 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1643 sc->bce_flash_info = flash;
1644 break;
1645 }
1646 }
1647 } else {
1648 /* Flash interface not yet reconfigured. */
1649 u32 mask;
1650
1651 DBPRINT(sc,BCE_INFO_LOAD,
1652 "bce_init_nvram(): Flash was NOT reconfigured.\n");
1653
1654 if (val & (1 << 23))
1655 mask = FLASH_BACKUP_STRAP_MASK;
1656 else
1657 mask = FLASH_STRAP_MASK;
1658
1659 /* Look for the matching NVRAM device configuration data. */
1660 for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1661
1662 /* Check if the device matches any of the known devices. */
1663 if ((val & mask) == (flash->strapping & mask)) {
1664 /* Found a device match. */
1665 sc->bce_flash_info = flash;
1666
1667 /* Request access to the flash interface. */
1668 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1669 return rc;
1670
1671 /* Reconfigure the flash interface. */
1672 bce_enable_nvram_access(sc);
1673 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1674 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1675 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1676 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1677 bce_disable_nvram_access(sc);
1678 bce_release_nvram_lock(sc);
1679
1680 break;
1681 }
1682 }
1683 }
1684
1685 /* Check if a matching device was found. */
1686 if (j == entry_count) {
1687 sc->bce_flash_info = NULL;
1688 BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
1689 __FILE__, __LINE__);
1690 rc = ENODEV;
1691 }
1692
1693 /* Write the flash config data to the shared memory interface. */
1694 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1695 val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1696 if (val)
1697 sc->bce_flash_size = val;
1698 else
1699 sc->bce_flash_size = sc->bce_flash_info->total_size;
1700
1701 DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1702 sc->bce_flash_info->total_size);
1703
1704 DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__);
1705
1706 return rc;
1707 }
1708
1709
1710 /****************************************************************************/
1711 /* Read an arbitrary range of data from NVRAM. */
1712 /* */
1713 /* Prepares the NVRAM interface for access and reads the requested data */
1714 /* into the supplied buffer. */
1715 /* */
1716 /* Returns: */
1717 /* 0 on success and the data read, positive value on failure. */
1718 /****************************************************************************/
1719 static int
1720 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1721 int buf_size)
1722 {
1723 int rc = 0;
1724 u32 cmd_flags, offset32, len32, extra;
1725
1726 if (buf_size == 0)
1727 return 0;
1728
1729 /* Request access to the flash interface. */
1730 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1731 return rc;
1732
1733 /* Enable access to flash interface */
1734 bce_enable_nvram_access(sc);
1735
1736 len32 = buf_size;
1737 offset32 = offset;
1738 extra = 0;
1739
1740 cmd_flags = 0;
1741
1742 if (offset32 & 3) {
1743 u8 buf[4];
1744 u32 pre_len;
1745
1746 offset32 &= ~3;
1747 pre_len = 4 - (offset & 3);
1748
1749 if (pre_len >= len32) {
1750 pre_len = len32;
1751 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1752 }
1753 else {
1754 cmd_flags = BCE_NVM_COMMAND_FIRST;
1755 }
1756
1757 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1758
1759 if (rc)
1760 return rc;
1761
1762 memcpy(ret_buf, buf + (offset & 3), pre_len);
1763
1764 offset32 += 4;
1765 ret_buf += pre_len;
1766 len32 -= pre_len;
1767 }
1768
1769 if (len32 & 3) {
1770 extra = 4 - (len32 & 3);
1771 len32 = (len32 + 4) & ~3;
1772 }
1773
1774 if (len32 == 4) {
1775 u8 buf[4];
1776
1777 if (cmd_flags)
1778 cmd_flags = BCE_NVM_COMMAND_LAST;
1779 else
1780 cmd_flags = BCE_NVM_COMMAND_FIRST |
1781 BCE_NVM_COMMAND_LAST;
1782
1783 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1784
1785 memcpy(ret_buf, buf, 4 - extra);
1786 }
1787 else if (len32 > 0) {
1788 u8 buf[4];
1789
1790 /* Read the first word. */
1791 if (cmd_flags)
1792 cmd_flags = 0;
1793 else
1794 cmd_flags = BCE_NVM_COMMAND_FIRST;
1795
1796 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1797
1798 /* Advance to the next dword. */
1799 offset32 += 4;
1800 ret_buf += 4;
1801 len32 -= 4;
1802
1803 while (len32 > 4 && rc == 0) {
1804 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1805
1806 /* Advance to the next dword. */
1807 offset32 += 4;
1808 ret_buf += 4;
1809 len32 -= 4;
1810 }
1811
1812 if (rc)
1813 return rc;
1814
1815 cmd_flags = BCE_NVM_COMMAND_LAST;
1816 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1817
1818 memcpy(ret_buf, buf, 4 - extra);
1819 }
1820
1821 /* Disable access to flash interface and release the lock. */
1822 bce_disable_nvram_access(sc);
1823 bce_release_nvram_lock(sc);
1824
1825 return rc;
1826 }
1827
1828
1829 #ifdef BCE_NVRAM_WRITE_SUPPORT
1830 /****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
1832 /* */
1833 /* Prepares the NVRAM interface for write access and writes the requested */
1834 /* data from the supplied buffer. The caller is responsible for */
1835 /* calculating any appropriate CRCs. */
1836 /* */
1837 /* Returns: */
1838 /* 0 on success, positive value on failure. */
1839 /****************************************************************************/
1840 static int
1841 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1842 int buf_size)
1843 {
1844 u32 written, offset32, len32;
1845 u8 *buf, start[4], end[4];
1846 int rc = 0;
1847 int align_start, align_end;
1848
1849 buf = data_buf;
1850 offset32 = offset;
1851 len32 = buf_size;
1852 align_start = align_end = 0;
1853
1854 if ((align_start = (offset32 & 3))) {
1855 offset32 &= ~3;
1856 len32 += align_start;
1857 if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1858 return rc;
1859 }
1860
1861 if (len32 & 3) {
1862 if ((len32 > 4) || !align_start) {
1863 align_end = 4 - (len32 & 3);
1864 len32 += align_end;
1865 if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1866 end, 4))) {
1867 return rc;
1868 }
1869 }
1870 }
1871
1872 if (align_start || align_end) {
1873 buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1874 if (buf == 0)
1875 return ENOMEM;
1876 if (align_start) {
1877 memcpy(buf, start, 4);
1878 }
1879 if (align_end) {
1880 memcpy(buf + len32 - 4, end, 4);
1881 }
1882 memcpy(buf + align_start, data_buf, buf_size);
1883 }
1884
1885 written = 0;
1886 while ((written < len32) && (rc == 0)) {
1887 u32 page_start, page_end, data_start, data_end;
1888 u32 addr, cmd_flags;
1889 int i;
1890 u8 flash_buffer[264];
1891
1892 /* Find the page_start addr */
1893 page_start = offset32 + written;
1894 page_start -= (page_start % sc->bce_flash_info->page_size);
1895 /* Find the page_end addr */
1896 page_end = page_start + sc->bce_flash_info->page_size;
1897 /* Find the data_start addr */
1898 data_start = (written == 0) ? offset32 : page_start;
1899 /* Find the data_end addr */
1900 data_end = (page_end > offset32 + len32) ?
1901 (offset32 + len32) : page_end;
1902
1903 /* Request access to the flash interface. */
1904 if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1905 goto nvram_write_end;
1906
1907 /* Enable access to flash interface */
1908 bce_enable_nvram_access(sc);
1909
1910 cmd_flags = BCE_NVM_COMMAND_FIRST;
1911 if (sc->bce_flash_info->buffered == 0) {
1912 int j;
1913
1914 /* Read the whole page into the buffer
1915 * (non-buffer flash only) */
1916 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1917 if (j == (sc->bce_flash_info->page_size - 4)) {
1918 cmd_flags |= BCE_NVM_COMMAND_LAST;
1919 }
1920 rc = bce_nvram_read_dword(sc,
1921 page_start + j,
1922 &flash_buffer[j],
1923 cmd_flags);
1924
1925 if (rc)
1926 goto nvram_write_end;
1927
1928 cmd_flags = 0;
1929 }
1930 }
1931
1932 /* Enable writes to flash interface (unlock write-protect) */
1933 if ((rc = bce_enable_nvram_write(sc)) != 0)
1934 goto nvram_write_end;
1935
1936 /* Erase the page */
1937 if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1938 goto nvram_write_end;
1939
1940 /* Re-enable the write again for the actual write */
1941 bce_enable_nvram_write(sc);
1942
1943 /* Loop to write back the buffer data from page_start to
1944 * data_start */
1945 i = 0;
1946 if (sc->bce_flash_info->buffered == 0) {
1947 for (addr = page_start; addr < data_start;
1948 addr += 4, i += 4) {
1949
1950 rc = bce_nvram_write_dword(sc, addr,
1951 &flash_buffer[i], cmd_flags);
1952
1953 if (rc != 0)
1954 goto nvram_write_end;
1955
1956 cmd_flags = 0;
1957 }
1958 }
1959
1960 /* Loop to write the new data from data_start to data_end */
1961 for (addr = data_start; addr < data_end; addr += 4, i++) {
1962 if ((addr == page_end - 4) ||
1963 ((sc->bce_flash_info->buffered) &&
1964 (addr == data_end - 4))) {
1965
1966 cmd_flags |= BCE_NVM_COMMAND_LAST;
1967 }
1968 rc = bce_nvram_write_dword(sc, addr, buf,
1969 cmd_flags);
1970
1971 if (rc != 0)
1972 goto nvram_write_end;
1973
1974 cmd_flags = 0;
1975 buf += 4;
1976 }
1977
1978 /* Loop to write back the buffer data from data_end
1979 * to page_end */
1980 if (sc->bce_flash_info->buffered == 0) {
1981 for (addr = data_end; addr < page_end;
1982 addr += 4, i += 4) {
1983
1984 if (addr == page_end-4) {
1985 cmd_flags = BCE_NVM_COMMAND_LAST;
1986 }
1987 rc = bce_nvram_write_dword(sc, addr,
1988 &flash_buffer[i], cmd_flags);
1989
1990 if (rc != 0)
1991 goto nvram_write_end;
1992
1993 cmd_flags = 0;
1994 }
1995 }
1996
1997 /* Disable writes to flash interface (lock write-protect) */
1998 bce_disable_nvram_write(sc);
1999
2000 /* Disable access to flash interface */
2001 bce_disable_nvram_access(sc);
2002 bce_release_nvram_lock(sc);
2003
2004 /* Increment written */
2005 written += data_end - data_start;
2006 }
2007
2008 nvram_write_end:
2009 if (align_start || align_end)
2010 free(buf, M_DEVBUF);
2011
2012 return rc;
2013 }
2014 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2015
2016
2017 /****************************************************************************/
2018 /* Verifies that NVRAM is accessible and contains valid data. */
2019 /* */
2020 /* Reads the configuration data from NVRAM and verifies that the CRC is */
2021 /* correct. */
2022 /* */
2023 /* Returns: */
2024 /* 0 on success, positive value on failure. */
2025 /****************************************************************************/
2026 static int
2027 bce_nvram_test(struct bce_softc *sc)
2028 {
2029 u32 buf[BCE_NVRAM_SIZE / 4];
2030 u8 *data = (u8 *) buf;
2031 int rc = 0;
2032 u32 magic, csum;
2033
2034
2035 /*
2036 * Check that the device NVRAM is valid by reading
2037 * the magic value at offset 0.
2038 */
2039 if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
2040 goto bce_nvram_test_done;
2041
2042
2043 magic = bce_be32toh(buf[0]);
2044 if (magic != BCE_NVRAM_MAGIC) {
2045 rc = ENODEV;
2046 BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2047 "Found: 0x%08X\n",
2048 __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2049 goto bce_nvram_test_done;
2050 }
2051
2052 /*
2053 * Verify that the device NVRAM includes valid
2054 * configuration data.
2055 */
2056 if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
2057 goto bce_nvram_test_done;
2058
2059 csum = ether_crc32_le(data, 0x100);
2060 if (csum != BCE_CRC32_RESIDUAL) {
2061 rc = ENODEV;
2062 BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2063 "Expected: 0x%08X, Found: 0x%08X\n",
2064 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2065 goto bce_nvram_test_done;
2066 }
2067
2068 csum = ether_crc32_le(data + 0x100, 0x100);
2069 if (csum != BCE_CRC32_RESIDUAL) {
2070 BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2071 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2072 __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2073 rc = ENODEV;
2074 }
2075
2076 bce_nvram_test_done:
2077 return rc;
2078 }
2079
2080
2081 /****************************************************************************/
2082 /* Free any DMA memory owned by the driver. */
2083 /* */
2084 /* Scans through each data structre that requires DMA memory and frees */
2085 /* the memory if allocated. */
2086 /* */
2087 /* Returns: */
2088 /* Nothing. */
2089 /****************************************************************************/
/*
 * Release every DMA resource that bce_dma_alloc() may have created, in
 * dependency order: for each object, free the memory, then unload and
 * destroy its map, then destroy its tag.  Each step is guarded by a
 * NULL check so this is safe to call on a partially-allocated softc
 * (e.g. from the bce_dma_alloc() failure path via detach).
 */
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Destroy the status block. */
	/* NOTE(review): sc->status_map was created by bus_dmamem_alloc();
	 * bus_dma(9) says such maps should not be passed to
	 * bus_dmamap_destroy() — confirm against the target release. */
	if (sc->status_block != NULL)
		bus_dmamem_free(
			sc->status_tag,
			sc->status_block,
			sc->status_map);

	if (sc->status_map != NULL) {
		bus_dmamap_unload(
			sc->status_tag,
			sc->status_map);
		bus_dmamap_destroy(sc->status_tag,
			sc->status_map);
	}

	if (sc->status_tag != NULL)
		bus_dma_tag_destroy(sc->status_tag);


	/* Destroy the statistics block. */
	if (sc->stats_block != NULL)
		bus_dmamem_free(
			sc->stats_tag,
			sc->stats_block,
			sc->stats_map);

	if (sc->stats_map != NULL) {
		bus_dmamap_unload(
			sc->stats_tag,
			sc->stats_map);
		bus_dmamap_destroy(sc->stats_tag,
			sc->stats_map);
	}

	if (sc->stats_tag != NULL)
		bus_dma_tag_destroy(sc->stats_tag);


	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < TX_PAGES; i++ ) {
		if (sc->tx_bd_chain[i] != NULL)
			bus_dmamem_free(
				sc->tx_bd_chain_tag,
				sc->tx_bd_chain[i],
				sc->tx_bd_chain_map[i]);

		if (sc->tx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
				sc->tx_bd_chain_tag,
				sc->tx_bd_chain_map[i]);
			bus_dmamap_destroy(
				sc->tx_bd_chain_tag,
				sc->tx_bd_chain_map[i]);
		}

	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL)
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);


	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < RX_PAGES; i++ ) {
		if (sc->rx_bd_chain[i] != NULL)
			bus_dmamem_free(
				sc->rx_bd_chain_tag,
				sc->rx_bd_chain[i],
				sc->rx_bd_chain_map[i]);

		if (sc->rx_bd_chain_map[i] != NULL) {
			bus_dmamap_unload(
				sc->rx_bd_chain_tag,
				sc->rx_bd_chain_map[i]);
			bus_dmamap_destroy(
				sc->rx_bd_chain_tag,
				sc->rx_bd_chain_map[i]);
		}
	}

	/* Destroy the RX buffer descriptor tag. */
	if (sc->rx_bd_chain_tag != NULL)
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);


	/* Unload and destroy the TX mbuf maps (one per TX descriptor). */
	for (i = 0; i < TOTAL_TX_BD; i++) {
		if (sc->tx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->tx_mbuf_tag,
				sc->tx_mbuf_map[i]);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
				sc->tx_mbuf_map[i]);
		}
	}

	/* Destroy the TX mbuf tag. */
	if (sc->tx_mbuf_tag != NULL)
		bus_dma_tag_destroy(sc->tx_mbuf_tag);


	/* Unload and destroy the RX mbuf maps (one per RX descriptor). */
	for (i = 0; i < TOTAL_RX_BD; i++) {
		if (sc->rx_mbuf_map[i] != NULL) {
			bus_dmamap_unload(sc->rx_mbuf_tag,
				sc->rx_mbuf_map[i]);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
				sc->rx_mbuf_map[i]);
		}
	}

	/* Destroy the RX mbuf tag. */
	if (sc->rx_mbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rx_mbuf_tag);


	/* Destroy the parent tag last: all child tags are gone by now. */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

}
2219
2220
2221 /****************************************************************************/
2222 /* Get DMA memory from the OS. */
2223 /* */
2224 /* Validates that the OS has provided DMA buffers in response to a */
2225 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2226 /* When the callback is used the OS will return 0 for the mapping function */
2227 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
2228 /* failures back to the caller. */
2229 /* */
2230 /* Returns: */
2231 /* Nothing. */
2232 /****************************************************************************/
2233 static void
2234 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2235 {
2236 bus_addr_t *busaddr = arg;
2237
2238 /* Simulate a mapping failure. */
2239 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2240 printf("bce: %s(%d): Simulating DMA mapping error.\n",
2241 __FILE__, __LINE__);
2242 error = ENOMEM);
2243
2244 /* Check for an error and signal the caller that an error occurred. */
2245 if (error) {
2246 printf("bce %s(%d): DMA mapping error! error = %d, "
2247 "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2248 *busaddr = 0;
2249 return;
2250 }
2251
2252 *busaddr = segs->ds_addr;
2253 return;
2254 }
2255
2256
2257 /****************************************************************************/
2258 /* Allocate any DMA memory needed by the driver. */
2259 /* */
2260 /* Allocates DMA memory needed for the various global structures needed by */
2261 /* hardware. */
2262 /* */
2263 /* Returns: */
2264 /* 0 for success, positive value for failure. */
2265 /****************************************************************************/
2266 static int
2267 bce_dma_alloc(device_t dev)
2268 {
2269 struct bce_softc *sc;
2270 int i, error, rc = 0;
2271 bus_addr_t busaddr;
2272 bus_size_t max_size, max_seg_size;
2273 int max_segments;
2274
2275 sc = device_get_softc(dev);
2276
2277 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2278
2279 /*
2280 * Allocate the parent bus DMA tag appropriate for PCI.
2281 */
2282 if (bus_dma_tag_create(NULL,
2283 1,
2284 BCE_DMA_BOUNDARY,
2285 sc->max_bus_addr,
2286 BUS_SPACE_MAXADDR,
2287 NULL, NULL,
2288 MAXBSIZE,
2289 BUS_SPACE_UNRESTRICTED,
2290 BUS_SPACE_MAXSIZE_32BIT,
2291 0,
2292 NULL, NULL,
2293 &sc->parent_tag)) {
2294 BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2295 __FILE__, __LINE__);
2296 rc = ENOMEM;
2297 goto bce_dma_alloc_exit;
2298 }
2299
2300 /*
2301 * Create a DMA tag for the status block, allocate and clear the
2302 * memory, map the memory into DMA space, and fetch the physical
2303 * address of the block.
2304 */
2305 if (bus_dma_tag_create(sc->parent_tag,
2306 BCE_DMA_ALIGN,
2307 BCE_DMA_BOUNDARY,
2308 sc->max_bus_addr,
2309 BUS_SPACE_MAXADDR,
2310 NULL, NULL,
2311 BCE_STATUS_BLK_SZ,
2312 1,
2313 BCE_STATUS_BLK_SZ,
2314 0,
2315 NULL, NULL,
2316 &sc->status_tag)) {
2317 BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2318 __FILE__, __LINE__);
2319 rc = ENOMEM;
2320 goto bce_dma_alloc_exit;
2321 }
2322
2323 if(bus_dmamem_alloc(sc->status_tag,
2324 (void **)&sc->status_block,
2325 BUS_DMA_NOWAIT,
2326 &sc->status_map)) {
2327 BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2328 __FILE__, __LINE__);
2329 rc = ENOMEM;
2330 goto bce_dma_alloc_exit;
2331 }
2332
2333 bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2334
2335 error = bus_dmamap_load(sc->status_tag,
2336 sc->status_map,
2337 sc->status_block,
2338 BCE_STATUS_BLK_SZ,
2339 bce_dma_map_addr,
2340 &busaddr,
2341 BUS_DMA_NOWAIT);
2342
2343 if (error) {
2344 BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2345 __FILE__, __LINE__);
2346 rc = ENOMEM;
2347 goto bce_dma_alloc_exit;
2348 }
2349
2350 sc->status_block_paddr = busaddr;
2351 /* DRC - Fix for 64 bit addresses. */
2352 DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2353 (u32) sc->status_block_paddr);
2354
2355 /*
2356 * Create a DMA tag for the statistics block, allocate and clear the
2357 * memory, map the memory into DMA space, and fetch the physical
2358 * address of the block.
2359 */
2360 if (bus_dma_tag_create(sc->parent_tag,
2361 BCE_DMA_ALIGN,
2362 BCE_DMA_BOUNDARY,
2363 sc->max_bus_addr,
2364 BUS_SPACE_MAXADDR,
2365 NULL, NULL,
2366 BCE_STATS_BLK_SZ,
2367 1,
2368 BCE_STATS_BLK_SZ,
2369 0,
2370 NULL, NULL,
2371 &sc->stats_tag)) {
2372 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2373 __FILE__, __LINE__);
2374 rc = ENOMEM;
2375 goto bce_dma_alloc_exit;
2376 }
2377
2378 if (bus_dmamem_alloc(sc->stats_tag,
2379 (void **)&sc->stats_block,
2380 BUS_DMA_NOWAIT,
2381 &sc->stats_map)) {
2382 BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
2383 __FILE__, __LINE__);
2384 rc = ENOMEM;
2385 goto bce_dma_alloc_exit;
2386 }
2387
2388 bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2389
2390 error = bus_dmamap_load(sc->stats_tag,
2391 sc->stats_map,
2392 sc->stats_block,
2393 BCE_STATS_BLK_SZ,
2394 bce_dma_map_addr,
2395 &busaddr,
2396 BUS_DMA_NOWAIT);
2397
2398 if(error) {
2399 BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
2400 __FILE__, __LINE__);
2401 rc = ENOMEM;
2402 goto bce_dma_alloc_exit;
2403 }
2404
2405 sc->stats_block_paddr = busaddr;
2406 /* DRC - Fix for 64 bit address. */
2407 DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2408 (u32) sc->stats_block_paddr);
2409
2410 /*
2411 * Create a DMA tag for the TX buffer descriptor chain,
2412 * allocate and clear the memory, and fetch the
2413 * physical address of the block.
2414 */
2415 if(bus_dma_tag_create(sc->parent_tag,
2416 BCM_PAGE_SIZE,
2417 BCE_DMA_BOUNDARY,
2418 sc->max_bus_addr,
2419 BUS_SPACE_MAXADDR,
2420 NULL, NULL,
2421 BCE_TX_CHAIN_PAGE_SZ,
2422 1,
2423 BCE_TX_CHAIN_PAGE_SZ,
2424 0,
2425 NULL, NULL,
2426 &sc->tx_bd_chain_tag)) {
2427 BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2428 __FILE__, __LINE__);
2429 rc = ENOMEM;
2430 goto bce_dma_alloc_exit;
2431 }
2432
2433 for (i = 0; i < TX_PAGES; i++) {
2434
2435 if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
2436 (void **)&sc->tx_bd_chain[i],
2437 BUS_DMA_NOWAIT,
2438 &sc->tx_bd_chain_map[i])) {
2439 BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
2440 "chain DMA memory!\n", __FILE__, __LINE__);
2441 rc = ENOMEM;
2442 goto bce_dma_alloc_exit;
2443 }
2444
2445 error = bus_dmamap_load(sc->tx_bd_chain_tag,
2446 sc->tx_bd_chain_map[i],
2447 sc->tx_bd_chain[i],
2448 BCE_TX_CHAIN_PAGE_SZ,
2449 bce_dma_map_addr,
2450 &busaddr,
2451 BUS_DMA_NOWAIT);
2452
2453 if (error) {
2454 BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
2455 __FILE__, __LINE__);
2456 rc = ENOMEM;
2457 goto bce_dma_alloc_exit;
2458 }
2459
2460 sc->tx_bd_chain_paddr[i] = busaddr;
2461 /* DRC - Fix for 64 bit systems. */
2462 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2463 i, (u32) sc->tx_bd_chain_paddr[i]);
2464 }
2465
2466 /* Check the required size before mapping to conserve resources. */
2467 if (bce_tso_enable) {
2468 max_size = BCE_TSO_MAX_SIZE;
2469 max_segments = BCE_MAX_SEGMENTS;
2470 max_seg_size = BCE_TSO_MAX_SEG_SIZE;
2471 } else {
2472 max_size = MCLBYTES * BCE_MAX_SEGMENTS;
2473 max_segments = BCE_MAX_SEGMENTS;
2474 max_seg_size = MCLBYTES;
2475 }
2476
2477 /* Create a DMA tag for TX mbufs. */
2478 if (bus_dma_tag_create(sc->parent_tag,
2479 1,
2480 BCE_DMA_BOUNDARY,
2481 sc->max_bus_addr,
2482 BUS_SPACE_MAXADDR,
2483 NULL, NULL,
2484 max_size,
2485 max_segments,
2486 max_seg_size,
2487 0,
2488 NULL, NULL,
2489 &sc->tx_mbuf_tag)) {
2490 BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
2491 __FILE__, __LINE__);
2492 rc = ENOMEM;
2493 goto bce_dma_alloc_exit;
2494 }
2495
2496 /* Create DMA maps for the TX mbufs clusters. */
2497 for (i = 0; i < TOTAL_TX_BD; i++) {
2498 if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2499 &sc->tx_mbuf_map[i])) {
2500 BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
2501 __FILE__, __LINE__);
2502 rc = ENOMEM;
2503 goto bce_dma_alloc_exit;
2504 }
2505 }
2506
2507 /*
2508 * Create a DMA tag for the RX buffer descriptor chain,
2509 * allocate and clear the memory, and fetch the physical
2510 * address of the blocks.
2511 */
2512 if (bus_dma_tag_create(sc->parent_tag,
2513 BCM_PAGE_SIZE,
2514 BCE_DMA_BOUNDARY,
2515 BUS_SPACE_MAXADDR,
2516 sc->max_bus_addr,
2517 NULL, NULL,
2518 BCE_RX_CHAIN_PAGE_SZ,
2519 1,
2520 BCE_RX_CHAIN_PAGE_SZ,
2521 0,
2522 NULL, NULL,
2523 &sc->rx_bd_chain_tag)) {
2524 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2525 __FILE__, __LINE__);
2526 rc = ENOMEM;
2527 goto bce_dma_alloc_exit;
2528 }
2529
2530 for (i = 0; i < RX_PAGES; i++) {
2531
2532 if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
2533 (void **)&sc->rx_bd_chain[i],
2534 BUS_DMA_NOWAIT,
2535 &sc->rx_bd_chain_map[i])) {
2536 BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
2537 "DMA memory!\n", __FILE__, __LINE__);
2538 rc = ENOMEM;
2539 goto bce_dma_alloc_exit;
2540 }
2541
2542 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2543
2544 error = bus_dmamap_load(sc->rx_bd_chain_tag,
2545 sc->rx_bd_chain_map[i],
2546 sc->rx_bd_chain[i],
2547 BCE_RX_CHAIN_PAGE_SZ,
2548 bce_dma_map_addr,
2549 &busaddr,
2550 BUS_DMA_NOWAIT);
2551
2552 if (error) {
2553 BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
2554 __FILE__, __LINE__);
2555 rc = ENOMEM;
2556 goto bce_dma_alloc_exit;
2557 }
2558
2559 sc->rx_bd_chain_paddr[i] = busaddr;
2560 /* DRC - Fix for 64 bit systems. */
2561 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2562 i, (u32) sc->rx_bd_chain_paddr[i]);
2563 }
2564
2565 /*
2566 * Create a DMA tag for RX mbufs.
2567 */
2568 if (bus_dma_tag_create(sc->parent_tag,
2569 1,
2570 BCE_DMA_BOUNDARY,
2571 sc->max_bus_addr,
2572 BUS_SPACE_MAXADDR,
2573 NULL, NULL,
2574 MJUM9BYTES,
2575 BCE_MAX_SEGMENTS,
2576 MJUM9BYTES,
2577 0,
2578 NULL, NULL,
2579 &sc->rx_mbuf_tag)) {
2580 BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
2581 __FILE__, __LINE__);
2582 rc = ENOMEM;
2583 goto bce_dma_alloc_exit;
2584 }
2585
2586 /* Create DMA maps for the RX mbuf clusters. */
2587 for (i = 0; i < TOTAL_RX_BD; i++) {
2588 if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2589 &sc->rx_mbuf_map[i])) {
2590 BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
2591 __FILE__, __LINE__);
2592 rc = ENOMEM;
2593 goto bce_dma_alloc_exit;
2594 }
2595 }
2596
2597 bce_dma_alloc_exit:
2598 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2599
2600 return(rc);
2601 }
2602
2603
2604 /****************************************************************************/
2605 /* Release all resources used by the driver. */
2606 /* */
2607 /* Releases all resources acquired by the driver including interrupts, */
2608 /* interrupt handler, interfaces, mutexes, and DMA memory. */
2609 /* */
2610 /* Returns: */
2611 /* Nothing. */
2612 /****************************************************************************/
2613 static void
2614 bce_release_resources(struct bce_softc *sc)
2615 {
2616 device_t dev;
2617
2618 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2619
2620 dev = sc->bce_dev;
2621
2622 bce_dma_free(sc);
2623
2624 if (sc->bce_intrhand != NULL) {
2625 DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
2626 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
2627 }
2628
2629 if (sc->bce_res_irq != NULL) {
2630 DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
2631 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2632 sc->bce_res_irq);
2633 }
2634
2635 if (sc->bce_flags & BCE_USING_MSI_FLAG) {
2636 DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n");
2637 pci_release_msi(dev);
2638 }
2639
2640 if (sc->bce_res_mem != NULL) {
2641 DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
2642 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
2643 }
2644
2645 if (sc->bce_ifp != NULL) {
2646 DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
2647 if_free(sc->bce_ifp);
2648 }
2649
2650 if (mtx_initialized(&sc->bce_mtx))
2651 BCE_LOCK_DESTROY(sc);
2652
2653 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2654
2655 }
2656
2657
2658 /****************************************************************************/
2659 /* Firmware synchronization. */
2660 /* */
2661 /* Before performing certain events such as a chip reset, synchronize with */
2662 /* the firmware first. */
2663 /* */
2664 /* Returns: */
2665 /* 0 for success, positive value for failure. */
2666 /****************************************************************************/
2667 static int
2668 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2669 {
2670 int i, rc = 0;
2671 u32 val;
2672
2673 /* Don't waste any time if we've timed out before. */
2674 if (sc->bce_fw_timed_out) {
2675 rc = EBUSY;
2676 goto bce_fw_sync_exit;
2677 }
2678
2679 /* Increment the message sequence number. */
2680 sc->bce_fw_wr_seq++;
2681 msg_data |= sc->bce_fw_wr_seq;
2682
2683 DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2684
2685 /* Send the message to the bootcode driver mailbox. */
2686 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2687
2688 /* Wait for the bootcode to acknowledge the message. */
2689 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2690 /* Check for a response in the bootcode firmware mailbox. */
2691 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2692 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2693 break;
2694 DELAY(1000);
2695 }
2696
2697 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2698 if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2699 ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2700
2701 BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
2702 "msg_data = 0x%08X\n",
2703 __FILE__, __LINE__, msg_data);
2704
2705 msg_data &= ~BCE_DRV_MSG_CODE;
2706 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2707
2708 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2709
2710 sc->bce_fw_timed_out = 1;
2711 rc = EBUSY;
2712 }
2713
2714 bce_fw_sync_exit:
2715 return (rc);
2716 }
2717
2718
2719 /****************************************************************************/
2720 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2721 /* */
2722 /* Returns: */
2723 /* Nothing. */
2724 /****************************************************************************/
2725 static void
2726 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2727 u32 rv2p_code_len, u32 rv2p_proc)
2728 {
2729 int i;
2730 u32 val;
2731
2732 for (i = 0; i < rv2p_code_len; i += 8) {
2733 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2734 rv2p_code++;
2735 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2736 rv2p_code++;
2737
2738 if (rv2p_proc == RV2P_PROC1) {
2739 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2740 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2741 }
2742 else {
2743 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2744 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2745 }
2746 }
2747
2748 /* Reset the processor, un-stall is done later. */
2749 if (rv2p_proc == RV2P_PROC1) {
2750 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2751 }
2752 else {
2753 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2754 }
2755 }
2756
2757
2758 /****************************************************************************/
2759 /* Load RISC processor firmware. */
2760 /* */
2761 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2762 /* associated with a particular processor. */
2763 /* */
2764 /* Returns: */
2765 /* Nothing. */
2766 /****************************************************************************/
2767 static void
2768 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2769 struct fw_info *fw)
2770 {
2771 u32 offset;
2772 u32 val;
2773
2774 /* Halt the CPU. */
2775 val = REG_RD_IND(sc, cpu_reg->mode);
2776 val |= cpu_reg->mode_value_halt;
2777 REG_WR_IND(sc, cpu_reg->mode, val);
2778 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2779
2780 /* Load the Text area. */
2781 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2782 if (fw->text) {
2783 int j;
2784
2785 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2786 REG_WR_IND(sc, offset, fw->text[j]);
2787 }
2788 }
2789
2790 /* Load the Data area. */
2791 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2792 if (fw->data) {
2793 int j;
2794
2795 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2796 REG_WR_IND(sc, offset, fw->data[j]);
2797 }
2798 }
2799
2800 /* Load the SBSS area. */
2801 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2802 if (fw->sbss) {
2803 int j;
2804
2805 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2806 REG_WR_IND(sc, offset, fw->sbss[j]);
2807 }
2808 }
2809
2810 /* Load the BSS area. */
2811 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2812 if (fw->bss) {
2813 int j;
2814
2815 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2816 REG_WR_IND(sc, offset, fw->bss[j]);
2817 }
2818 }
2819
2820 /* Load the Read-Only area. */
2821 offset = cpu_reg->spad_base +
2822 (fw->rodata_addr - cpu_reg->mips_view_base);
2823 if (fw->rodata) {
2824 int j;
2825
2826 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2827 REG_WR_IND(sc, offset, fw->rodata[j]);
2828 }
2829 }
2830
2831 /* Clear the pre-fetch instruction. */
2832 REG_WR_IND(sc, cpu_reg->inst, 0);
2833 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2834
2835 /* Start the CPU. */
2836 val = REG_RD_IND(sc, cpu_reg->mode);
2837 val &= ~cpu_reg->mode_value_halt;
2838 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2839 REG_WR_IND(sc, cpu_reg->mode, val);
2840 }
2841
2842
2843 /****************************************************************************/
2844 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2845 /* */
2846 /* Loads the firmware for each CPU and starts the CPU. */
2847 /* */
2848 /* Returns: */
2849 /* Nothing. */
2850 /****************************************************************************/
2851 static void
2852 bce_init_cpus(struct bce_softc *sc)
2853 {
2854 struct cpu_reg cpu_reg;
2855 struct fw_info fw;
2856
2857 /* Initialize the RV2P processor. */
2858 bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2859 bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2860
2861 /* Initialize the RX Processor. */
2862 cpu_reg.mode = BCE_RXP_CPU_MODE;
2863 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2864 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2865 cpu_reg.state = BCE_RXP_CPU_STATE;
2866 cpu_reg.state_value_clear = 0xffffff;
2867 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2868 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2869 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2870 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2871 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2872 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2873 cpu_reg.mips_view_base = 0x8000000;
2874
2875 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2876 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2877 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2878 fw.start_addr = bce_RXP_b06FwStartAddr;
2879
2880 fw.text_addr = bce_RXP_b06FwTextAddr;
2881 fw.text_len = bce_RXP_b06FwTextLen;
2882 fw.text_index = 0;
2883 fw.text = bce_RXP_b06FwText;
2884
2885 fw.data_addr = bce_RXP_b06FwDataAddr;
2886 fw.data_len = bce_RXP_b06FwDataLen;
2887 fw.data_index = 0;
2888 fw.data = bce_RXP_b06FwData;
2889
2890 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2891 fw.sbss_len = bce_RXP_b06FwSbssLen;
2892 fw.sbss_index = 0;
2893 fw.sbss = bce_RXP_b06FwSbss;
2894
2895 fw.bss_addr = bce_RXP_b06FwBssAddr;
2896 fw.bss_len = bce_RXP_b06FwBssLen;
2897 fw.bss_index = 0;
2898 fw.bss = bce_RXP_b06FwBss;
2899
2900 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2901 fw.rodata_len = bce_RXP_b06FwRodataLen;
2902 fw.rodata_index = 0;
2903 fw.rodata = bce_RXP_b06FwRodata;
2904
2905 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2906 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2907
2908 /* Initialize the TX Processor. */
2909 cpu_reg.mode = BCE_TXP_CPU_MODE;
2910 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2911 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2912 cpu_reg.state = BCE_TXP_CPU_STATE;
2913 cpu_reg.state_value_clear = 0xffffff;
2914 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2915 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2916 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2917 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2918 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2919 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2920 cpu_reg.mips_view_base = 0x8000000;
2921
2922 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2923 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2924 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2925 fw.start_addr = bce_TXP_b06FwStartAddr;
2926
2927 fw.text_addr = bce_TXP_b06FwTextAddr;
2928 fw.text_len = bce_TXP_b06FwTextLen;
2929 fw.text_index = 0;
2930 fw.text = bce_TXP_b06FwText;
2931
2932 fw.data_addr = bce_TXP_b06FwDataAddr;
2933 fw.data_len = bce_TXP_b06FwDataLen;
2934 fw.data_index = 0;
2935 fw.data = bce_TXP_b06FwData;
2936
2937 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2938 fw.sbss_len = bce_TXP_b06FwSbssLen;
2939 fw.sbss_index = 0;
2940 fw.sbss = bce_TXP_b06FwSbss;
2941
2942 fw.bss_addr = bce_TXP_b06FwBssAddr;
2943 fw.bss_len = bce_TXP_b06FwBssLen;
2944 fw.bss_index = 0;
2945 fw.bss = bce_TXP_b06FwBss;
2946
2947 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2948 fw.rodata_len = bce_TXP_b06FwRodataLen;
2949 fw.rodata_index = 0;
2950 fw.rodata = bce_TXP_b06FwRodata;
2951
2952 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2953 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2954
2955 /* Initialize the TX Patch-up Processor. */
2956 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2957 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2958 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2959 cpu_reg.state = BCE_TPAT_CPU_STATE;
2960 cpu_reg.state_value_clear = 0xffffff;
2961 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2962 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2963 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2964 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2965 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2966 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2967 cpu_reg.mips_view_base = 0x8000000;
2968
2969 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2970 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2971 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2972 fw.start_addr = bce_TPAT_b06FwStartAddr;
2973
2974 fw.text_addr = bce_TPAT_b06FwTextAddr;
2975 fw.text_len = bce_TPAT_b06FwTextLen;
2976 fw.text_index = 0;
2977 fw.text = bce_TPAT_b06FwText;
2978
2979 fw.data_addr = bce_TPAT_b06FwDataAddr;
2980 fw.data_len = bce_TPAT_b06FwDataLen;
2981 fw.data_index = 0;
2982 fw.data = bce_TPAT_b06FwData;
2983
2984 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2985 fw.sbss_len = bce_TPAT_b06FwSbssLen;
2986 fw.sbss_index = 0;
2987 fw.sbss = bce_TPAT_b06FwSbss;
2988
2989 fw.bss_addr = bce_TPAT_b06FwBssAddr;
2990 fw.bss_len = bce_TPAT_b06FwBssLen;
2991 fw.bss_index = 0;
2992 fw.bss = bce_TPAT_b06FwBss;
2993
2994 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2995 fw.rodata_len = bce_TPAT_b06FwRodataLen;
2996 fw.rodata_index = 0;
2997 fw.rodata = bce_TPAT_b06FwRodata;
2998
2999 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3000 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3001
3002 /* Initialize the Completion Processor. */
3003 cpu_reg.mode = BCE_COM_CPU_MODE;
3004 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3005 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3006 cpu_reg.state = BCE_COM_CPU_STATE;
3007 cpu_reg.state_value_clear = 0xffffff;
3008 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3009 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3010 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3011 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3012 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3013 cpu_reg.spad_base = BCE_COM_SCRATCH;
3014 cpu_reg.mips_view_base = 0x8000000;
3015
3016 fw.ver_major = bce_COM_b06FwReleaseMajor;
3017 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3018 fw.ver_fix = bce_COM_b06FwReleaseFix;
3019 fw.start_addr = bce_COM_b06FwStartAddr;
3020
3021 fw.text_addr = bce_COM_b06FwTextAddr;
3022 fw.text_len = bce_COM_b06FwTextLen;
3023 fw.text_index = 0;
3024 fw.text = bce_COM_b06FwText;
3025
3026 fw.data_addr = bce_COM_b06FwDataAddr;
3027 fw.data_len = bce_COM_b06FwDataLen;
3028 fw.data_index = 0;
3029 fw.data = bce_COM_b06FwData;
3030
3031 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3032 fw.sbss_len = bce_COM_b06FwSbssLen;
3033 fw.sbss_index = 0;
3034 fw.sbss = bce_COM_b06FwSbss;
3035
3036 fw.bss_addr = bce_COM_b06FwBssAddr;
3037 fw.bss_len = bce_COM_b06FwBssLen;
3038 fw.bss_index = 0;
3039 fw.bss = bce_COM_b06FwBss;
3040
3041 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3042 fw.rodata_len = bce_COM_b06FwRodataLen;
3043 fw.rodata_index = 0;
3044 fw.rodata = bce_COM_b06FwRodata;
3045
3046 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3047 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3048 }
3049
3050
3051 /****************************************************************************/
3052 /* Initialize context memory. */
3053 /* */
3054 /* Clears the memory associated with each Context ID (CID). */
3055 /* */
3056 /* Returns: */
3057 /* Nothing. */
3058 /****************************************************************************/
3059 static void
3060 bce_init_context(struct bce_softc *sc)
3061 {
3062 u32 vcid;
3063
3064 vcid = 96;
3065 while (vcid) {
3066 u32 vcid_addr, pcid_addr, offset;
3067
3068 vcid--;
3069
3070 vcid_addr = GET_CID_ADDR(vcid);
3071 pcid_addr = vcid_addr;
3072
3073 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
3074 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3075
3076 /* Zero out the context. */
3077 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
3078 CTX_WR(sc, 0x00, offset, 0);
3079 }
3080
3081 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3082 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3083 }
3084 }
3085
3086
3087 /****************************************************************************/
3088 /* Fetch the permanent MAC address of the controller. */
3089 /* */
3090 /* Returns: */
3091 /* Nothing. */
3092 /****************************************************************************/
3093 static void
3094 bce_get_mac_addr(struct bce_softc *sc)
3095 {
3096 u32 mac_lo = 0, mac_hi = 0;
3097
3098 /*
3099 * The NetXtreme II bootcode populates various NIC
3100 * power-on and runtime configuration items in a
3101 * shared memory area. The factory configured MAC
3102 * address is available from both NVRAM and the
3103 * shared memory area so we'll read the value from
3104 * shared memory for speed.
3105 */
3106
3107 mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3108 BCE_PORT_HW_CFG_MAC_UPPER);
3109 mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3110 BCE_PORT_HW_CFG_MAC_LOWER);
3111
3112 if ((mac_lo == 0) && (mac_hi == 0)) {
3113 BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
3114 __FILE__, __LINE__);
3115 } else {
3116 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3117 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3118 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3119 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3120 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3121 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3122 }
3123
3124 DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3125 }
3126
3127
3128 /****************************************************************************/
3129 /* Program the MAC address. */
3130 /* */
3131 /* Returns: */
3132 /* Nothing. */
3133 /****************************************************************************/
3134 static void
3135 bce_set_mac_addr(struct bce_softc *sc)
3136 {
3137 u32 val;
3138 u8 *mac_addr = sc->eaddr;
3139
3140 DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3141
3142 val = (mac_addr[0] << 8) | mac_addr[1];
3143
3144 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3145
3146 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3147 (mac_addr[4] << 8) | mac_addr[5];
3148
3149 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3150 }
3151
3152
3153 /****************************************************************************/
3154 /* Stop the controller. */
3155 /* */
3156 /* Returns: */
3157 /* Nothing. */
3158 /****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Caller must hold the softc lock for the duration of the stop. */
	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	mii = device_get_softc(sc->bce_miibus);

	/* Stop the periodic tick timer before tearing anything down. */
	callout_stop(&sc->bce_tick_callout);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	/* Read back to flush the write, then let the blocks quiesce. */
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(20);

	bce_disable_intr(sc);

	/* Free RX buffers. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */

	/* Temporarily force IFF_UP so the media change below is applied. */
	itmp = ifp->if_flags;
	ifp->if_flags |= IFF_UP;

	/* If we are called from bce_detach(), mii is already NULL. */
	if (mii != NULL) {
		/* Swap in IFM_NONE, program the PHY, then restore the media. */
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
	}

	/* Restore the original interface flags. */
	ifp->if_flags = itmp;
	sc->watchdog_timer = 0;

	sc->bce_link = 0;

	/* Mark the interface as fully stopped. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
3217
3218
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Coordinates the reset with the bootcode firmware, issues a core reset    */
/* through the PCICFG misc. config register, and verifies that byte         */
/* swapping is correct and that the firmware re-initialized afterwards.     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
		__FUNCTION__, reset_code);

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the disable before delaying. */
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc)
		goto bce_reset_exit;

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Poll for reset completion: up to 10 polls, ~100us of delay total. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		BCE_PRINTF("%s(%d): Reset failed!\n",
			__FILE__, __LINE__);
		rc = EBUSY;
		goto bce_reset_exit;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
			__FILE__, __LINE__);
		rc = ENODEV;
		goto bce_reset_exit;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc)
		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
			__FILE__, __LINE__);

bce_reset_exit:
	return (rc);
}
3298
3299
3300 static int
3301 bce_chipinit(struct bce_softc *sc)
3302 {
3303 u32 val;
3304 int rc = 0;
3305
3306 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3307
3308 /* Make sure the interrupt is not active. */
3309 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3310
3311 /*
3312 * Initialize DMA byte/word swapping, configure the number of DMA
3313 * channels and PCI clock compensation delay.
3314 */
3315 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3316 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3317 #if BYTE_ORDER == BIG_ENDIAN
3318 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3319 #endif
3320 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3321 DMA_READ_CHANS << 12 |
3322 DMA_WRITE_CHANS << 16;
3323
3324 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3325
3326 if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3327 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3328
3329 /*
3330 * This setting resolves a problem observed on certain Intel PCI
3331 * chipsets that cannot handle multiple outstanding DMA operations.
3332 * See errata E9_5706A1_65.
3333 */
3334 if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3335 (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3336 !(sc->bce_flags & BCE_PCIX_FLAG))
3337 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3338
3339 REG_WR(sc, BCE_DMA_CONFIG, val);
3340
3341 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3342 if (sc->bce_flags & BCE_PCIX_FLAG) {
3343 u16 val;
3344
3345 val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3346 pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3347 }
3348
3349 /* Enable the RX_V2P and Context state machines before access. */
3350 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3351 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3352 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3353 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3354
3355 /* Initialize context mapping and zero out the quick contexts. */
3356 bce_init_context(sc);
3357
3358 /* Initialize the on-boards CPUs */
3359 bce_init_cpus(sc);
3360
3361 /* Prepare NVRAM for access. */
3362 if (bce_init_nvram(sc)) {
3363 rc = ENODEV;
3364 goto bce_chipinit_exit;
3365 }
3366
3367 /* Set the kernel bypass block size */
3368 val = REG_RD(sc, BCE_MQ_CONFIG);
3369 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3370 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3371 REG_WR(sc, BCE_MQ_CONFIG, val);
3372
3373 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3374 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3375 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3376
3377 /* Set the page size and clear the RV2P processor stall bits. */
3378 val = (BCM_PAGE_BITS - 8) << 24;
3379 REG_WR(sc, BCE_RV2P_CONFIG, val);
3380
3381 /* Configure page size. */
3382 val = REG_RD(sc, BCE_TBDR_CONFIG);
3383 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3384 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3385 REG_WR(sc, BCE_TBDR_CONFIG, val);
3386
3387 bce_chipinit_exit:
3388 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3389
3390 return(rc);
3391 }
3392
3393
3394 /****************************************************************************/
3395 /* Initialize the controller in preparation to send/receive traffic. */
3396 /* */
3397 /* Returns: */
3398 /* 0 for success, positive value for failure. */
3399 /****************************************************************************/
3400 static int
3401 bce_blockinit(struct bce_softc *sc)
3402 {
3403 u32 reg, val;
3404 int rc = 0;
3405
3406 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3407
3408 /* Load the hardware default MAC address. */
3409 bce_set_mac_addr(sc);
3410
3411 /* Set the Ethernet backoff seed value */
3412 val = sc->eaddr[0] + (sc->eaddr[1] << 8) +
3413 (sc->eaddr[2] << 16) + (sc->eaddr[3] ) +
3414 (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3415 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3416
3417 sc->last_status_idx = 0;
3418 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3419
3420 /* Set up link change interrupt generation. */
3421 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3422
3423 /* Program the physical address of the status block. */
3424 REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3425 BCE_ADDR_LO(sc->status_block_paddr));
3426 REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3427 BCE_ADDR_HI(sc->status_block_paddr));
3428
3429 /* Program the physical address of the statistics block. */
3430 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3431 BCE_ADDR_LO(sc->stats_block_paddr));
3432 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3433 BCE_ADDR_HI(sc->stats_block_paddr));
3434
3435 /* Program various host coalescing parameters. */
3436 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3437 (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3438 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3439 (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3440 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3441 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3442 REG_WR(sc, BCE_HC_TX_TICKS,
3443 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3444 REG_WR(sc, BCE_HC_RX_TICKS,
3445 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3446 REG_WR(sc, BCE_HC_COM_TICKS,
3447 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3448 REG_WR(sc, BCE_HC_CMD_TICKS,
3449 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3450 REG_WR(sc, BCE_HC_STATS_TICKS,
3451 (sc->bce_stats_ticks & 0xffff00));
3452 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3453 0xbb8); /* 3ms */
3454 REG_WR(sc, BCE_HC_CONFIG,
3455 (BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3456 BCE_HC_CONFIG_COLLECT_STATS));
3457
3458 /* Clear the internal statistics counters. */
3459 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3460
3461 /* Verify that bootcode is running. */
3462 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3463
3464 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3465 BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
3466 __FILE__, __LINE__);
3467 reg = 0);
3468
3469 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3470 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3471 BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
3472 "Expected: 08%08X\n", __FILE__, __LINE__,
3473 (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3474 BCE_DEV_INFO_SIGNATURE_MAGIC);
3475 rc = ENODEV;
3476 goto bce_blockinit_exit;
3477 }
3478
3479 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3480 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3481
3482 /* Enable link state change interrupt generation. */
3483 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3484
3485 /* Enable all remaining blocks in the MAC. */
3486 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3487 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3488 DELAY(20);
3489
3490 bce_blockinit_exit:
3491 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3492
3493 return (rc);
3494 }
3495
3496
3497 /****************************************************************************/
3498 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3499 /* */
3500 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3501 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3502 /* necessary. */
3503 /* */
3504 /* Todo: Consider writing the hardware mailboxes here to make rx_bd's */
3505 /* available to the hardware as soon as possible. */
3506 /* */
3507 /* Returns: */
3508 /* 0 for success, positive value for failure. */
3509 /****************************************************************************/
static int
bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
	u32 *prod_bseq)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	struct mbuf *m_new = NULL;
	struct rx_bd *rxbd;
	int i, nsegs, error, rc = 0;
#ifdef BCE_DEBUG
	u16 debug_chain_prod = *chain_prod;
#endif

	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
		__FUNCTION__);

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

	/* Check whether this is a new mbuf allocation (m == NULL) or a reuse. */
	if (m == NULL) {

		/* Simulate an mbuf allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			sc->mbuf_alloc_failed++;
			sc->mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* This is a new mbuf allocation. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
				__FILE__, __LINE__);

			sc->mbuf_alloc_failed++;

			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		DBRUNIF(1, sc->rx_mbuf_alloc++);

		/* Simulate an mbuf cluster allocation failure. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
			m_freem(m_new);
			sc->rx_mbuf_alloc--;
			sc->mbuf_alloc_failed++;
			sc->mbuf_sim_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit);

		/* Attach a cluster to the mbuf; M_EXT signals success. */
		m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
		if (!(m_new->m_flags & M_EXT)) {

			DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
				__FILE__, __LINE__);

			m_freem(m_new);
			DBRUNIF(1, sc->rx_mbuf_alloc--);

			sc->mbuf_alloc_failed++;
			rc = ENOBUFS;
			goto bce_get_buf_exit;
		}

		/* Initialize the mbuf cluster. */
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
	} else {
		/* Reuse an existing mbuf: reset its length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Map the mbuf cluster into device memory. */
	map = sc->rx_mbuf_map[*chain_prod];
	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Handle any mapping errors. */
	if (error) {
		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain!\n",
			__FILE__, __LINE__);

		m_freem(m_new);
		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = ENOBUFS;
		goto bce_get_buf_exit;
	}

	/* Make sure there is room in the receive chain; unload on overflow. */
	if (nsegs > sc->free_rx_bd) {
		bus_dmamap_unload(sc->rx_mbuf_tag, map);

		m_freem(m_new);
		DBRUNIF(1, sc->rx_mbuf_alloc--);

		rc = EFBIG;
		goto bce_get_buf_exit;
	}

#ifdef BCE_DEBUG
	/* Track the distribution of buffer segments. */
	sc->rx_mbuf_segs[nsegs]++;
#endif

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);

	/* Setup the rx_bd for the first segment (flagged as the start BD). */
	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START);
	*prod_bseq += segs[0].ds_len;

	/* Fill one rx_bd per additional DMA segment. */
	for (i = 1; i < nsegs; i++) {

		*prod = NEXT_RX_BD(*prod);
		*chain_prod = RX_CHAIN_IDX(*prod);

		rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];

		rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		rxbd->rx_bd_len       = htole32(segs[i].ds_len);
		rxbd->rx_bd_flags     = 0;
		*prod_bseq += segs[i].ds_len;
	}

	/* Mark the last rx_bd of the mbuf as the end of the frame buffer. */
	rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->free_rx_bd -= nsegs;

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
		nsegs));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);

bce_get_buf_exit:
	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
		__FUNCTION__);

	return(rc);
}
3671
3672
3673 /****************************************************************************/
3674 /* Allocate memory and initialize the TX data structures. */
3675 /* */
3676 /* Returns: */
3677 /* 0 for success, positive value for failure. */
3678 /****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	u32 val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod        = 0;
	sc->tx_cons        = 0;
	sc->tx_prod_bseq   = 0;
	sc->used_tx_bd     = 0;
	sc->max_tx_bd      = USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structre called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The last entry of each page is the link to the next page. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 TX chain. */
	val = BCE_L2CTX_TYPE_TYPE_L2;
	val |= BCE_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);

	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3743
3744
3745 /****************************************************************************/
3746 /* Free memory and clear the TX data structures. */
3747 /* */
3748 /* Returns: */
3749 /* Nothing. */
3750 /****************************************************************************/
3751 static void
3752 bce_free_tx_chain(struct bce_softc *sc)
3753 {
3754 int i;
3755
3756 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3757
3758 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3759 for (i = 0; i < TOTAL_TX_BD; i++) {
3760 if (sc->tx_mbuf_ptr[i] != NULL) {
3761 if (sc->tx_mbuf_map != NULL)
3762 bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
3763 BUS_DMASYNC_POSTWRITE);
3764 m_freem(sc->tx_mbuf_ptr[i]);
3765 sc->tx_mbuf_ptr[i] = NULL;
3766 DBRUNIF(1, sc->tx_mbuf_alloc--);
3767 }
3768 }
3769
3770 /* Clear each TX chain page. */
3771 for (i = 0; i < TX_PAGES; i++)
3772 bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3773
3774 sc->used_tx_bd = 0;
3775
3776 /* Check if we lost any mbufs in the process. */
3777 DBRUNIF((sc->tx_mbuf_alloc),
3778 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
3779 "from tx chain!\n",
3780 __FILE__, __LINE__, sc->tx_mbuf_alloc));
3781
3782 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3783 }
3784
3785
3786 /****************************************************************************/
3787 /* Add mbufs to the RX chain until its full or an mbuf allocation error */
3788 /* occurs. */
3789 /* */
3790 /* Returns: */
3791 /* Nothing */
3792 /****************************************************************************/
static void
bce_fill_rx_chain(struct bce_softc *sc)
{
	u16 prod, chain_prod;
	u32 prod_bseq;
#ifdef BCE_DEBUG
	int rx_mbuf_alloc_before, free_rx_bd_before;
#endif

	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);

	/* Work on local copies of the producer index and byte sequence. */
	prod = sc->rx_prod;
	prod_bseq = sc->rx_prod_bseq;

#ifdef BCE_DEBUG
	rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
	free_rx_bd_before = sc->free_rx_bd;
#endif

	/* Keep filling the RX chain until it's full. */
	while (sc->free_rx_bd > 0) {
		chain_prod = RX_CHAIN_IDX(prod);
		/* Passing a NULL mbuf makes bce_get_buf() allocate a new one. */
		if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

#if 0
	DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
		BCE_PRINTF("%s(): Installed %d mbufs in %d rx_bd entries.\n",
		__FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
		(free_rx_bd_before - sc->free_rx_bd)));
#endif

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's via the host mailbox. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);

}
3840
3841
3842 /****************************************************************************/
3843 /* Allocate memory and initialize the RX data structures. */
3844 /* */
3845 /* Returns: */
3846 /* 0 for success, positive value for failure. */
3847 /****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	u32 val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod        = 0;
	sc->rx_cons        = 0;
	sc->rx_prod_bseq   = 0;
	sc->free_rx_bd     = USABLE_RX_BD;
	sc->max_rx_bd      = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The last entry of each page links to the next page. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);


	/* Fill up the RX chain. */
	bce_fill_rx_chain(sc);


	/* Hand the freshly built chain pages to the device. */
	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(
			sc->rx_bd_chain_tag,
			sc->rx_bd_chain_map[i],
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return(rc);
}
3913
3914
3915 /****************************************************************************/
3916 /* Free memory and clear the RX data structures. */
3917 /* */
3918 /* Returns: */
3919 /* Nothing. */
3920 /****************************************************************************/
3921 static void
3922 bce_free_rx_chain(struct bce_softc *sc)
3923 {
3924 int i;
3925 #ifdef BCE_DEBUG
3926 int rx_mbuf_alloc_before;
3927 #endif
3928
3929 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3930
3931 #ifdef BCE_DEBUG
3932 rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
3933 #endif
3934
3935 /* Free any mbufs still in the RX mbuf chain. */
3936 for (i = 0; i < TOTAL_RX_BD; i++) {
3937 if (sc->rx_mbuf_ptr[i] != NULL) {
3938 if (sc->rx_mbuf_map[i] != NULL)
3939 bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
3940 BUS_DMASYNC_POSTREAD);
3941 m_freem(sc->rx_mbuf_ptr[i]);
3942 sc->rx_mbuf_ptr[i] = NULL;
3943 DBRUNIF(1, sc->rx_mbuf_alloc--);
3944 }
3945 }
3946
3947 DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
3948 BCE_PRINTF("%s(): Released %d mbufs.\n",
3949 __FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
3950
3951 /* Clear each RX chain page. */
3952 for (i = 0; i < RX_PAGES; i++)
3953 bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3954
3955 sc->free_rx_bd = sc->max_rx_bd;
3956
3957 /* Check if we lost any mbufs in the process. */
3958 DBRUNIF((sc->rx_mbuf_alloc),
3959 BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
3960 __FILE__, __LINE__, sc->rx_mbuf_alloc));
3961
3962 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3963 }
3964
3965
3966 /****************************************************************************/
3967 /* Set media options. */
3968 /* */
3969 /* Returns: */
3970 /* 0 for success, positive value for failure. */
3971 /****************************************************************************/
3972 static int
3973 bce_ifmedia_upd(struct ifnet *ifp)
3974 {
3975 struct bce_softc *sc;
3976
3977 sc = ifp->if_softc;
3978 BCE_LOCK(sc);
3979 bce_ifmedia_upd_locked(ifp);
3980 BCE_UNLOCK(sc);
3981 return (0);
3982 }
3983
3984
3985 /****************************************************************************/
3986 /* Set media options. */
3987 /* */
3988 /* Returns: */
3989 /* Nothing. */
3990 /****************************************************************************/
3991 static void
3992 bce_ifmedia_upd_locked(struct ifnet *ifp)
3993 {
3994 struct bce_softc *sc;
3995 struct mii_data *mii;
3996 struct ifmedia *ifm;
3997
3998 sc = ifp->if_softc;
3999 ifm = &sc->bce_ifmedia;
4000 BCE_LOCK_ASSERT(sc);
4001
4002 mii = device_get_softc(sc->bce_miibus);
4003
4004 /* Make sure the MII bus has been enumerated. */
4005 if (mii) {
4006 sc->bce_link = 0;
4007 if (mii->mii_instance) {
4008 struct mii_softc *miisc;
4009
4010 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4011 mii_phy_reset(miisc);
4012 }
4013 mii_mediachg(mii);
4014 }
4015 }
4016
4017
4018 /****************************************************************************/
4019 /* Reports current media status. */
4020 /* */
4021 /* Returns: */
4022 /* Nothing. */
4023 /****************************************************************************/
4024 static void
4025 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4026 {
4027 struct bce_softc *sc;
4028 struct mii_data *mii;
4029
4030 sc = ifp->if_softc;
4031
4032 BCE_LOCK(sc);
4033
4034 mii = device_get_softc(sc->bce_miibus);
4035
4036 mii_pollstat(mii);
4037 ifmr->ifm_active = mii->mii_media_active;
4038 ifmr->ifm_status = mii->mii_media_status;
4039
4040 BCE_UNLOCK(sc);
4041 }
4042
4043
4044 /****************************************************************************/
4045 /* Handles PHY generated interrupt events. */
4046 /* */
4047 /* Returns: */
4048 /* Nothing. */
4049 /****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	u32 new_link_state, old_link_state;

	/* Current link attention state vs. the last state we acknowledged. */
	new_link_state = sc->status_block->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {

		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Force bce_tick() to re-evaluate the link immediately. */
		sc->bce_link = 0;
		callout_stop(&sc->bce_tick_callout);
		bce_tick(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n");
		}
		else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
			DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n");
		}

	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
4086
4087
4088 /****************************************************************************/
4089 /* Handles received frame interrupt events. */
4090 /* */
4091 /* Returns: */
4092 /* Nothing. */
4093 /****************************************************************************/
/*
 * Drain completed receive descriptors: unmap each completed mbuf,
 * validate the controller-supplied l2_fhdr status, and hand good
 * frames to if_input with the BCE lock dropped around the upcall.
 * Called with the BCE lock held (it is released and re-taken for
 * each frame passed up the stack).
 */
static void
bce_rx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	u32 sw_prod_bseq;
	struct l2_fhdr *l2fhdr;

	DBRUNIF(1, sc->rx_interrupts++);

	/* Prepare the RX chain pages to be accessed by the host CPU. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over chain-page pointer entries (not real descriptors). */
	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		hw_cons++;

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
	    "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
	    sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);

	/* Scan through the receive chain as long as there is work to do */
	while (sw_cons != hw_cons) {
		struct mbuf *m;
		struct rx_bd *rxbd;
		unsigned int len;
		u32 status;

		/* Clear the mbuf pointer. */
		m = NULL;

		/* Convert the producer/consumer indices to an actual rx_bd index. */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
		    BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_rxbd(sc, sw_chain_cons, rxbd));

#ifdef DEVICE_POLLING
		/* In polling mode, stop once the per-call budget is spent. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->bce_rxcycles <= 0)
				break;
			sc->bce_rxcycles--;
		}
#endif

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {

			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
			    __FILE__, __LINE__, sw_chain_cons);
			    bce_breakpoint(sc));

			/*
			 * ToDo: If the received packet is small enough
			 * to fit into a single, non-M_EXT mbuf,
			 * allocate a new mbuf here, copy the data to
			 * that mbuf, and recycle the mapped jumbo frame.
			 */

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[sw_chain_cons]);

			/* Remove the mbuf from the RX chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];
			sc->rx_mbuf_ptr[sw_chain_cons] = NULL;

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info). The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
			    BCE_PRINTF("Simulating l2_fhdr status error.\n");
			    status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
			    BCE_PRINTF("%s(%d): Unusual frame size found. "
			    "Min(%d), Actual(%d), Max(%d)\n",
			    __FILE__, __LINE__, (int) BCE_MIN_MTU,
			    len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
			    bce_dump_mbuf(sc, m);
			    bce_breakpoint(sc));

			/* Strip the trailing Ethernet FCS from the length. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
			    L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
			    L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {

				/* Log the error and release the mbuf. */
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Todo: Reuse the mbuf to improve performance. */

				m_freem(m);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Skip over the l2_fhdr when passing the data up the stack. */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			/* Adjust the packet length to match the received data. */
			m->m_pkthdr.len = m->m_len = len;

			/* Send the packet to the appropriate interface. */
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
			    struct ether_header *eh;
			    eh = mtod(m, struct ether_header *);
			    BCE_PRINTF("%s(): to: %6D, from: %6D, type: 0x%04X\n",
			    __FUNCTION__, eh->ether_dhost, ":",
			    eh->ether_shost, ":", htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {

				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
						m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
					else
						DBPRINT(sc, BCE_WARN_SEND,
						    "%s(): Invalid IP checksum = 0x%04X!\n",
						    __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
				    L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
					    L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						    l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
						    | CSUM_PSEUDO_HDR);
					} else
						DBPRINT(sc, BCE_WARN_SEND,
						    "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
						    __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
				}
			}


			/*
			 * If we received a packet with a vlan tag,
			 * attach that information to the packet.
			 */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
				    __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
#if __FreeBSD_version < 700000
				VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
				m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
				m->m_flags |= M_VLANTAG;
#endif
			}

			/* Pass the mbuf off to the upper layers. */
			ifp->if_ipackets++;

bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			/* Make sure we don't lose our place when we release the lock. */
			sc->rx_cons = sw_cons;

			DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
			    __FUNCTION__);
			BCE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			DBRUNIF(1, sc->rx_mbuf_alloc--);
			BCE_LOCK(sc);

			/* Recover our place (rx_cons may move while unlocked). */
			sw_cons = sc->rx_cons;
		}

		/* Refresh hw_cons to see if there's new work */
		if (sw_cons == hw_cons) {
			hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
				hw_cons++;
		}

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* No new packets to process. Refill the RX chain and exit. */
	sc->rx_cons = sw_cons;
	bce_fill_rx_chain(sc);

	/* Hand the RX chain pages back to the controller. */
	for (int i = 0; i < RX_PAGES; i++)
		bus_dmamap_sync(sc->rx_bd_chain_tag,
		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
	    "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
4350
4351
4352 /****************************************************************************/
4353 /* Handles transmit completion interrupt events. */
4354 /* */
4355 /* Returns: */
4356 /* Nothing. */
4357 /****************************************************************************/
/*
 * Reclaim completed transmit descriptors: walk the TX chain from the
 * driver's consumer index to the hardware's, unloading the DMA map and
 * freeing the mbuf stored at each packet's final tx_bd.  Clears the
 * watchdog and re-opens the queue (IFF_DRV_OACTIVE) when room frees up.
 * Called with the BCE lock held.
 */
static void
bce_tx_intr(struct bce_softc *sc)
{
	struct status_block *sblk = sc->status_block;
	struct ifnet *ifp = sc->bce_ifp;
	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;

	BCE_LOCK_ASSERT(sc);

	DBRUNIF(1, sc->tx_interrupts++);

	/* Get the hardware's view of the TX consumer index. */
	hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;

	/* Skip to the next entry if this is a chain page pointer. */
	if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
		hw_tx_cons++;

	sw_tx_cons = sc->tx_cons;

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
	    BUS_SPACE_BARRIER_READ);

	/* Cycle through any completed TX chain page entries. */
	while (sw_tx_cons != hw_tx_cons) {
#ifdef BCE_DEBUG
		struct tx_bd *txbd = NULL;
#endif
		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);

		DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
		    "sw_tx_chain_cons = 0x%04X\n",
		    __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);

		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
		    BCE_PRINTF("%s(%d): TX chain consumer out of range! "
		    " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
		    (int) MAX_TX_BD);
		    bce_breakpoint(sc));

		DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
		    [TX_IDX(sw_tx_chain_cons)]);

		DBRUNIF((txbd == NULL),
		    BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
		    __FILE__, __LINE__, sw_tx_chain_cons);
		    bce_breakpoint(sc));

		DBRUN(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
		    bce_dump_txbd(sc, sw_tx_chain_cons, txbd));

		/*
		 * Free the associated mbuf. Remember
		 * that only the last tx_bd of a packet
		 * has an mbuf pointer and DMA map.
		 */
		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {

			/* Validate that this is the last tx_bd. */
			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
			    BCE_PRINTF("%s(%d): tx_bd END flag not set but "
			    "txmbuf == NULL!\n", __FILE__, __LINE__);
			    bce_breakpoint(sc));

			DBRUN(BCE_INFO_SEND,
			    BCE_PRINTF("%s(): Unloading map/freeing mbuf "
			    "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));

			/* Unmap the mbuf. */
			bus_dmamap_unload(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[sw_tx_chain_cons]);

			/* Free the mbuf. */
			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
			DBRUNIF(1, sc->tx_mbuf_alloc--);

			ifp->if_opackets++;
		}

		sc->used_tx_bd--;
		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);

		/* Refresh hw_cons to see if there's new work. */
		hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
		if ((hw_tx_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
			hw_tx_cons++;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
		    BUS_SPACE_BARRIER_READ);
	}

	/* Clear the TX timeout timer. */
	sc->watchdog_timer = 0;

	/* Clear the tx hardware queue full flag. */
	if (sc->used_tx_bd < sc->max_tx_bd) {
		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
		    DBPRINT(sc, BCE_INFO_SEND,
		    "%s(): Open TX chain! %d/%d (used/total)\n",
		    __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->tx_cons = sw_tx_cons;
}
4467
4468
4469 /****************************************************************************/
4470 /* Disables interrupt generation. */
4471 /* */
4472 /* Returns: */
4473 /* Nothing. */
4474 /****************************************************************************/
/*
 * Mask interrupt generation via the PCI config INT_ACK command
 * register.  The read-back of the same register follows the write;
 * presumably it flushes the posted PCI write so the mask takes
 * effect before returning — TODO confirm against the NetXtreme II
 * programming guide.
 */
static void
bce_disable_intr(struct bce_softc *sc)
{
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
}
4482
4483
4484 /****************************************************************************/
4485 /* Enables interrupt generation. */
4486 /* */
4487 /* Returns: */
4488 /* Nothing. */
4489 /****************************************************************************/
/*
 * Re-enable interrupt generation.  Two INT_ACK writes are issued with
 * the current status-block index: the first keeps interrupts masked
 * while acknowledging, the second drops the mask.  The final write
 * sets COAL_NOW in the host-coalescing command register — presumably
 * to force an immediate coalescing pass so events that arrived while
 * interrupts were masked are not lost; verify against the NetXtreme II
 * documentation.
 */
static void
bce_enable_intr(struct bce_softc *sc)
{
	u32 val;

	/* Acknowledge at last_status_idx with interrupts still masked. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	    BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);

	/* Repeat the write without MASK_INT to unmask interrupts. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	    BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Request an immediate coalescing cycle. */
	val = REG_RD(sc, BCE_HC_COMMAND);
	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
}
4505
4506
4507 /****************************************************************************/
4508 /* Handles controller initialization. */
4509 /* */
4510 /* Returns: */
4511 /* Nothing. */
4512 /****************************************************************************/
/*
 * Bring the controller fully up: stop/reset/re-init the chip, program
 * the MAC address and MTU, rebuild the RX/TX chains, enable (or, when
 * polling, disable) interrupts, and start the periodic tick.  A no-op
 * if the interface is already marked running.  Called with the BCE
 * lock held.
 */
static void
bce_init_locked(struct bce_softc *sc)
{
	struct ifnet *ifp;
	u32 ether_mtu;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);

	BCE_LOCK_ASSERT(sc);

	ifp = sc->bce_ifp;

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto bce_init_locked_exit;

	bce_stop(sc);

	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
		BCE_PRINTF("%s(%d): Controller reset failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_chipinit(sc)) {
		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	if (bce_blockinit(sc)) {
		BCE_PRINTF("%s(%d): Block initialization failed!\n",
		    __FILE__, __LINE__);
		goto bce_init_locked_exit;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
	    ETHER_CRC_LEN;

	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary. Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		    BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
		sc->mbuf_alloc_size = MJUM9BYTES;
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
		sc->mbuf_alloc_size = MCLBYTES;
	}

	/*
	 * Calculate the RX Ethernet frame size for rx_bd's.
	 * NOTE(review): the "+ 2" appears to account for the two
	 * IP-alignment bytes the chip inserts; the purpose of the
	 * "+ 8" slack is not evident from this file — confirm
	 * against the l2_fhdr layout.
	 */
	sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;

	DBPRINT(sc, BCE_INFO_RECV,
	    "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
	    "max_frame_size = %d\n",
	    __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/* Init RX buffer descriptor chain. */
	bce_init_rx_chain(sc);

	/* Init TX buffer descriptor chain. */
	bce_init_tx_chain(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		bce_disable_intr(sc);

		REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_rx_quick_cons_trip);
		REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
		    (1 << 16) | sc->bce_tx_quick_cons_trip);
	} else
#endif
	/* Enable host interrupts. */
	bce_enable_intr(sc);

	bce_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Schedule the periodic statistics/link tick. */
	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);

bce_init_locked_exit:
	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);

	return;
}
4616
4617
4618 /****************************************************************************/
4619 /* Initialize the controller just enough so that any management firmware */
4620 /* running on the device will continue to operate correctly. */
4621 /* */
4622 /* Returns: */
4623 /* Nothing. */
4624 /****************************************************************************/
4625 static void
4626 bce_mgmt_init_locked(struct bce_softc *sc)
4627 {
4628 struct ifnet *ifp;
4629
4630 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4631
4632 BCE_LOCK_ASSERT(sc);
4633
4634 /* Bail out if management firmware is not running. */
4635 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
4636 DBPRINT(sc, BCE_VERBOSE_SPECIAL,
4637 "No management firmware running...\n");
4638 goto bce_mgmt_init_locked_exit;
4639 }
4640
4641 ifp = sc->bce_ifp;
4642
4643 /* Enable all critical blocks in the MAC. */
4644 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
4645 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4646 DELAY(20);
4647
4648 bce_ifmedia_upd_locked(ifp);
4649 bce_mgmt_init_locked_exit:
4650 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4651
4652 return;
4653 }
4654
4655
4656 /****************************************************************************/
4657 /* Handles controller initialization when called from an unlocked routine. */
4658 /* */
4659 /* Returns: */
4660 /* Nothing. */
4661 /****************************************************************************/
/*
 * Unlocked entry point for controller initialization: acquires the
 * BCE lock and defers to bce_init_locked().  The void * argument is
 * the driver softc (presumably this is registered as the if_init
 * callback — confirm at the ifnet setup site).
 */
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;

	BCE_LOCK(sc);
	bce_init_locked(sc);
	BCE_UNLOCK(sc);
}
4671
4672
4673 /****************************************************************************/
4674 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4675 /* memory visible to the controller. */
4676 /* */
4677 /* Returns: */
4678 /* 0 for success, positive value for failure. */
4679 /* Modified: */
4680 /* m_head: May be set to NULL if MBUF is excessively fragmented. */
4681 /****************************************************************************/
/*
 * Map a single outgoing mbuf chain into the TX descriptor ring.
 *
 * Translates checksum/TSO/VLAN offload metadata into tx_bd flags,
 * DMA-maps the chain (defragmenting once on EFBIG), then emits one
 * tx_bd per DMA segment, marking the first with START and the last
 * with END.  The mbuf pointer is stored at the final descriptor's
 * index so the completion path can unload the shared map.
 *
 * Returns 0 on success or a positive errno; on ENOBUFS/unknown
 * mapping errors the mbuf is freed and *m_head set to NULL, while on
 * ENOMEM the mbuf is left intact for the caller to retry/requeue.
 */
static int
bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
	bus_dmamap_t map;
	struct tx_bd *txbd = NULL;
	struct mbuf *m0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;
	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
	u32 prod_bseq;
	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;


#ifdef BCE_DEBUG
	u16 debug_prod;
#endif
	int i, error, nsegs, rc = 0;

	/* Transfer any checksum offload flags to the bd. */
	m0 = *m_head;
	if (m0->m_pkthdr.csum_flags) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= TX_BD_FLAGS_IP_CKSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
			/* For TSO the controller needs two pieces of info, */
			/* the MSS and the IP+TCP options length. */
			mss = htole16(m0->m_pkthdr.tso_segsz);

			/* Map the header and find the Ethernet type & header length */
			eh = mtod(m0, struct ether_vlan_header *);
			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
				etype = ntohs(eh->evl_proto);
				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			} else {
				etype = ntohs(eh->evl_encap_proto);
				e_hlen = ETHER_HDR_LEN;
			}

			/* Check for supported TSO Ethernet types (only IPv4 for now) */
			switch (etype) {
			case ETHERTYPE_IP:
				ip = (struct ip *)(m0->m_data + e_hlen);

				/* TSO only supported for TCP protocol */
				if (ip->ip_p != IPPROTO_TCP) {
					BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
					    __FILE__, __LINE__);
					goto bce_tx_encap_skip_tso;
				}

				/* Get IP header length in bytes (min 20) */
				ip_hlen = ip->ip_hl << 2;

				/* Get the TCP header length in bytes (min 20) */
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
				tcp_hlen = (th->th_off << 2);

				/* IP header length and checksum will be calc'd by hardware */
				ip_len = ip->ip_len;
				ip->ip_len = 0;
				ip->ip_sum = 0;
				break;
			case ETHERTYPE_IPV6:
				BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
				    __FILE__, __LINE__);
				goto bce_tx_encap_skip_tso;
			default:
				BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
				    __FILE__, __LINE__);
				goto bce_tx_encap_skip_tso;
			}

			hdr_len = e_hlen + ip_hlen + tcp_hlen;

			DBPRINT(sc, BCE_EXCESSIVE_SEND,
			    "%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
			    __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);

			/* Set the LSO flag in the TX BD */
			flags |= TX_BD_FLAGS_SW_LSO;
			/* Set the length of IP + TCP options (in 32 bit words) */
			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);

bce_tx_encap_skip_tso:
			DBRUNIF(1, sc->requested_tso_frames++);
		}
	}

	/* Transfer any VLAN tags to the bd. */
	if (m0->m_flags & M_VLANTAG) {
		flags |= TX_BD_FLAGS_VLAN_TAG;
		vlan_tag = m0->m_pkthdr.ether_vtag;
	}

	/* Map the mbuf into DMAable memory. */
	prod = sc->tx_prod;
	chain_prod = TX_CHAIN_IDX(prod);
	map = sc->tx_mbuf_map[chain_prod];

	/* Map the mbuf into our DMA address space. */
	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);

	/* Check if the DMA mapping was successful */
	if (error == EFBIG) {

		/* The mbuf is too fragmented for our DMA mapping. */
		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
		    __FUNCTION__, nsegs);
		DBRUNIF(1, bce_dump_mbuf(sc, m0););

		/* Try to defrag the mbuf. */
		m0 = m_defrag(*m_head, M_DONTWAIT);
		if (m0 == NULL) {
			/* Defrag was unsuccessful */
			m_freem(*m_head);
			*m_head = NULL;
			sc->mbuf_alloc_failed++;
			return (ENOBUFS);
		}

		/* Defrag was successful, try mapping again */
		*m_head = m0;
		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
		    segs, &nsegs, BUS_DMA_NOWAIT);

		/* Still getting an error after a defrag. */
		if (error == ENOMEM) {
			/* Insufficient DMA buffers available; mbuf is kept
			 * for the caller to requeue and retry later. */
			sc->tx_dma_map_failures++;
			return (error);
		} else if (error != 0) {
			/* Still can't map the mbuf, release it and return an error. */
			BCE_PRINTF(
			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
			    __FILE__, __LINE__);
			m_freem(m0);
			*m_head = NULL;
			sc->tx_dma_map_failures++;
			return (ENOBUFS);
		}
	} else if (error == ENOMEM) {
		/* Insufficient DMA buffers available; caller may retry. */
		sc->tx_dma_map_failures++;
		return (error);
	} else if (error != 0) {
		m_freem(m0);
		*m_head = NULL;
		sc->tx_dma_map_failures++;
		return (error);
	}

	/* Make sure there's room in the chain */
	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
		bus_dmamap_unload(sc->tx_mbuf_tag, map);
		return (ENOBUFS);
	}

	/* prod points to an empty tx_bd at this point. */
	prod_bseq = sc->tx_prod_bseq;

#ifdef BCE_DEBUG
	debug_prod = chain_prod;
#endif

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(): Start: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Cycle through each mbuf segment that makes up
	 * the outgoing frame, gathering the mapping info
	 * for that segment and creating a tx_bd to for
	 * the mbuf.
	 */
	for (i = 0; i < nsegs ; i++) {

		chain_prod = TX_CHAIN_IDX(prod);
		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];

		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
		/*
		 * NOTE(review): mss was already passed through htole16()
		 * above and is swapped again here by htole32(); OR-ing a
		 * 32-bit-swapped value with a 16-bit-swapped length is
		 * only a no-op on little-endian hosts — verify the field
		 * layout on big-endian.
		 */
		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
		txbd->tx_bd_flags = htole16(flags);
		prod_bseq += segs[i].ds_len;
		if (i == 0)
			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
		prod = NEXT_TX_BD(prod);
	}

	/* Set the END flag on the last TX buffer descriptor. */
	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);

	DBRUN(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(): End: prod = 0x%04X, chain_prod = %04X, "
	    "prod_bseq = 0x%08X\n",
	    __FUNCTION__, prod, chain_prod, prod_bseq);

	/*
	 * Ensure that the mbuf pointer for this transmission
	 * is placed at the array index of the last
	 * descriptor in this chain. This is done
	 * because a single map is used for all
	 * segments of the mbuf and we don't want to
	 * unload the map before all of the segments
	 * have been freed.
	 */
	sc->tx_mbuf_ptr[chain_prod] = m0;
	sc->used_tx_bd += nsegs;

	/* Update some debug statistic counters */
	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
	    sc->tx_hi_watermark = sc->used_tx_bd);
	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
	DBRUNIF(1, sc->tx_mbuf_alloc++);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));

	/* prod points to the next free tx_bd at this point. */
	sc->tx_prod = prod;
	sc->tx_prod_bseq = prod_bseq;

	return(rc);
}
4914
4915
4916 /****************************************************************************/
4917 /* Main transmit routine when called from another routine with a lock. */
4918 /* */
4919 /* Returns: */
4920 /* Nothing. */
4921 /****************************************************************************/
/*
 * Dequeue frames from the interface send queue and hand them to
 * bce_tx_encap() until the TX ring fills or the queue empties, then
 * nudge the hardware by writing the new producer index/byte sequence
 * and arm the TX watchdog.  On a full ring the frame is prepended
 * back onto the queue and IFF_DRV_OACTIVE is set.  Called with the
 * BCE lock held.
 */
static void
bce_start_locked(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int count = 0;
	u16 tx_prod, tx_chain_prod;

	/* If there's no link or the transmit queue is empty then just exit. */
	if (!sc->bce_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link or transmit queue empty.\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* prod points to the next free tx_bd. */
	tx_prod = sc->tx_prod;
	tx_chain_prod = TX_CHAIN_IDX(tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(): Start: tx_prod = 0x%04X, tx_chain_prod = %04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/*
	 * Keep adding entries while there is space in the ring.
	 */
	while (sc->used_tx_bd < sc->max_tx_bd) {

		/* Check for any frames to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, place the mbuf back at the
		 * head of the queue and set the OACTIVE flag
		 * to wait for the NIC to drain the chain.
		 */
		if (bce_tx_encap(sc, &m_head)) {
			/* m_head is NULL here if bce_tx_encap() freed it. */
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			DBPRINT(sc, BCE_INFO_SEND,
			    "TX chain is closed for business! Total tx_bd used = %d\n",
			    sc->used_tx_bd);
			break;
		}

		count++;

		/* Send a copy of the frame to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (count == 0) {
		/* no packets were dequeued */
		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
		    __FUNCTION__);
		goto bce_start_locked_exit;
	}

	/* Update the driver's counters. */
	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);

	DBPRINT(sc, BCE_INFO_SEND,
	    "%s(): End: tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
	    "tx_prod_bseq = 0x%08X\n",
	    __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);

	/* Start the transmit by writing the producer indices to the chip. */
	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);

	/* Set the tx timeout. */
	sc->watchdog_timer = BCE_TX_TIMEOUT;

bce_start_locked_exit:
	return;
}
5003
5004
5005 /****************************************************************************/
5006 /* Main transmit routine when called from another routine without a lock. */
5007 /* */
5008 /* Returns: */
5009 /* Nothing. */
5010 /****************************************************************************/
/*
 * Unlocked transmit entry point: acquires the BCE lock and defers to
 * bce_start_locked() (presumably registered as the if_start callback —
 * confirm at the ifnet setup site).
 */
static void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	BCE_LOCK(sc);
	bce_start_locked(ifp);
	BCE_UNLOCK(sc);
}
5020
5021
5022 /****************************************************************************/
5023 /* Handles any IOCTL calls from the operating system. */
5024 /* */
5025 /* Returns: */
5026 /* 0 for success, positive value for failure. */
5027 /****************************************************************************/
5028 static int
5029 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5030 {
5031 struct bce_softc *sc = ifp->if_softc;
5032 struct ifreq *ifr = (struct ifreq *) data;
5033 struct mii_data *mii;
5034 int mask, error = 0;
5035
5036 switch(command) {
5037
5038 /* Set the interface MTU. */
5039 case SIOCSIFMTU:
5040 /* Check that the MTU setting is supported. */
5041 if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
5042 (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
5043 error = EINVAL;
5044 break;
5045 }
5046
5047 DBPRINT(sc, BCE_INFO_MISC,
5048 "SIOCSIFMTU: Changing MTU from %d to %d\n",
5049 (int) ifp->if_mtu, (int) ifr->ifr_mtu);
5050
5051 BCE_LOCK(sc);
5052 ifp->if_mtu = ifr->ifr_mtu;
5053 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5054 bce_init_locked(sc);
5055 BCE_UNLOCK(sc);
5056 break;
5057
5058 /* Set interface flags. */
5059 case SIOCSIFFLAGS:
5060 DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
5061
5062 BCE_LOCK(sc);
5063
5064 /* Check if the interface is up. */
5065 if (ifp->if_flags & IFF_UP) {
5066 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5067 /* Change promiscuous/multicast flags as necessary. */
5068 bce_set_rx_mode(sc);
5069 } else {
5070 /* Start the HW */
5071 bce_init_locked(sc);
5072 }
5073 } else {
5074 /* The interface is down, check if driver is running. */
5075 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5076 bce_stop(sc);
5077
5078 /* If MFW is running, restart the controller a bit. */
5079 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5080 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5081 bce_chipinit(sc);
5082 bce_mgmt_init_locked(sc);
5083 }
5084 }
5085 }
5086
5087 BCE_UNLOCK(sc);
5088 error = 0;
5089
5090 break;
5091
5092 /* Add/Delete multicast address */
5093 case SIOCADDMULTI:
5094 case SIOCDELMULTI:
5095 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
5096
5097 BCE_LOCK(sc);
5098 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5099 bce_set_rx_mode(sc);
5100 error = 0;
5101 }
5102 BCE_UNLOCK(sc);
5103
5104 break;
5105
5106 /* Set/Get Interface media */
5107 case SIOCSIFMEDIA:
5108 case SIOCGIFMEDIA:
5109 DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
5110
5111 mii = device_get_softc(sc->bce_miibus);
5112 error = ifmedia_ioctl(ifp, ifr,
5113 &mii->mii_media, command);
5114 break;
5115
5116 /* Set interface capability */
5117 case SIOCSIFCAP:
5118 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5119 DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
5120
5121 #ifdef DEVICE_POLLING
5122 if (mask & IFCAP_POLLING) {
5123 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5124
5125 /* Setup the poll routine to call. */
5126 error = ether_poll_register(bce_poll, ifp);
5127 if (error) {
5128 BCE_PRINTF("%s(%d): Error registering poll function!\n",
5129 __FILE__, __LINE__);
5130 goto bce_ioctl_exit;
5131 }
5132
5133 /* Clear the interrupt. */
5134 BCE_LOCK(sc);
5135 bce_disable_intr(sc);
5136
5137 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5138 (1 << 16) | sc->bce_rx_quick_cons_trip);
5139 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5140 (1 << 16) | sc->bce_tx_quick_cons_trip);
5141
5142 ifp->if_capenable |= IFCAP_POLLING;
5143 BCE_UNLOCK(sc);
5144 } else {
5145 /* Clear the poll routine. */
5146 error = ether_poll_deregister(ifp);
5147
5148 /* Enable interrupt even in error case */
5149 BCE_LOCK(sc);
5150 bce_enable_intr(sc);
5151
5152 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5153 (sc->bce_tx_quick_cons_trip_int << 16) |
5154 sc->bce_tx_quick_cons_trip);
5155 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5156 (sc->bce_rx_quick_cons_trip_int << 16) |
5157 sc->bce_rx_quick_cons_trip);
5158
5159 ifp->if_capenable &= ~IFCAP_POLLING;
5160 BCE_UNLOCK(sc);
5161 }
5162 }
5163 #endif /*DEVICE_POLLING */
5164
5165 /* Toggle the TX checksum capabilites enable flag. */
5166 if (mask & IFCAP_TXCSUM) {
5167 ifp->if_capenable ^= IFCAP_TXCSUM;
5168 if (IFCAP_TXCSUM & ifp->if_capenable)
5169 ifp->if_hwassist = BCE_IF_HWASSIST;
5170 else
5171 ifp->if_hwassist = 0;
5172 }
5173
5174 /* Toggle the RX checksum capabilities enable flag. */
5175 if (mask & IFCAP_RXCSUM) {
5176 ifp->if_capenable ^= IFCAP_RXCSUM;
5177 if (IFCAP_RXCSUM & ifp->if_capenable)
5178 ifp->if_hwassist = BCE_IF_HWASSIST;
5179 else
5180 ifp->if_hwassist = 0;
5181 }
5182
5183 /* Toggle the TSO capabilities enable flag. */
5184 if (bce_tso_enable && (mask & IFCAP_TSO4)) {
5185 ifp->if_capenable ^= IFCAP_TSO4;
5186 if (IFCAP_RXCSUM & ifp->if_capenable)
5187 ifp->if_hwassist = BCE_IF_HWASSIST;
5188 else
5189 ifp->if_hwassist = 0;
5190 }
5191
5192 /* Toggle VLAN_MTU capabilities enable flag. */
5193 if (mask & IFCAP_VLAN_MTU) {
5194 BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
5195 __FILE__, __LINE__);
5196 }
5197
5198 /* Toggle VLANHWTAG capabilities enabled flag. */
5199 if (mask & IFCAP_VLAN_HWTAGGING) {
5200 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5201 BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
5202 "management firmware (ASF/IPMI/UMP) is running!\n",
5203 __FILE__, __LINE__);
5204 else
5205 BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5206 __FILE__, __LINE__);
5207 }
5208
5209 break;
5210 default:
5211 /* We don't know how to handle the IOCTL, pass it on. */
5212 error = ether_ioctl(ifp, command, data);
5213 break;
5214 }
5215
5216 #ifdef DEVICE_POLLING
5217 bce_ioctl_exit:
5218 #endif
5219 return(error);
5220 }
5221
5222
5223 /****************************************************************************/
5224 /* Transmit timeout handler. */
5225 /* */
5226 /* Returns: */
5227 /* Nothing. */
5228 /****************************************************************************/
5229 static void
5230 bce_watchdog(struct bce_softc *sc)
5231 {
5232
5233 BCE_LOCK_ASSERT(sc);
5234
5235 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
5236 return;
5237
5238 /*
5239 * If we are in this routine because of pause frames, then
5240 * don't reset the hardware.
5241 */
5242 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5243 return;
5244
5245 BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
5246 __FILE__, __LINE__);
5247
5248 DBRUN(BCE_VERBOSE_SEND,
5249 bce_dump_driver_state(sc);
5250 bce_dump_status_block(sc));
5251
5252 /* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5253
5254 sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5255
5256 bce_init_locked(sc);
5257 sc->bce_ifp->if_oerrors++;
5258
5259 }
5260
5261
5262 #ifdef DEVICE_POLLING
5263 static void
5264 bce_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
5265 {
5266 struct bce_softc *sc = ifp->if_softc;
5267
5268 BCE_LOCK_ASSERT(sc);
5269
5270 sc->bce_rxcycles = count;
5271
5272 bus_dmamap_sync(sc->status_tag, sc->status_map,
5273 BUS_DMASYNC_POSTWRITE);
5274
5275 /* Check for any completed RX frames. */
5276 if (sc->status_block->status_rx_quick_consumer_index0 !=
5277 sc->hw_rx_cons)
5278 bce_rx_intr(sc);
5279
5280 /* Check for any completed TX frames. */
5281 if (sc->status_block->status_tx_quick_consumer_index0 !=
5282 sc->hw_tx_cons)
5283 bce_tx_intr(sc);
5284
5285 /* Check for new frames to transmit. */
5286 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5287 bce_start_locked(ifp);
5288
5289 }
5290
5291
5292 static void
5293 bce_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
5294 {
5295 struct bce_softc *sc = ifp->if_softc;
5296
5297 BCE_LOCK(sc);
5298 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5299 bce_poll_locked(ifp, cmd, count);
5300 BCE_UNLOCK(sc);
5301 }
5302 #endif /* DEVICE_POLLING */
5303
5304
#if 0
/* Disabled helper: returns non-zero when the status block shows RX/TX
 * completions or a link state change that the driver has not yet seen. */
static inline int
bce_has_work(struct bce_softc *sc)
{
	struct status_block *stat = sc->status_block;

	if ((stat->status_rx_quick_consumer_index0 != sc->hw_rx_cons) ||
	    (stat->status_tx_quick_consumer_index0 != sc->hw_tx_cons))
		return 1;

	/* Fix: the original compared against "bp->link_up", a leftover
	 * from the Linux bnx2 driver ("bp" is undeclared here and would
	 * not compile); this driver tracks link state in sc->bce_link. */
	if (((stat->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    sc->bce_link)
		return 1;

	return 0;
}
#endif
5322
5323
5324 /*
5325 * Interrupt handler.
5326 */
5327 /****************************************************************************/
5328 /* Main interrupt entry point. Verifies that the controller generated the */
5329 /* interrupt and then calls a separate routine for handle the various */
5330 /* interrupt causes (PHY, TX, RX). */
5331 /* */
/* Returns:                                                                 */
/*   Nothing (void interrupt handler).                                      */
5334 /****************************************************************************/
static void
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u32 status_attn_bits;

	sc = xsc;
	ifp = sc->bce_ifp;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
	BCE_LOCK(sc);

	DBRUNIF(1, sc->interrupts_generated++);

#ifdef DEVICE_POLLING
	/* When polling is active the ISR does nothing; the poll routine
	 * services the rings instead. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DBPRINT(sc, BCE_INFO_MISC, "Polling enabled!\n");
		goto bce_intr_exit;
	}
#endif

	/* Pull the DMA'd status block into host visibility before reading it. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
		BUS_DMASYNC_POSTWRITE);

	/*
	 * If the hardware status block index
	 * matches the last value read by the
	 * driver and we haven't asserted our
	 * interrupt then there's nothing to do.
	 */
	if ((sc->status_block->status_idx == sc->last_status_idx) &&
		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		goto bce_intr_exit;

	/* Ack the interrupt and stop others from occuring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Keep processing data as long as there is work to do. */
	for (;;) {

		status_attn_bits = sc->status_block->status_attn_bits;

		/* Debug hook: randomly simulate a fatal attention. */
		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
			BCE_PRINTF("Simulating unexpected status attention bit set.");
			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);

		/* Was it a link change interrupt? */
		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
			bce_phy_intr(sc);

		/* If any other attention is asserted then the chip is toast. */
		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
			(sc->status_block->status_attn_bits_ack &
			~STATUS_ATTN_BITS_LINK_STATE))) {

			DBRUN(1, sc->unexpected_attentions++);

			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
				__FILE__, __LINE__, sc->status_block->status_attn_bits);

			DBRUN(BCE_FATAL,
				if (bce_debug_unexpected_attention == 0)
					bce_breakpoint(sc));

			/* Reinitialize the whole controller; nothing else
			 * can be serviced after a fatal attention. */
			bce_init_locked(sc);
			goto bce_intr_exit;
		}

		/* Check for any completed RX frames. */
		if (sc->status_block->status_rx_quick_consumer_index0 != sc->hw_rx_cons)
			bce_rx_intr(sc);

		/* Check for any completed TX frames. */
		if (sc->status_block->status_tx_quick_consumer_index0 != sc->hw_tx_cons)
			bce_tx_intr(sc);

		/* Save the status block index value for use during the next interrupt. */
		sc->last_status_idx = sc->status_block->status_idx;

		/* Prevent speculative reads from getting ahead of the status block. */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			BUS_SPACE_BARRIER_READ);

		/* If there's no work left then exit the interrupt service routine. */
		if ((sc->status_block->status_rx_quick_consumer_index0 == sc->hw_rx_cons) &&
			(sc->status_block->status_tx_quick_consumer_index0 == sc->hw_tx_cons))
			break;

	}

	/* Hand the status block back to the device before re-enabling. */
	bus_dmamap_sync(sc->status_tag, sc->status_map,
		BUS_DMASYNC_PREWRITE);

	/* Re-enable interrupts.  The first (masked) write updates the index;
	 * the second unmasks the interrupt line. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
		BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);

	/* Handle any frames that arrived while handling the interrupt. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bce_start_locked(ifp);

bce_intr_exit:
	BCE_UNLOCK(sc);
}
5446
5447
5448 /****************************************************************************/
5449 /* Programs the various packet receive modes (broadcast and multicast). */
5450 /* */
5451 /* Returns: */
5452 /* Nothing. */
5453 /****************************************************************************/
5454 static void
5455 bce_set_rx_mode(struct bce_softc *sc)
5456 {
5457 struct ifnet *ifp;
5458 struct ifmultiaddr *ifma;
5459 u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5460 u32 rx_mode, sort_mode;
5461 int h, i;
5462
5463 BCE_LOCK_ASSERT(sc);
5464
5465 ifp = sc->bce_ifp;
5466
5467 /* Initialize receive mode default settings. */
5468 rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5469 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5470 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5471
5472 /*
5473 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5474 * be enbled.
5475 */
5476 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5477 (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5478 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5479
5480 /*
5481 * Check for promiscuous, all multicast, or selected
5482 * multicast address filtering.
5483 */
5484 if (ifp->if_flags & IFF_PROMISC) {
5485 DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
5486
5487 /* Enable promiscuous mode. */
5488 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5489 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5490 } else if (ifp->if_flags & IFF_ALLMULTI) {
5491 DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
5492
5493 /* Enable all multicast addresses. */
5494 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5495 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5496 }
5497 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5498 } else {
5499 /* Accept one or more multicast(s). */
5500 DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
5501
5502 IF_ADDR_LOCK(ifp);
5503 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5504 if (ifma->ifma_addr->sa_family != AF_LINK)
5505 continue;
5506 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5507 ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
5508 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5509 }
5510 IF_ADDR_UNLOCK(ifp);
5511
5512 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5513 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5514
5515 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5516 }
5517
5518 /* Only make changes if the recive mode has actually changed. */
5519 if (rx_mode != sc->rx_mode) {
5520 DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
5521 rx_mode);
5522
5523 sc->rx_mode = rx_mode;
5524 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5525 }
5526
5527 /* Disable and clear the exisitng sort before enabling a new sort. */
5528 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5529 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5530 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5531 }
5532
5533
5534 /****************************************************************************/
5535 /* Called periodically to updates statistics from the controllers */
5536 /* statistics block. */
5537 /* */
5538 /* Returns: */
5539 /* Nothing. */
5540 /****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp;
	struct statistics_block *stats;

	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);

	ifp = sc->bce_ifp;

	/* The DMA'd hardware statistics block. */
	stats = (struct statistics_block *) sc->stats_block;

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;

	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
		(u_long) stats->stat_EtherStatsOverrsizePkts +
		(u_long) stats->stat_IfInMBUFDiscards +
		(u_long) stats->stat_Dot3StatsAlignmentErrors +
		(u_long) stats->stat_Dot3StatsFCSErrors;

	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
		(u_long) stats->stat_Dot3StatsExcessiveCollisions +
		(u_long) stats->stat_Dot3StatsLateCollisions;

	/*
	 * Certain controllers don't report
	 * carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
		!(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;

	/*
	 * Update the sysctl statistics from the
	 * hardware statistics.  The 64-bit octet/packet counters are
	 * assembled from their hi/lo halves; the rest are copied
	 * field-for-field.
	 */
	sc->stat_IfHCInOctets =
		((u64) stats->stat_IfHCInOctets_hi << 32) +
		(u64) stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
		(u64) stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((u64) stats->stat_IfHCOutOctets_hi << 32) +
		(u64) stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
		(u64) stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
		(u64) stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
		(u64) stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
		(u64) stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
		(u64) stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
		(u64) stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		(u64) stats->stat_IfHCOutBroadcastPkts_lo;

	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	/* Completion-unit "no buffers" counter, read through the indirect
	 * register window.  NOTE(review): 0x120084 is a magic address —
	 * presumably a COM-processor scratch location; confirm against
	 * the chip documentation. */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
}
5758
5759
5760 /****************************************************************************/
5761 /* Periodic function to notify the bootcode that the driver is still */
5762 /* present. */
5763 /* */
5764 /* Returns: */
5765 /* Nothing. */
5766 /****************************************************************************/
5767 static void
5768 bce_pulse(void *xsc)
5769 {
5770 struct bce_softc *sc = xsc;
5771 u32 msg;
5772
5773 DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n");
5774
5775 BCE_LOCK_ASSERT(sc);
5776
5777 /* Tell the firmware that the driver is still running. */
5778 msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
5779 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
5780
5781 /* Schedule the next pulse. */
5782 callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
5783
5784 return;
5785 }
5786
5787
5788 /****************************************************************************/
5789 /* Periodic function to perform maintenance tasks. */
5790 /* */
5791 /* Returns: */
5792 /* Nothing. */
5793 /****************************************************************************/
5794 static void
5795 bce_tick(void *xsc)
5796 {
5797 struct bce_softc *sc = xsc;
5798 struct mii_data *mii;
5799 struct ifnet *ifp;
5800
5801 ifp = sc->bce_ifp;
5802
5803 BCE_LOCK_ASSERT(sc);
5804
5805 /* Update the statistics from the hardware statistics block. */
5806 bce_stats_update(sc);
5807
5808 /* Check that chip hasn't hung. */
5809 bce_watchdog(sc);
5810
5811 /* Schedule the next tick. */
5812 callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
5813
5814 /* If link is up already up then we're done. */
5815 if (sc->bce_link)
5816 goto bce_tick_locked_exit;
5817
5818 mii = device_get_softc(sc->bce_miibus);
5819 mii_tick(mii);
5820
5821 /* Check if the link has come up. */
5822 if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
5823 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5824 sc->bce_link++;
5825 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
5826 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
5827 bootverbose)
5828 BCE_PRINTF("Gigabit link up\n");
5829 /* Now that link is up, handle any outstanding TX traffic. */
5830 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5831 bce_start_locked(ifp);
5832 }
5833
5834 bce_tick_locked_exit:
5835 return;
5836 }
5837
5838
5839 #ifdef BCE_DEBUG
5840 /****************************************************************************/
5841 /* Allows the driver state to be dumped through the sysctl interface. */
5842 /* */
5843 /* Returns: */
5844 /* 0 for success, positive value for failure. */
5845 /****************************************************************************/
5846 static int
5847 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
5848 {
5849 int error;
5850 int result;
5851 struct bce_softc *sc;
5852
5853 result = -1;
5854 error = sysctl_handle_int(oidp, &result, 0, req);
5855
5856 if (error || !req->newptr)
5857 return (error);
5858
5859 if (result == 1) {
5860 sc = (struct bce_softc *)arg1;
5861 bce_dump_driver_state(sc);
5862 }
5863
5864 return error;
5865 }
5866
5867
5868 /****************************************************************************/
5869 /* Allows the hardware state to be dumped through the sysctl interface. */
5870 /* */
5871 /* Returns: */
5872 /* 0 for success, positive value for failure. */
5873 /****************************************************************************/
5874 static int
5875 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
5876 {
5877 int error;
5878 int result;
5879 struct bce_softc *sc;
5880
5881 result = -1;
5882 error = sysctl_handle_int(oidp, &result, 0, req);
5883
5884 if (error || !req->newptr)
5885 return (error);
5886
5887 if (result == 1) {
5888 sc = (struct bce_softc *)arg1;
5889 bce_dump_hw_state(sc);
5890 }
5891
5892 return error;
5893 }
5894
5895
5896 /****************************************************************************/
5897 /* Allows the bootcode state to be dumped through the sysctl interface. */
5898 /* */
5899 /* Returns: */
5900 /* 0 for success, positive value for failure. */
5901 /****************************************************************************/
5902 static int
5903 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
5904 {
5905 int error;
5906 int result;
5907 struct bce_softc *sc;
5908
5909 result = -1;
5910 error = sysctl_handle_int(oidp, &result, 0, req);
5911
5912 if (error || !req->newptr)
5913 return (error);
5914
5915 if (result == 1) {
5916 sc = (struct bce_softc *)arg1;
5917 bce_dump_bc_state(sc);
5918 }
5919
5920 return error;
5921 }
5922
5923
5924 /****************************************************************************/
5925 /* Provides a sysctl interface to allow dumping the RX chain. */
5926 /* */
5927 /* Returns: */
5928 /* 0 for success, positive value for failure. */
5929 /****************************************************************************/
5930 static int
5931 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
5932 {
5933 int error;
5934 int result;
5935 struct bce_softc *sc;
5936
5937 result = -1;
5938 error = sysctl_handle_int(oidp, &result, 0, req);
5939
5940 if (error || !req->newptr)
5941 return (error);
5942
5943 if (result == 1) {
5944 sc = (struct bce_softc *)arg1;
5945 bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
5946 }
5947
5948 return error;
5949 }
5950
5951
5952 /****************************************************************************/
5953 /* Provides a sysctl interface to allow dumping the TX chain. */
5954 /* */
5955 /* Returns: */
5956 /* 0 for success, positive value for failure. */
5957 /****************************************************************************/
5958 static int
5959 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
5960 {
5961 int error;
5962 int result;
5963 struct bce_softc *sc;
5964
5965 result = -1;
5966 error = sysctl_handle_int(oidp, &result, 0, req);
5967
5968 if (error || !req->newptr)
5969 return (error);
5970
5971 if (result == 1) {
5972 sc = (struct bce_softc *)arg1;
5973 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
5974 }
5975
5976 return error;
5977 }
5978
5979
5980 /****************************************************************************/
5981 /* Provides a sysctl interface to allow reading arbitrary registers in the */
5982 /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
5983 /* */
5984 /* Returns: */
5985 /* 0 for success, positive value for failure. */
5986 /****************************************************************************/
5987 static int
5988 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
5989 {
5990 struct bce_softc *sc;
5991 int error;
5992 u32 val, result;
5993
5994 result = -1;
5995 error = sysctl_handle_int(oidp, &result, 0, req);
5996 if (error || (req->newptr == NULL))
5997 return (error);
5998
5999 /* Make sure the register is accessible. */
6000 if (result < 0x8000) {
6001 sc = (struct bce_softc *)arg1;
6002 val = REG_RD(sc, result);
6003 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6004 } else if (result < 0x0280000) {
6005 sc = (struct bce_softc *)arg1;
6006 val = REG_RD_IND(sc, result);
6007 BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6008 }
6009
6010 return (error);
6011 }
6012
6013
6014 /****************************************************************************/
6015 /* Provides a sysctl interface to allow reading arbitrary PHY registers in */
6016 /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
6017 /* */
6018 /* Returns: */
6019 /* 0 for success, positive value for failure. */
6020 /****************************************************************************/
6021 static int
6022 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
6023 {
6024 struct bce_softc *sc;
6025 device_t dev;
6026 int error, result;
6027 u16 val;
6028
6029 result = -1;
6030 error = sysctl_handle_int(oidp, &result, 0, req);
6031 if (error || (req->newptr == NULL))
6032 return (error);
6033
6034 /* Make sure the register is accessible. */
6035 if (result < 0x20) {
6036 sc = (struct bce_softc *)arg1;
6037 dev = sc->bce_dev;
6038 val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
6039 BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
6040 }
6041 return (error);
6042 }
6043
6044
6045 /****************************************************************************/
6046 /* Provides a sysctl interface to forcing the driver to dump state and */
6047 /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
6048 /* */
6049 /* Returns: */
6050 /* 0 for success, positive value for failure. */
6051 /****************************************************************************/
6052 static int
6053 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
6054 {
6055 int error;
6056 int result;
6057 struct bce_softc *sc;
6058
6059 result = -1;
6060 error = sysctl_handle_int(oidp, &result, 0, req);
6061
6062 if (error || !req->newptr)
6063 return (error);
6064
6065 if (result == 1) {
6066 sc = (struct bce_softc *)arg1;
6067 bce_breakpoint(sc);
6068 }
6069
6070 return error;
6071 }
6072 #endif
6073
6074
6075 /****************************************************************************/
6076 /* Adds any sysctl parameters for tuning or debugging purposes. */
6077 /* */
6078 /* Returns: */
6079 /* 0 for success, positive value for failure. */
6080 /****************************************************************************/
6081 static void
6082 bce_add_sysctls(struct bce_softc *sc)
6083 {
6084 struct sysctl_ctx_list *ctx;
6085 struct sysctl_oid_list *children;
6086
6087 ctx = device_get_sysctl_ctx(sc->bce_dev);
6088 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6089
6090 #ifdef BCE_DEBUG
6091 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6092 "rx_low_watermark",
6093 CTLFLAG_RD, &sc->rx_low_watermark,
6094 0, "Lowest level of free rx_bd's");
6095
6096 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6097 "rx_empty_count",
6098 CTLFLAG_RD, &sc->rx_empty_count,
6099 0, "Number of times the RX chain was empty");
6100
6101 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6102 "tx_hi_watermark",
6103 CTLFLAG_RD, &sc->tx_hi_watermark,
6104 0, "Highest level of used tx_bd's");
6105
6106 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6107 "tx_full_count",
6108 CTLFLAG_RD, &sc->tx_full_count,
6109 0, "Number of times the TX chain was full");
6110
6111 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6112 "l2fhdr_status_errors",
6113 CTLFLAG_RD, &sc->l2fhdr_status_errors,
6114 0, "l2_fhdr status errors");
6115
6116 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6117 "unexpected_attentions",
6118 CTLFLAG_RD, &sc->unexpected_attentions,
6119 0, "unexpected attentions");
6120
6121 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6122 "lost_status_block_updates",
6123 CTLFLAG_RD, &sc->lost_status_block_updates,
6124 0, "lost status block updates");
6125
6126 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6127 "mbuf_sim_alloc_failed",
6128 CTLFLAG_RD, &sc->mbuf_sim_alloc_failed,
6129 0, "mbuf cluster simulated allocation failures");
6130
6131 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6132 "requested_tso_frames",
6133 CTLFLAG_RD, &sc->requested_tso_frames,
6134 0, "The number of TSO frames received");
6135
6136 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6137 "rx_mbuf_segs[1]",
6138 CTLFLAG_RD, &sc->rx_mbuf_segs[1],
6139 0, "mbuf cluster with 1 segment");
6140
6141 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6142 "rx_mbuf_segs[2]",
6143 CTLFLAG_RD, &sc->rx_mbuf_segs[2],
6144 0, "mbuf cluster with 2 segments");
6145
6146 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6147 "rx_mbuf_segs[3]",
6148 CTLFLAG_RD, &sc->rx_mbuf_segs[3],
6149 0, "mbuf cluster with 3 segments");
6150
6151 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6152 "rx_mbuf_segs[4]",
6153 CTLFLAG_RD, &sc->rx_mbuf_segs[4],
6154 0, "mbuf cluster with 4 segments");
6155
6156 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6157 "rx_mbuf_segs[5]",
6158 CTLFLAG_RD, &sc->rx_mbuf_segs[5],
6159 0, "mbuf cluster with 5 segments");
6160
6161 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6162 "rx_mbuf_segs[6]",
6163 CTLFLAG_RD, &sc->rx_mbuf_segs[6],
6164 0, "mbuf cluster with 6 segments");
6165
6166 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6167 "rx_mbuf_segs[7]",
6168 CTLFLAG_RD, &sc->rx_mbuf_segs[7],
6169 0, "mbuf cluster with 7 segments");
6170
6171 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6172 "rx_mbuf_segs[8]",
6173 CTLFLAG_RD, &sc->rx_mbuf_segs[8],
6174 0, "mbuf cluster with 8 segments");
6175
6176 #endif
6177
6178 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6179 "mbuf_alloc_failed",
6180 CTLFLAG_RD, &sc->mbuf_alloc_failed,
6181 0, "mbuf cluster allocation failures");
6182
6183 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6184 "tx_dma_map_failures",
6185 CTLFLAG_RD, &sc->tx_dma_map_failures,
6186 0, "tx dma mapping failures");
6187
6188 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6189 "stat_IfHcInOctets",
6190 CTLFLAG_RD, &sc->stat_IfHCInOctets,
6191 "Bytes received");
6192
6193 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6194 "stat_IfHCInBadOctets",
6195 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6196 "Bad bytes received");
6197
6198 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6199 "stat_IfHCOutOctets",
6200 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6201 "Bytes sent");
6202
6203 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6204 "stat_IfHCOutBadOctets",
6205 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6206 "Bad bytes sent");
6207
6208 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6209 "stat_IfHCInUcastPkts",
6210 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6211 "Unicast packets received");
6212
6213 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6214 "stat_IfHCInMulticastPkts",
6215 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6216 "Multicast packets received");
6217
6218 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6219 "stat_IfHCInBroadcastPkts",
6220 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6221 "Broadcast packets received");
6222
6223 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6224 "stat_IfHCOutUcastPkts",
6225 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6226 "Unicast packets sent");
6227
6228 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6229 "stat_IfHCOutMulticastPkts",
6230 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6231 "Multicast packets sent");
6232
6233 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6234 "stat_IfHCOutBroadcastPkts",
6235 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6236 "Broadcast packets sent");
6237
6238 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6239 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6240 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6241 0, "Internal MAC transmit errors");
6242
6243 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6244 "stat_Dot3StatsCarrierSenseErrors",
6245 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6246 0, "Carrier sense errors");
6247
6248 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6249 "stat_Dot3StatsFCSErrors",
6250 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6251 0, "Frame check sequence errors");
6252
6253 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6254 "stat_Dot3StatsAlignmentErrors",
6255 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6256 0, "Alignment errors");
6257
6258 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6259 "stat_Dot3StatsSingleCollisionFrames",
6260 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6261 0, "Single Collision Frames");
6262
6263 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6264 "stat_Dot3StatsMultipleCollisionFrames",
6265 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6266 0, "Multiple Collision Frames");
6267
6268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6269 "stat_Dot3StatsDeferredTransmissions",
6270 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6271 0, "Deferred Transmissions");
6272
6273 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6274 "stat_Dot3StatsExcessiveCollisions",
6275 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6276 0, "Excessive Collisions");
6277
6278 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6279 "stat_Dot3StatsLateCollisions",
6280 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6281 0, "Late Collisions");
6282
6283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6284 "stat_EtherStatsCollisions",
6285 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6286 0, "Collisions");
6287
6288 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6289 "stat_EtherStatsFragments",
6290 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6291 0, "Fragments");
6292
6293 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6294 "stat_EtherStatsJabbers",
6295 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6296 0, "Jabbers");
6297
6298 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6299 "stat_EtherStatsUndersizePkts",
6300 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6301 0, "Undersize packets");
6302
6303 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6304 "stat_EtherStatsOverrsizePkts",
6305 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6306 0, "stat_EtherStatsOverrsizePkts");
6307
6308 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6309 "stat_EtherStatsPktsRx64Octets",
6310 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6311 0, "Bytes received in 64 byte packets");
6312
6313 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6314 "stat_EtherStatsPktsRx65Octetsto127Octets",
6315 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6316 0, "Bytes received in 65 to 127 byte packets");
6317
6318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6319 "stat_EtherStatsPktsRx128Octetsto255Octets",
6320 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6321 0, "Bytes received in 128 to 255 byte packets");
6322
6323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6324 "stat_EtherStatsPktsRx256Octetsto511Octets",
6325 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6326 0, "Bytes received in 256 to 511 byte packets");
6327
6328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6329 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6330 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6331 0, "Bytes received in 512 to 1023 byte packets");
6332
6333 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6334 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6335 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6336 0, "Bytes received in 1024 t0 1522 byte packets");
6337
6338 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6339 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6340 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6341 0, "Bytes received in 1523 to 9022 byte packets");
6342
6343 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6344 "stat_EtherStatsPktsTx64Octets",
6345 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6346 0, "Bytes sent in 64 byte packets");
6347
6348 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6349 "stat_EtherStatsPktsTx65Octetsto127Octets",
6350 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6351 0, "Bytes sent in 65 to 127 byte packets");
6352
6353 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6354 "stat_EtherStatsPktsTx128Octetsto255Octets",
6355 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6356 0, "Bytes sent in 128 to 255 byte packets");
6357
6358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6359 "stat_EtherStatsPktsTx256Octetsto511Octets",
6360 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6361 0, "Bytes sent in 256 to 511 byte packets");
6362
6363 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6364 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6365 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6366 0, "Bytes sent in 512 to 1023 byte packets");
6367
6368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6369 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6370 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6371 0, "Bytes sent in 1024 to 1522 byte packets");
6372
6373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6374 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6375 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6376 0, "Bytes sent in 1523 to 9022 byte packets");
6377
6378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6379 "stat_XonPauseFramesReceived",
6380 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6381 0, "XON pause frames receved");
6382
6383 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6384 "stat_XoffPauseFramesReceived",
6385 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6386 0, "XOFF pause frames received");
6387
6388 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6389 "stat_OutXonSent",
6390 CTLFLAG_RD, &sc->stat_OutXonSent,
6391 0, "XON pause frames sent");
6392
6393 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6394 "stat_OutXoffSent",
6395 CTLFLAG_RD, &sc->stat_OutXoffSent,
6396 0, "XOFF pause frames sent");
6397
6398 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6399 "stat_FlowControlDone",
6400 CTLFLAG_RD, &sc->stat_FlowControlDone,
6401 0, "Flow control done");
6402
6403 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6404 "stat_MacControlFramesReceived",
6405 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6406 0, "MAC control frames received");
6407
6408 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6409 "stat_XoffStateEntered",
6410 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6411 0, "XOFF state entered");
6412
6413 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6414 "stat_IfInFramesL2FilterDiscards",
6415 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6416 0, "Received L2 packets discarded");
6417
6418 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6419 "stat_IfInRuleCheckerDiscards",
6420 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6421 0, "Received packets discarded by rule");
6422
6423 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6424 "stat_IfInFTQDiscards",
6425 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6426 0, "Received packet FTQ discards");
6427
6428 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6429 "stat_IfInMBUFDiscards",
6430 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6431 0, "Received packets discarded due to lack of controller buffer memory");
6432
6433 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6434 "stat_IfInRuleCheckerP4Hit",
6435 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6436 0, "Received packets rule checker hits");
6437
6438 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6439 "stat_CatchupInRuleCheckerDiscards",
6440 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6441 0, "Received packets discarded in Catchup path");
6442
6443 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6444 "stat_CatchupInFTQDiscards",
6445 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6446 0, "Received packets discarded in FTQ in Catchup path");
6447
6448 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6449 "stat_CatchupInMBUFDiscards",
6450 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6451 0, "Received packets discarded in controller buffer memory in Catchup path");
6452
6453 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6454 "stat_CatchupInRuleCheckerP4Hit",
6455 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6456 0, "Received packets rule checker hits in Catchup path");
6457
6458 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6459 "com_no_buffers",
6460 CTLFLAG_RD, &sc->com_no_buffers,
6461 0, "Valid packets received but no RX buffers available");
6462
6463 #ifdef BCE_DEBUG
6464 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6465 "driver_state", CTLTYPE_INT | CTLFLAG_RW,
6466 (void *)sc, 0,
6467 bce_sysctl_driver_state, "I", "Drive state information");
6468
6469 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6470 "hw_state", CTLTYPE_INT | CTLFLAG_RW,
6471 (void *)sc, 0,
6472 bce_sysctl_hw_state, "I", "Hardware state information");
6473
6474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6475 "bc_state", CTLTYPE_INT | CTLFLAG_RW,
6476 (void *)sc, 0,
6477 bce_sysctl_bc_state, "I", "Bootcode state information");
6478
6479 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6480 "dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6481 (void *)sc, 0,
6482 bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6483
6484 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6485 "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
6486 (void *)sc, 0,
6487 bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
6488
6489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6490 "breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6491 (void *)sc, 0,
6492 bce_sysctl_breakpoint, "I", "Driver breakpoint");
6493
6494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6495 "reg_read", CTLTYPE_INT | CTLFLAG_RW,
6496 (void *)sc, 0,
6497 bce_sysctl_reg_read, "I", "Register read");
6498
6499 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6500 "phy_read", CTLTYPE_INT | CTLFLAG_RW,
6501 (void *)sc, 0,
6502 bce_sysctl_phy_read, "I", "PHY register read");
6503
6504 #endif
6505
6506 }
6507
6508
6509 /****************************************************************************/
6510 /* BCE Debug Routines */
6511 /****************************************************************************/
6512 #ifdef BCE_DEBUG
6513
6514 /****************************************************************************/
6515 /* Freezes the controller to allow for a cohesive state dump. */
6516 /* */
6517 /* Returns: */
6518 /* Nothing. */
6519 /****************************************************************************/
6520 static void
6521 bce_freeze_controller(struct bce_softc *sc)
6522 {
6523 u32 val;
6524 val = REG_RD(sc, BCE_MISC_COMMAND);
6525 val |= BCE_MISC_COMMAND_DISABLE_ALL;
6526 REG_WR(sc, BCE_MISC_COMMAND, val);
6527
6528 }
6529
6530
6531 /****************************************************************************/
6532 /* Unfreezes the controller after a freeze operation. This may not always */
6533 /* work and the controller will require a reset! */
6534 /* */
6535 /* Returns: */
6536 /* Nothing. */
6537 /****************************************************************************/
6538 static void
6539 bce_unfreeze_controller(struct bce_softc *sc)
6540 {
6541 u32 val;
6542 val = REG_RD(sc, BCE_MISC_COMMAND);
6543 val |= BCE_MISC_COMMAND_ENABLE_ALL;
6544 REG_WR(sc, BCE_MISC_COMMAND, val);
6545
6546 }
6547
6548 /****************************************************************************/
6549 /* Prints out information about an mbuf. */
6550 /* */
6551 /* Returns: */
6552 /* Nothing. */
6553 /****************************************************************************/
6554 static void
6555 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6556 {
6557 u32 val_hi, val_lo;
6558 struct mbuf *mp = m;
6559
6560 if (m == NULL) {
6561 BCE_PRINTF("mbuf: null pointer\n");
6562 return;
6563 }
6564
6565 while (mp) {
6566 val_hi = BCE_ADDR_HI(mp);
6567 val_lo = BCE_ADDR_LO(mp);
6568 BCE_PRINTF("mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ( ",
6569 val_hi, val_lo, mp->m_len);
6570
6571 if (mp->m_flags & M_EXT)
6572 printf("M_EXT ");
6573 if (mp->m_flags & M_PKTHDR)
6574 printf("M_PKTHDR ");
6575 if (mp->m_flags & M_EOR)
6576 printf("M_EOR ");
6577 if (mp->m_flags & M_RDONLY)
6578 printf("M_RDONLY ");
6579
6580 val_hi = BCE_ADDR_HI(mp->m_data);
6581 val_lo = BCE_ADDR_LO(mp->m_data);
6582 printf(") m_data = 0x%08X:%08X\n",
6583 val_hi, val_lo);
6584
6585 if (mp->m_flags & M_PKTHDR) {
6586 BCE_PRINTF("- m_pkthdr: flags = ( ");
6587 if (mp->m_flags & M_BCAST)
6588 printf("M_BCAST ");
6589 if (mp->m_flags & M_MCAST)
6590 printf("M_MCAST ");
6591 if (mp->m_flags & M_FRAG)
6592 printf("M_FRAG ");
6593 if (mp->m_flags & M_FIRSTFRAG)
6594 printf("M_FIRSTFRAG ");
6595 if (mp->m_flags & M_LASTFRAG)
6596 printf("M_LASTFRAG ");
6597 if (mp->m_flags & M_VLANTAG)
6598 printf("M_VLANTAG ");
6599 if (mp->m_flags & M_PROMISC)
6600 printf("M_PROMISC ");
6601 printf(") csum_flags = ( ");
6602 if (mp->m_pkthdr.csum_flags & CSUM_IP)
6603 printf("CSUM_IP ");
6604 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
6605 printf("CSUM_TCP ");
6606 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
6607 printf("CSUM_UDP ");
6608 if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
6609 printf("CSUM_IP_FRAGS ");
6610 if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
6611 printf("CSUM_FRAGMENT ");
6612 if (mp->m_pkthdr.csum_flags & CSUM_TSO)
6613 printf("CSUM_TSO ");
6614 if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
6615 printf("CSUM_IP_CHECKED ");
6616 if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
6617 printf("CSUM_IP_VALID ");
6618 if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
6619 printf("CSUM_DATA_VALID ");
6620 printf(")\n");
6621 }
6622
6623 if (mp->m_flags & M_EXT) {
6624 val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
6625 val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
6626 BCE_PRINTF("- m_ext: vaddr = 0x%08X:%08X, ext_size = %d, type = ",
6627 val_hi, val_lo, mp->m_ext.ext_size);
6628 switch (mp->m_ext.ext_type) {
6629 case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break;
6630 case EXT_SFBUF: printf("EXT_SFBUF\n"); break;
6631 case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break;
6632 case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break;
6633 case EXT_PACKET: printf("EXT_PACKET\n"); break;
6634 case EXT_MBUF: printf("EXT_MBUF\n"); break;
6635 case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break;
6636 case EXT_MOD_TYPE: printf("EXT_MDD_TYPE\n"); break;
6637 case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
6638 case EXT_EXTREF: printf("EXT_EXTREF\n"); break;
6639 default: printf("UNKNOWN\n");
6640 }
6641 }
6642
6643 mp = mp->m_next;
6644 }
6645 }
6646
6647
6648 /****************************************************************************/
6649 /* Prints out the mbufs in the TX mbuf chain. */
6650 /* */
6651 /* Returns: */
6652 /* Nothing. */
6653 /****************************************************************************/
6654 static void
6655 bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6656 {
6657 struct mbuf *m;
6658
6659 BCE_PRINTF(
6660 "----------------------------"
6661 " tx mbuf data "
6662 "----------------------------\n");
6663
6664 for (int i = 0; i < count; i++) {
6665 m = sc->tx_mbuf_ptr[chain_prod];
6666 BCE_PRINTF("txmbuf[%d]\n", chain_prod);
6667 bce_dump_mbuf(sc, m);
6668 chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
6669 }
6670
6671 BCE_PRINTF(
6672 "----------------------------"
6673 "----------------"
6674 "----------------------------\n");
6675 }
6676
6677
6678 /****************************************************************************/
6679 /* Prints out the mbufs in the RX mbuf chain. */
6680 /* */
6681 /* Returns: */
6682 /* Nothing. */
6683 /****************************************************************************/
6684 static void
6685 bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
6686 {
6687 struct mbuf *m;
6688
6689 BCE_PRINTF(
6690 "----------------------------"
6691 " rx mbuf data "
6692 "----------------------------\n");
6693
6694 for (int i = 0; i < count; i++) {
6695 m = sc->rx_mbuf_ptr[chain_prod];
6696 BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
6697 bce_dump_mbuf(sc, m);
6698 chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
6699 }
6700
6701
6702 BCE_PRINTF(
6703 "----------------------------"
6704 "----------------"
6705 "----------------------------\n");
6706 }
6707
6708
6709 /****************************************************************************/
6710 /* Prints out a tx_bd structure. */
6711 /* */
6712 /* Returns: */
6713 /* Nothing. */
6714 /****************************************************************************/
6715 static void
6716 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
6717 {
6718 if (idx > MAX_TX_BD)
6719 /* Index out of range. */
6720 BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
6721 else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
6722 /* TX Chain page pointer. */
6723 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
6724 idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
6725 else {
6726 /* Normal tx_bd entry. */
6727 BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
6728 "vlan tag= 0x%04X, flags = 0x%04X (", idx,
6729 txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
6730 txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
6731 txbd->tx_bd_flags);
6732
6733 if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
6734 printf(" CONN_FAULT");
6735
6736 if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
6737 printf(" TCP_UDP_CKSUM");
6738
6739 if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
6740 printf(" IP_CKSUM");
6741
6742 if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
6743 printf(" VLAN");
6744
6745 if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
6746 printf(" COAL_NOW");
6747
6748 if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
6749 printf(" DONT_GEN_CRC");
6750
6751 if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
6752 printf(" START");
6753
6754 if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
6755 printf(" END");
6756
6757 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
6758 printf(" LSO");
6759
6760 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
6761 printf(" OPTION_WORD");
6762
6763 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
6764 printf(" FLAGS");
6765
6766 if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
6767 printf(" SNAP");
6768
6769 printf(" )\n");
6770 }
6771
6772 }
6773
6774
6775 /****************************************************************************/
6776 /* Prints out a rx_bd structure. */
6777 /* */
6778 /* Returns: */
6779 /* Nothing. */
6780 /****************************************************************************/
static void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
	if (idx > MAX_RX_BD)
		/* Index out of range. */
		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
		/* RX chain page pointer (last entry on each page). */
		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
	else
		/* Normal rx_bd entry. */
		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
			"flags = 0x%08X\n", idx,
			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
			rxbd->rx_bd_len, rxbd->rx_bd_flags);
}
6798
6799
6800 /****************************************************************************/
6801 /* Prints out a l2_fhdr structure. */
6802 /* */
6803 /* Returns: */
6804 /* Nothing. */
6805 /****************************************************************************/
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
	/* All fields are read verbatim from the controller-written
	 * l2_fhdr structure; idx identifies the associated rx_bd. */
	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%08X, "
		"pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
		"tcp_udp_xsum = 0x%04X\n", idx,
		l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
		l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
		l2fhdr->l2_fhdr_tcp_udp_xsum);
}
6816
6817
6818 /****************************************************************************/
6819 /* Prints out the TX chain. */
6820 /* */
6821 /* Returns: */
6822 /* Nothing. */
6823 /****************************************************************************/
6824 static void
6825 bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
6826 {
6827 struct tx_bd *txbd;
6828
6829 /* First some info about the tx_bd chain structure. */
6830 BCE_PRINTF(
6831 "----------------------------"
6832 " tx_bd chain "
6833 "----------------------------\n");
6834
6835 BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
6836 (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
6837
6838 BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
6839 (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
6840
6841 BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
6842
6843 BCE_PRINTF(
6844 "----------------------------"
6845 " tx_bd data "
6846 "----------------------------\n");
6847
6848 /* Now print out the tx_bd's themselves. */
6849 for (int i = 0; i < count; i++) {
6850 txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
6851 bce_dump_txbd(sc, tx_prod, txbd);
6852 tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
6853 }
6854
6855 BCE_PRINTF(
6856 "----------------------------"
6857 "----------------"
6858 "----------------------------\n");
6859 }
6860
6861
6862 /****************************************************************************/
6863 /* Prints out the RX chain. */
6864 /* */
6865 /* Returns: */
6866 /* Nothing. */
6867 /****************************************************************************/
6868 static void
6869 bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
6870 {
6871 struct rx_bd *rxbd;
6872
6873 /* First some info about the tx_bd chain structure. */
6874 BCE_PRINTF(
6875 "----------------------------"
6876 " rx_bd chain "
6877 "----------------------------\n");
6878
6879 BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
6880 (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
6881
6882 BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
6883 (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
6884
6885 BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
6886
6887 BCE_PRINTF(
6888 "----------------------------"
6889 " rx_bd data "
6890 "----------------------------\n");
6891
6892 /* Now print out the rx_bd's themselves. */
6893 for (int i = 0; i < count; i++) {
6894 rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
6895 bce_dump_rxbd(sc, rx_prod, rxbd);
6896 rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
6897 }
6898
6899 BCE_PRINTF(
6900 "----------------------------"
6901 "----------------"
6902 "----------------------------\n");
6903 }
6904
6905
6906 /****************************************************************************/
6907 /* Prints out the status block from host memory. */
6908 /* */
6909 /* Returns: */
6910 /* Nothing. */
6911 /****************************************************************************/
static void
bce_dump_status_block(struct bce_softc *sc)
{
	struct status_block *sblk;

	sblk = sc->status_block;

	BCE_PRINTF(
		"----------------------------"
		" Status Block "
		"----------------------------\n");

	BCE_PRINTF("    0x%08X - attn_bits\n",
		sblk->status_attn_bits);

	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
		sblk->status_attn_bits_ack);

	/* Consumer indices are printed raw and masked to a chain index. */
	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
		sblk->status_rx_quick_consumer_index0,
		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));

	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
		sblk->status_tx_quick_consumer_index0,
		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));

	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);

	/* These indices are not used for normal L2 drivers, so they are
	 * only printed when nonzero. */
	if (sblk->status_rx_quick_consumer_index1)
		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
			sblk->status_rx_quick_consumer_index1,
			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));

	if (sblk->status_tx_quick_consumer_index1)
		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
			sblk->status_tx_quick_consumer_index1,
			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));

	if (sblk->status_rx_quick_consumer_index2)
		BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
			sblk->status_rx_quick_consumer_index2,
			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));

	if (sblk->status_tx_quick_consumer_index2)
		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
			sblk->status_tx_quick_consumer_index2,
			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));

	if (sblk->status_rx_quick_consumer_index3)
		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
			sblk->status_rx_quick_consumer_index3,
			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));

	if (sblk->status_tx_quick_consumer_index3)
		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
			sblk->status_tx_quick_consumer_index3,
			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));

	if (sblk->status_rx_quick_consumer_index4 ||
		sblk->status_rx_quick_consumer_index5)
		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5  = 0x%08X\n",
			sblk->status_rx_quick_consumer_index4,
			sblk->status_rx_quick_consumer_index5);

	if (sblk->status_rx_quick_consumer_index6 ||
		sblk->status_rx_quick_consumer_index7)
		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7  = 0x%08X\n",
			sblk->status_rx_quick_consumer_index6,
			sblk->status_rx_quick_consumer_index7);

	if (sblk->status_rx_quick_consumer_index8 ||
		sblk->status_rx_quick_consumer_index9)
		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9  = 0x%08X\n",
			sblk->status_rx_quick_consumer_index8,
			sblk->status_rx_quick_consumer_index9);

	if (sblk->status_rx_quick_consumer_index10 ||
		sblk->status_rx_quick_consumer_index11)
		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11 = 0x%08X\n",
			sblk->status_rx_quick_consumer_index10,
			sblk->status_rx_quick_consumer_index11);

	if (sblk->status_rx_quick_consumer_index12 ||
		sblk->status_rx_quick_consumer_index13)
		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13 = 0x%08X\n",
			sblk->status_rx_quick_consumer_index12,
			sblk->status_rx_quick_consumer_index13);

	if (sblk->status_rx_quick_consumer_index14 ||
		sblk->status_rx_quick_consumer_index15)
		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15 = 0x%08X\n",
			sblk->status_rx_quick_consumer_index14,
			sblk->status_rx_quick_consumer_index15);

	if (sblk->status_completion_producer_index ||
		sblk->status_cmd_consumer_index)
		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
			sblk->status_completion_producer_index,
			sblk->status_cmd_consumer_index);

	BCE_PRINTF(
		"----------------------------"
		"----------------"
		"----------------------------\n");
}
7018
7019
7020 /****************************************************************************/
7021 /* Prints out the statistics block from host memory. */
7022 /* */
7023 /* Returns: */
7024 /* Nothing. */
7025 /****************************************************************************/
7026 static void
7027 bce_dump_stats_block(struct bce_softc *sc)
7028 {
7029 struct statistics_block *sblk;
7030
7031 sblk = sc->stats_block;
7032
7033 BCE_PRINTF(
7034 "---------------"
7035 " Stats Block (All Stats Not Shown Are 0) "
7036 "---------------\n");
7037
7038 if (sblk->stat_IfHCInOctets_hi
7039 || sblk->stat_IfHCInOctets_lo)
7040 BCE_PRINTF("0x%08X:%08X : "
7041 "IfHcInOctets\n",
7042 sblk->stat_IfHCInOctets_hi,
7043 sblk->stat_IfHCInOctets_lo);
7044
7045 if (sblk->stat_IfHCInBadOctets_hi
7046 || sblk->stat_IfHCInBadOctets_lo)
7047 BCE_PRINTF("0x%08X:%08X : "
7048 "IfHcInBadOctets\n",
7049 sblk->stat_IfHCInBadOctets_hi,
7050 sblk->stat_IfHCInBadOctets_lo);
7051
7052 if (sblk->stat_IfHCOutOctets_hi
7053 || sblk->stat_IfHCOutOctets_lo)
7054 BCE_PRINTF("0x%08X:%08X : "
7055 "IfHcOutOctets\n",
7056 sblk->stat_IfHCOutOctets_hi,
7057 sblk->stat_IfHCOutOctets_lo);
7058
7059 if (sblk->stat_IfHCOutBadOctets_hi
7060 || sblk->stat_IfHCOutBadOctets_lo)
7061 BCE_PRINTF("0x%08X:%08X : "
7062 "IfHcOutBadOctets\n",
7063 sblk->stat_IfHCOutBadOctets_hi,
7064 sblk->stat_IfHCOutBadOctets_lo);
7065
7066 if (sblk->stat_IfHCInUcastPkts_hi
7067 || sblk->stat_IfHCInUcastPkts_lo)
7068 BCE_PRINTF("0x%08X:%08X : "
7069 "IfHcInUcastPkts\n",
7070 sblk->stat_IfHCInUcastPkts_hi,
7071 sblk->stat_IfHCInUcastPkts_lo);
7072
7073 if (sblk->stat_IfHCInBroadcastPkts_hi
7074 || sblk->stat_IfHCInBroadcastPkts_lo)
7075 BCE_PRINTF("0x%08X:%08X : "
7076 "IfHcInBroadcastPkts\n",
7077 sblk->stat_IfHCInBroadcastPkts_hi,
7078 sblk->stat_IfHCInBroadcastPkts_lo);
7079
7080 if (sblk->stat_IfHCInMulticastPkts_hi
7081 || sblk->stat_IfHCInMulticastPkts_lo)
7082 BCE_PRINTF("0x%08X:%08X : "
7083 "IfHcInMulticastPkts\n",
7084 sblk->stat_IfHCInMulticastPkts_hi,
7085 sblk->stat_IfHCInMulticastPkts_lo);
7086
7087 if (sblk->stat_IfHCOutUcastPkts_hi
7088 || sblk->stat_IfHCOutUcastPkts_lo)
7089 BCE_PRINTF("0x%08X:%08X : "
7090 "IfHcOutUcastPkts\n",
7091 sblk->stat_IfHCOutUcastPkts_hi,
7092 sblk->stat_IfHCOutUcastPkts_lo);
7093
7094 if (sblk->stat_IfHCOutBroadcastPkts_hi
7095 || sblk->stat_IfHCOutBroadcastPkts_lo)
7096 BCE_PRINTF("0x%08X:%08X : "
7097 "IfHcOutBroadcastPkts\n",
7098 sblk->stat_IfHCOutBroadcastPkts_hi,
7099 sblk->stat_IfHCOutBroadcastPkts_lo);
7100
7101 if (sblk->stat_IfHCOutMulticastPkts_hi
7102 || sblk->stat_IfHCOutMulticastPkts_lo)
7103 BCE_PRINTF("0x%08X:%08X : "
7104 "IfHcOutMulticastPkts\n",
7105 sblk->stat_IfHCOutMulticastPkts_hi,
7106 sblk->stat_IfHCOutMulticastPkts_lo);
7107
7108 if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
7109 BCE_PRINTF(" 0x%08X : "
7110 "emac_tx_stat_dot3statsinternalmactransmiterrors\n",
7111 sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
7112
7113 if (sblk->stat_Dot3StatsCarrierSenseErrors)
7114 BCE_PRINTF(" 0x%08X : Dot3StatsCarrierSenseErrors\n",
7115 sblk->stat_Dot3StatsCarrierSenseErrors);
7116
7117 if (sblk->stat_Dot3StatsFCSErrors)
7118 BCE_PRINTF(" 0x%08X : Dot3StatsFCSErrors\n",
7119 sblk->stat_Dot3StatsFCSErrors);
7120
7121 if (sblk->stat_Dot3StatsAlignmentErrors)
7122 BCE_PRINTF(" 0x%08X : Dot3StatsAlignmentErrors\n",
7123 sblk->stat_Dot3StatsAlignmentErrors);
7124
7125 if (sblk->stat_Dot3StatsSingleCollisionFrames)
7126 BCE_PRINTF(" 0x%08X : Dot3StatsSingleCollisionFrames\n",
7127 sblk->stat_Dot3StatsSingleCollisionFrames);
7128
7129 if (sblk->stat_Dot3StatsMultipleCollisionFrames)
7130 BCE_PRINTF(" 0x%08X : Dot3StatsMultipleCollisionFrames\n",
7131 sblk->stat_Dot3StatsMultipleCollisionFrames);
7132
7133 if (sblk->stat_Dot3StatsDeferredTransmissions)
7134 BCE_PRINTF(" 0x%08X : Dot3StatsDeferredTransmissions\n",
7135 sblk->stat_Dot3StatsDeferredTransmissions);
7136
7137 if (sblk->stat_Dot3StatsExcessiveCollisions)
7138 BCE_PRINTF(" 0x%08X : Dot3StatsExcessiveCollisions\n",
7139 sblk->stat_Dot3StatsExcessiveCollisions);
7140
7141 if (sblk->stat_Dot3StatsLateCollisions)
7142 BCE_PRINTF(" 0x%08X : Dot3StatsLateCollisions\n",
7143 sblk->stat_Dot3StatsLateCollisions);
7144
7145 if (sblk->stat_EtherStatsCollisions)
7146 BCE_PRINTF(" 0x%08X : EtherStatsCollisions\n",
7147 sblk->stat_EtherStatsCollisions);
7148
7149 if (sblk->stat_EtherStatsFragments)
7150 BCE_PRINTF(" 0x%08X : EtherStatsFragments\n",
7151 sblk->stat_EtherStatsFragments);
7152
7153 if (sblk->stat_EtherStatsJabbers)
7154 BCE_PRINTF(" 0x%08X : EtherStatsJabbers\n",
7155 sblk->stat_EtherStatsJabbers);
7156
7157 if (sblk->stat_EtherStatsUndersizePkts)
7158 BCE_PRINTF(" 0x%08X : EtherStatsUndersizePkts\n",
7159 sblk->stat_EtherStatsUndersizePkts);
7160
7161 if (sblk->stat_EtherStatsOverrsizePkts)
7162 BCE_PRINTF(" 0x%08X : EtherStatsOverrsizePkts\n",
7163 sblk->stat_EtherStatsOverrsizePkts);
7164
7165 if (sblk->stat_EtherStatsPktsRx64Octets)
7166 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx64Octets\n",
7167 sblk->stat_EtherStatsPktsRx64Octets);
7168
7169 if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
7170 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
7171 sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
7172
7173 if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
7174 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
7175 sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
7176
7177 if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
7178 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
7179 sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
7180
7181 if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
7182 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
7183 sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
7184
7185 if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
7186 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
7187 sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
7188
7189 if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
7190 BCE_PRINTF(" 0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
7191 sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
7192
7193 if (sblk->stat_EtherStatsPktsTx64Octets)
7194 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx64Octets\n",
7195 sblk->stat_EtherStatsPktsTx64Octets);
7196
7197 if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
7198 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
7199 sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
7200
7201 if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
7202 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
7203 sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
7204
7205 if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
7206 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
7207 sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
7208
7209 if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
7210 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
7211 sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
7212
7213 if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
7214 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
7215 sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
7216
7217 if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
7218 BCE_PRINTF(" 0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
7219 sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
7220
7221 if (sblk->stat_XonPauseFramesReceived)
7222 BCE_PRINTF(" 0x%08X : XonPauseFramesReceived\n",
7223 sblk->stat_XonPauseFramesReceived);
7224
7225 if (sblk->stat_XoffPauseFramesReceived)
7226 BCE_PRINTF(" 0x%08X : XoffPauseFramesReceived\n",
7227 sblk->stat_XoffPauseFramesReceived);
7228
7229 if (sblk->stat_OutXonSent)
7230 BCE_PRINTF(" 0x%08X : OutXonSent\n",
7231 sblk->stat_OutXonSent);
7232
7233 if (sblk->stat_OutXoffSent)
7234 BCE_PRINTF(" 0x%08X : OutXoffSent\n",
7235 sblk->stat_OutXoffSent);
7236
7237 if (sblk->stat_FlowControlDone)
7238 BCE_PRINTF(" 0x%08X : FlowControlDone\n",
7239 sblk->stat_FlowControlDone);
7240
7241 if (sblk->stat_MacControlFramesReceived)
7242 BCE_PRINTF(" 0x%08X : MacControlFramesReceived\n",
7243 sblk->stat_MacControlFramesReceived);
7244
7245 if (sblk->stat_XoffStateEntered)
7246 BCE_PRINTF(" 0x%08X : XoffStateEntered\n",
7247 sblk->stat_XoffStateEntered);
7248
7249 if (sblk->stat_IfInFramesL2FilterDiscards)
7250 BCE_PRINTF(" 0x%08X : IfInFramesL2FilterDiscards\n",
7251 sblk->stat_IfInFramesL2FilterDiscards);
7252
7253 if (sblk->stat_IfInRuleCheckerDiscards)
7254 BCE_PRINTF(" 0x%08X : IfInRuleCheckerDiscards\n",
7255 sblk->stat_IfInRuleCheckerDiscards);
7256
7257 if (sblk->stat_IfInFTQDiscards)
7258 BCE_PRINTF(" 0x%08X : IfInFTQDiscards\n",
7259 sblk->stat_IfInFTQDiscards);
7260
7261 if (sblk->stat_IfInMBUFDiscards)
7262 BCE_PRINTF(" 0x%08X : IfInMBUFDiscards\n",
7263 sblk->stat_IfInMBUFDiscards);
7264
7265 if (sblk->stat_IfInRuleCheckerP4Hit)
7266 BCE_PRINTF(" 0x%08X : IfInRuleCheckerP4Hit\n",
7267 sblk->stat_IfInRuleCheckerP4Hit);
7268
7269 if (sblk->stat_CatchupInRuleCheckerDiscards)
7270 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerDiscards\n",
7271 sblk->stat_CatchupInRuleCheckerDiscards);
7272
7273 if (sblk->stat_CatchupInFTQDiscards)
7274 BCE_PRINTF(" 0x%08X : CatchupInFTQDiscards\n",
7275 sblk->stat_CatchupInFTQDiscards);
7276
7277 if (sblk->stat_CatchupInMBUFDiscards)
7278 BCE_PRINTF(" 0x%08X : CatchupInMBUFDiscards\n",
7279 sblk->stat_CatchupInMBUFDiscards);
7280
7281 if (sblk->stat_CatchupInRuleCheckerP4Hit)
7282 BCE_PRINTF(" 0x%08X : CatchupInRuleCheckerP4Hit\n",
7283 sblk->stat_CatchupInRuleCheckerP4Hit);
7284
7285 BCE_PRINTF(
7286 "----------------------------"
7287 "----------------"
7288 "----------------------------\n");
7289 }
7290
7291
7292 /****************************************************************************/
7293 /* Prints out a summary of the driver state. */
7294 /* */
7295 /* Returns: */
7296 /* Nothing. */
7297 /****************************************************************************/
7298 static void
7299 bce_dump_driver_state(struct bce_softc *sc)
7300 {
7301 u32 val_hi, val_lo;
7302
7303 BCE_PRINTF(
7304 "-----------------------------"
7305 " Driver State "
7306 "-----------------------------\n");
7307
7308 val_hi = BCE_ADDR_HI(sc);
7309 val_lo = BCE_ADDR_LO(sc);
7310 BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
7311 val_hi, val_lo);
7312
7313 val_hi = BCE_ADDR_HI(sc->bce_vhandle);
7314 val_lo = BCE_ADDR_LO(sc->bce_vhandle);
7315 BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
7316 val_hi, val_lo);
7317
7318 val_hi = BCE_ADDR_HI(sc->status_block);
7319 val_lo = BCE_ADDR_LO(sc->status_block);
7320 BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
7321 val_hi, val_lo);
7322
7323 val_hi = BCE_ADDR_HI(sc->stats_block);
7324 val_lo = BCE_ADDR_LO(sc->stats_block);
7325 BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
7326 val_hi, val_lo);
7327
7328 val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
7329 val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
7330 BCE_PRINTF(
7331 "0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
7332 val_hi, val_lo);
7333
7334 val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
7335 val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
7336 BCE_PRINTF(
7337 "0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
7338 val_hi, val_lo);
7339
7340 val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
7341 val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
7342 BCE_PRINTF(
7343 "0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
7344 val_hi, val_lo);
7345
7346 val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
7347 val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
7348 BCE_PRINTF(
7349 "0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
7350 val_hi, val_lo);
7351
7352 BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) h/w intrs\n",
7353 sc->interrupts_generated);
7354
7355 BCE_PRINTF(" 0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
7356 sc->rx_interrupts);
7357
7358 BCE_PRINTF(" 0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
7359 sc->tx_interrupts);
7360
7361 BCE_PRINTF(" 0x%08X - (sc->last_status_idx) status block index\n",
7362 sc->last_status_idx);
7363
7364 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
7365 sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
7366
7367 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
7368 sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
7369
7370 BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
7371 sc->tx_prod_bseq);
7372
7373 BCE_PRINTF(" 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
7374 sc->tx_mbuf_alloc);
7375
7376 BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
7377 sc->used_tx_bd);
7378
7379 BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
7380 sc->tx_hi_watermark, sc->max_tx_bd);
7381
7382 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
7383 sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
7384
7385 BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
7386 sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
7387
7388 BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
7389 sc->rx_prod_bseq);
7390
7391 BCE_PRINTF(" 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
7392 sc->rx_mbuf_alloc);
7393
7394 BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
7395 sc->free_rx_bd);
7396
7397 BCE_PRINTF("0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
7398 sc->rx_low_watermark, sc->max_rx_bd);
7399
7400 BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed) "
7401 "mbuf alloc failures\n",
7402 sc->mbuf_alloc_failed);
7403
7404 BCE_PRINTF(" 0x%08X - (sc->mbuf_sim_alloc_failed) "
7405 "simulated mbuf alloc failures\n",
7406 sc->mbuf_sim_alloc_failed);
7407
7408 BCE_PRINTF(
7409 "----------------------------"
7410 "----------------"
7411 "----------------------------\n");
7412 }
7413
7414
7415 /****************************************************************************/
7416 /* Prints out the hardware state through a summary of important register, */
7417 /* followed by a complete register dump. */
7418 /* */
7419 /* Returns: */
7420 /* Nothing. */
7421 /****************************************************************************/
7422 static void
7423 bce_dump_hw_state(struct bce_softc *sc)
7424 {
7425 u32 val1;
7426
7427 BCE_PRINTF(
7428 "----------------------------"
7429 " Hardware State "
7430 "----------------------------\n");
7431
7432 BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
7433
7434 val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
7435 BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
7436 val1, BCE_MISC_ENABLE_STATUS_BITS);
7437
7438 val1 = REG_RD(sc, BCE_DMA_STATUS);
7439 BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val1, BCE_DMA_STATUS);
7440
7441 val1 = REG_RD(sc, BCE_CTX_STATUS);
7442 BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val1, BCE_CTX_STATUS);
7443
7444 val1 = REG_RD(sc, BCE_EMAC_STATUS);
7445 BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val1, BCE_EMAC_STATUS);
7446
7447 val1 = REG_RD(sc, BCE_RPM_STATUS);
7448 BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val1, BCE_RPM_STATUS);
7449
7450 val1 = REG_RD(sc, BCE_TBDR_STATUS);
7451 BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val1, BCE_TBDR_STATUS);
7452
7453 val1 = REG_RD(sc, BCE_TDMA_STATUS);
7454 BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val1, BCE_TDMA_STATUS);
7455
7456 val1 = REG_RD(sc, BCE_HC_STATUS);
7457 BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
7458
7459 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7460 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
7461
7462 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7463 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
7464
7465 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7466 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
7467
7468 val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
7469 BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val1, BCE_COM_CPU_STATE);
7470
7471 val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
7472 BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val1, BCE_MCP_CPU_STATE);
7473
7474 val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
7475 BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val1, BCE_CP_CPU_STATE);
7476
7477 BCE_PRINTF(
7478 "----------------------------"
7479 "----------------"
7480 "----------------------------\n");
7481
7482 BCE_PRINTF(
7483 "----------------------------"
7484 " Register Dump "
7485 "----------------------------\n");
7486
7487 for (int i = 0x400; i < 0x8000; i += 0x10)
7488 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7489 i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
7490 REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
7491
7492 BCE_PRINTF(
7493 "----------------------------"
7494 "----------------"
7495 "----------------------------\n");
7496 }
7497
7498
7499 /****************************************************************************/
7500 /* Prints out the bootcode state. */
7501 /* */
7502 /* Returns: */
7503 /* Nothing. */
7504 /****************************************************************************/
7505 static void
7506 bce_dump_bc_state(struct bce_softc *sc)
7507 {
7508 u32 val;
7509
7510 BCE_PRINTF(
7511 "----------------------------"
7512 " Bootcode State "
7513 "----------------------------\n");
7514
7515 BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
7516
7517 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
7518 BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
7519 val, BCE_BC_RESET_TYPE);
7520
7521 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
7522 BCE_PRINTF("0x%08X - (0x%06X) state\n",
7523 val, BCE_BC_STATE);
7524
7525 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
7526 BCE_PRINTF("0x%08X - (0x%06X) condition\n",
7527 val, BCE_BC_CONDITION);
7528
7529 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
7530 BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
7531 val, BCE_BC_STATE_DEBUG_CMD);
7532
7533 BCE_PRINTF(
7534 "----------------------------"
7535 "----------------"
7536 "----------------------------\n");
7537 }
7538
7539
7540 /****************************************************************************/
7541 /* Prints out the TXP state. */
7542 /* */
7543 /* Returns: */
7544 /* Nothing. */
7545 /****************************************************************************/
7546 static void
7547 bce_dump_txp_state(struct bce_softc *sc)
7548 {
7549 u32 val1;
7550
7551 BCE_PRINTF(
7552 "----------------------------"
7553 " TXP State "
7554 "----------------------------\n");
7555
7556 val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
7557 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1, BCE_TXP_CPU_MODE);
7558
7559 val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
7560 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
7561
7562 val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
7563 BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1, BCE_TXP_CPU_EVENT_MASK);
7564
7565 BCE_PRINTF(
7566 "----------------------------"
7567 " Register Dump "
7568 "----------------------------\n");
7569
7570 for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
7571 /* Skip the big blank spaces */
7572 if (i < 0x454000 && i > 0x5ffff)
7573 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7574 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7575 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7576 }
7577
7578 BCE_PRINTF(
7579 "----------------------------"
7580 "----------------"
7581 "----------------------------\n");
7582 }
7583
7584
7585 /****************************************************************************/
7586 /* Prints out the RXP state. */
7587 /* */
7588 /* Returns: */
7589 /* Nothing. */
7590 /****************************************************************************/
7591 static void
7592 bce_dump_rxp_state(struct bce_softc *sc)
7593 {
7594 u32 val1;
7595
7596 BCE_PRINTF(
7597 "----------------------------"
7598 " RXP State "
7599 "----------------------------\n");
7600
7601 val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
7602 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1, BCE_RXP_CPU_MODE);
7603
7604 val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
7605 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
7606
7607 val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
7608 BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1, BCE_RXP_CPU_EVENT_MASK);
7609
7610 BCE_PRINTF(
7611 "----------------------------"
7612 " Register Dump "
7613 "----------------------------\n");
7614
7615 for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
7616 /* Skip the big blank sapces */
7617 if (i < 0xc5400 && i > 0xdffff)
7618 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7619 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7620 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7621 }
7622
7623 BCE_PRINTF(
7624 "----------------------------"
7625 "----------------"
7626 "----------------------------\n");
7627 }
7628
7629
7630 /****************************************************************************/
7631 /* Prints out the TPAT state. */
7632 /* */
7633 /* Returns: */
7634 /* Nothing. */
7635 /****************************************************************************/
7636 static void
7637 bce_dump_tpat_state(struct bce_softc *sc)
7638 {
7639 u32 val1;
7640
7641 BCE_PRINTF(
7642 "----------------------------"
7643 " TPAT State "
7644 "----------------------------\n");
7645
7646 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
7647 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE);
7648
7649 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
7650 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
7651
7652 val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
7653 BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK);
7654
7655 BCE_PRINTF(
7656 "----------------------------"
7657 " Register Dump "
7658 "----------------------------\n");
7659
7660 for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
7661 /* Skip the big blank spaces */
7662 if (i < 0x854000 && i > 0x9ffff)
7663 BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
7664 i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
7665 REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
7666 }
7667
7668 BCE_PRINTF(
7669 "----------------------------"
7670 "----------------"
7671 "----------------------------\n");
7672 }
7673
7674
7675 /****************************************************************************/
7676 /* Prints out the driver state and then enters the debugger. */
7677 /* */
7678 /* Returns: */
7679 /* Nothing. */
7680 /****************************************************************************/
/*
 * Dump a snapshot of the driver and hardware state, then drop into the
 * kernel debugger.  The dumps are emitted before breakpoint() so the
 * state is on the console even if the debugger session is lost.
 */
static void
bce_breakpoint(struct bce_softc *sc)
{

	/*
	 * Unreachable code to shut the compiler up about unused functions.
	 * Every debug dump helper is referenced here so the ones not used
	 * in the active dump set below don't trigger -Wunused warnings.
	 */
	if (0) {
		bce_freeze_controller(sc);
		bce_unfreeze_controller(sc);
		bce_dump_txbd(sc, 0, NULL);
		bce_dump_rxbd(sc, 0, NULL);
		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
		bce_dump_l2fhdr(sc, 0, NULL);
		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
		bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
		bce_dump_status_block(sc);
		bce_dump_stats_block(sc);
		bce_dump_driver_state(sc);
		bce_dump_hw_state(sc);
		bce_dump_bc_state(sc);
		bce_dump_txp_state(sc);
		bce_dump_rxp_state(sc);
		bce_dump_tpat_state(sc);
	}

	/* The active dump set; uncomment the freeze calls to halt the
	 * controller around the dumps. */
	/* bce_freeze_controller(sc); */
	bce_dump_driver_state(sc);
	bce_dump_status_block(sc);
	bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
	bce_dump_hw_state(sc);
	bce_dump_txp_state(sc);
	/* bce_unfreeze_controller(sc); */

	/* Call the debugger. */
	breakpoint();

	return;
}
7719 #endif
7720
Cache object: e7677fe36688b6c040850499153aa651
|