FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c
1 /*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/8.0/sys/dev/bge/if_bge.c 198296 2009-10-20 16:41:23Z stas $");
36
37 /*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83
84 #include <net/if.h>
85 #include <net/if_arp.h>
86 #include <net/ethernet.h>
87 #include <net/if_dl.h>
88 #include <net/if_media.h>
89
90 #include <net/bpf.h>
91
92 #include <net/if_types.h>
93 #include <net/if_vlan_var.h>
94
95 #include <netinet/in_systm.h>
96 #include <netinet/in.h>
97 #include <netinet/ip.h>
98
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101 #include <sys/bus.h>
102 #include <sys/rman.h>
103
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 #include "miidevs.h"
107 #include <dev/mii/brgphyreg.h>
108
109 #ifdef __sparc64__
110 #include <dev/ofw/ofw_bus.h>
111 #include <dev/ofw/openfirm.h>
112 #include <machine/ofw_machdep.h>
113 #include <machine/ver.h>
114 #endif
115
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118
119 #include <dev/bge/if_bgereg.h>
120
121 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
122 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
123
124 MODULE_DEPEND(bge, pci, 1, 1, 1);
125 MODULE_DEPEND(bge, ether, 1, 1, 1);
126 MODULE_DEPEND(bge, miibus, 1, 1, 1);
127
128 /* "device miibus" required. See GENERIC if you get errors here. */
129 #include "miibus_if.h"
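/*
 * The driver may be loaded as a module ("kldload if_bge") or built
 * into the kernel with "device bge", which also requires "device
 * miibus" (see GENERIC).
 */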
130
131 /*
132 * Various supported device vendors/types and their names. Note: the
133 * spec seems to indicate that the hardware still has Alteon's vendor
134  * ID burned into it, though it will always be overridden by the vendor
135 * ID in the EEPROM. Just to be safe, we cover all possibilities.
136 */
137 static const struct bge_type {
138 uint16_t bge_vid;
139 uint16_t bge_did;
140 } bge_devs[] = {
141 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
142 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
143
144 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
145 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
147
148 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
149
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
201
202 { SK_VENDORID, SK_DEVICEID_ALTIMA },
203
204 { TC_VENDORID, TC_DEVICEID_3C996 },
205
206 { 0, 0 }
207 };
208
209 static const struct bge_vendor {
210 uint16_t v_id;
211 const char *v_name;
212 } bge_vendors[] = {
213 { ALTEON_VENDORID, "Alteon" },
214 { ALTIMA_VENDORID, "Altima" },
215 { APPLE_VENDORID, "Apple" },
216 { BCOM_VENDORID, "Broadcom" },
217 { SK_VENDORID, "SysKonnect" },
218 { TC_VENDORID, "3Com" },
219
220 { 0, NULL }
221 };
222
223 static const struct bge_revision {
224 uint32_t br_chipid;
225 const char *br_name;
226 } bge_revisions[] = {
227 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
228 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
229 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
230 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
231 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
232 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
233 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
234 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
235 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
236 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
237 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
238 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
239 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
240 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
241 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
242 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
243 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
244 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
245 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
246 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
247 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
248 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
249 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
250 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
251 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
252 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
253 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
254 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
255 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
256 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
257 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
258 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
259 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
260 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
261 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
262 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
263 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
264 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
265 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
266 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
267 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
268 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
269 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
270 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
271 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
272 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
273 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
274 /* 5754 and 5787 share the same ASIC ID */
275 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
276 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
277 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
278 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
279 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
280
281 { 0, NULL }
282 };
283
284 /*
285 * Some defaults for major revisions, so that newer steppings
286 * that we don't know about have a shot at working.
287 */
288 static const struct bge_revision bge_majorrevs[] = {
289 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
290 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
291 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
292 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
293 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
294 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
295 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
296 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
297 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
298 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
299 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
300 /* 5754 and 5787 share the same ASIC ID */
301 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
302 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
303
304 { 0, NULL }
305 };
306
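/*
 * Convenience macros for testing the chip family/capability flags
 * derived from the ASIC revision when the device attaches.
 */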
307 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
308 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
309 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
310 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
311 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
312
313 const struct bge_revision * bge_lookup_rev(uint32_t);
314 const struct bge_vendor * bge_lookup_vendor(uint16_t);
315
316 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
317
318 static int bge_probe(device_t);
319 static int bge_attach(device_t);
320 static int bge_detach(device_t);
321 static int bge_suspend(device_t);
322 static int bge_resume(device_t);
323 static void bge_release_resources(struct bge_softc *);
324 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
325 static int bge_dma_alloc(device_t);
326 static void bge_dma_free(struct bge_softc *);
327
328 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
329 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
330 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
331 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
332 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
333
334 static void bge_txeof(struct bge_softc *);
335 static int bge_rxeof(struct bge_softc *);
336
337 static void bge_asf_driver_up(struct bge_softc *);
338 static void bge_tick(void *);
339 static void bge_stats_update(struct bge_softc *);
340 static void bge_stats_update_regs(struct bge_softc *);
341 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
342
343 static void bge_intr(void *);
344 static void bge_start_locked(struct ifnet *);
345 static void bge_start(struct ifnet *);
346 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
347 static void bge_init_locked(struct bge_softc *);
348 static void bge_init(void *);
349 static void bge_stop(struct bge_softc *);
350 static void bge_watchdog(struct bge_softc *);
351 static int bge_shutdown(device_t);
352 static int bge_ifmedia_upd_locked(struct ifnet *);
353 static int bge_ifmedia_upd(struct ifnet *);
354 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
355
356 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
357 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
358
359 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
360 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
361
362 static void bge_setpromisc(struct bge_softc *);
363 static void bge_setmulti(struct bge_softc *);
364 static void bge_setvlan(struct bge_softc *);
365
366 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
367 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
368 static int bge_init_rx_ring_std(struct bge_softc *);
369 static void bge_free_rx_ring_std(struct bge_softc *);
370 static int bge_init_rx_ring_jumbo(struct bge_softc *);
371 static void bge_free_rx_ring_jumbo(struct bge_softc *);
372 static void bge_free_tx_ring(struct bge_softc *);
373 static int bge_init_tx_ring(struct bge_softc *);
374
375 static int bge_chipinit(struct bge_softc *);
376 static int bge_blockinit(struct bge_softc *);
377
378 static int bge_has_eaddr(struct bge_softc *);
379 static uint32_t bge_readmem_ind(struct bge_softc *, int);
380 static void bge_writemem_ind(struct bge_softc *, int, int);
381 static void bge_writembx(struct bge_softc *, int, int);
382 #ifdef notdef
383 static uint32_t bge_readreg_ind(struct bge_softc *, int);
384 #endif
385 static void bge_writemem_direct(struct bge_softc *, int, int);
386 static void bge_writereg_ind(struct bge_softc *, int, int);
387 static void bge_set_max_readrq(struct bge_softc *, int);
388
389 static int bge_miibus_readreg(device_t, int, int);
390 static int bge_miibus_writereg(device_t, int, int, int);
391 static void bge_miibus_statchg(device_t);
392 #ifdef DEVICE_POLLING
393 static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
394 #endif
395
396 #define BGE_RESET_START 1
397 #define BGE_RESET_STOP 2
398 static void bge_sig_post_reset(struct bge_softc *, int);
399 static void bge_sig_legacy(struct bge_softc *, int);
400 static void bge_sig_pre_reset(struct bge_softc *, int);
401 static int bge_reset(struct bge_softc *);
402 static void bge_link_upd(struct bge_softc *);
403
404 /*
405 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
406 * leak information to untrusted users. It is also known to cause alignment
407 * traps on certain architectures.
408 */
409 #ifdef BGE_REGISTER_DEBUG
410 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
411 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
412 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
413 #endif
414 static void bge_add_sysctls(struct bge_softc *);
415 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
416
417 static device_method_t bge_methods[] = {
418 /* Device interface */
419 DEVMETHOD(device_probe, bge_probe),
420 DEVMETHOD(device_attach, bge_attach),
421 DEVMETHOD(device_detach, bge_detach),
422 DEVMETHOD(device_shutdown, bge_shutdown),
423 DEVMETHOD(device_suspend, bge_suspend),
424 DEVMETHOD(device_resume, bge_resume),
425
426 /* bus interface */
427 DEVMETHOD(bus_print_child, bus_generic_print_child),
428 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
429
430 /* MII interface */
431 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
432 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
433 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
434
435 { 0, 0 }
436 };
437
438 static driver_t bge_driver = {
439 "bge",
440 bge_methods,
441 sizeof(struct bge_softc)
442 };
443
444 static devclass_t bge_devclass;
445
446 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
447 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
448
449 static int bge_allow_asf = 0;
450
451 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
452
453 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
454 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
455 "Allow ASF mode if available");
456
457 #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
458 #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
459 #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
460 #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
461 #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
462
463 static int
464 bge_has_eaddr(struct bge_softc *sc)
465 {
466 #ifdef __sparc64__
467 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
468 device_t dev;
469 uint32_t subvendor;
470
471 dev = sc->bge_dev;
472
473 /*
474 * The on-board BGEs found in sun4u machines aren't fitted with
475 * an EEPROM which means that we have to obtain the MAC address
476 * via OFW and that some tests will always fail. We distinguish
477 * such BGEs by the subvendor ID, which also has to be obtained
478 * from OFW instead of the PCI configuration space as the latter
479 * indicates Broadcom as the subvendor of the netboot interface.
480 * For early Blade 1500 and 2500 we even have to check the OFW
481 * device path as the subvendor ID always defaults to Broadcom
482 * there.
483 */
484 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
485 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
486 subvendor == SUN_VENDORID)
487 return (0);
488 memset(buf, 0, sizeof(buf));
489 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
490 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
491 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
492 return (0);
493 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
494 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
495 return (0);
496 }
497 #endif
498 return (1);
499 }
500
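/*
 * Indirect access to NIC-internal memory through the PCI memory
 * window: writing an offset to BGE_PCI_MEMWIN_BASEADDR selects the
 * region the window exposes, after which BGE_PCI_MEMWIN_DATA reads
 * or writes the 32-bit word at that offset. The window base is
 * reset to 0 afterwards to restore the default mapping.
 */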
501 static uint32_t
502 bge_readmem_ind(struct bge_softc *sc, int off)
503 {
504 device_t dev;
505 uint32_t val;
506
507 dev = sc->bge_dev;
508
509 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
510 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
511 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
512 return (val);
513 }
514
515 static void
516 bge_writemem_ind(struct bge_softc *sc, int off, int val)
517 {
518 device_t dev;
519
520 dev = sc->bge_dev;
521
522 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
523 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
524 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
525 }
526
527 /*
528  * PCI Express only: set the maximum read request size to 4096 bytes.
529 */
530 static void
531 bge_set_max_readrq(struct bge_softc *sc, int expr_ptr)
532 {
533 device_t dev;
534 uint16_t val;
535
536 KASSERT((sc->bge_flags & BGE_FLAG_PCIE) && expr_ptr != 0,
537 ("%s: not applicable", __func__));
538
539 dev = sc->bge_dev;
540
541 val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2);
542 if ((val & BGE_PCIE_DEVCTL_MAX_READRQ_MASK) !=
543 BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
544 if (bootverbose)
545 device_printf(dev, "adjust device control 0x%04x ",
546 val);
547 val &= ~BGE_PCIE_DEVCTL_MAX_READRQ_MASK;
548 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
549 pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2);
550 if (bootverbose)
551 printf("-> 0x%04x\n", val);
552 }
553 }
554
555 #ifdef notdef
556 static uint32_t
557 bge_readreg_ind(struct bge_softc *sc, int off)
558 {
559 device_t dev;
560
561 dev = sc->bge_dev;
562
563 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
564 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
565 }
566 #endif
567
568 static void
569 bge_writereg_ind(struct bge_softc *sc, int off, int val)
570 {
571 device_t dev;
572
573 dev = sc->bge_dev;
574
575 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
576 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
577 }
578
579 static void
580 bge_writemem_direct(struct bge_softc *sc, int off, int val)
581 {
582 CSR_WRITE_4(sc, off, val);
583 }
584
585 static void
586 bge_writembx(struct bge_softc *sc, int off, int val)
587 {
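	/* The BCM5906 exposes its mailbox registers at a different offset. */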
588 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
589 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
590
591 CSR_WRITE_4(sc, off, val);
592 }
593
594 /*
595 * Map a single buffer address.
596 */
597
598 static void
599 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
600 {
601 struct bge_dmamap_arg *ctx;
602
603 if (error)
604 return;
605
606 ctx = arg;
607
608 if (nseg > ctx->bge_maxsegs) {
609 ctx->bge_maxsegs = 0;
610 return;
611 }
612
613 ctx->bge_busaddr = segs->ds_addr;
614 }
615
616 static uint8_t
617 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
618 {
619 uint32_t access, byte = 0;
620 int i;
621
622 /* Lock. */
623 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
624 for (i = 0; i < 8000; i++) {
625 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
626 break;
627 DELAY(20);
628 }
629 if (i == 8000)
630 return (1);
631
632 /* Enable access. */
633 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
634 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
635
636 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
637 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
638 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
639 DELAY(10);
640 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
641 DELAY(10);
642 break;
643 }
644 }
645
646 if (i == BGE_TIMEOUT * 10) {
647 if_printf(sc->bge_ifp, "nvram read timed out\n");
648 return (1);
649 }
650
651 /* Get result. */
652 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
653
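	/*
	 * The NVRAM data register returns a 32-bit big-endian word;
	 * byte-swap it and shift the requested byte (addr % 4) down
	 * into the low 8 bits.
	 */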
654 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
655
656 /* Disable access. */
657 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
658
659 /* Unlock. */
660 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
661 CSR_READ_4(sc, BGE_NVRAM_SWARB);
662
663 return (0);
664 }
665
666 /*
667 * Read a sequence of bytes from NVRAM.
668 */
669 static int
670 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
671 {
672 int err = 0, i;
673 uint8_t byte = 0;
674
675 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
676 return (1);
677
678 for (i = 0; i < cnt; i++) {
679 err = bge_nvram_getbyte(sc, off + i, &byte);
680 if (err)
681 break;
682 *(dest + i) = byte;
683 }
684
685 return (err ? 1 : 0);
686 }
687
688 /*
689 * Read a byte of data stored in the EEPROM at address 'addr.' The
690 * BCM570x supports both the traditional bitbang interface and an
691 * auto access interface for reading the EEPROM. We use the auto
692 * access method.
693 */
694 static uint8_t
695 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
696 {
697 int i;
698 uint32_t byte = 0;
699
700 /*
701 * Enable use of auto EEPROM access so we can avoid
702 * having to use the bitbang method.
703 */
704 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
705
706 /* Reset the EEPROM, load the clock period. */
707 CSR_WRITE_4(sc, BGE_EE_ADDR,
708 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
709 DELAY(20);
710
711 /* Issue the read EEPROM command. */
712 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
713
714 /* Wait for completion */
715 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
716 DELAY(10);
717 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
718 break;
719 }
720
721 if (i == BGE_TIMEOUT * 10) {
722 device_printf(sc->bge_dev, "EEPROM read timed out\n");
723 return (1);
724 }
725
726 /* Get result. */
727 byte = CSR_READ_4(sc, BGE_EE_DATA);
728
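	/*
	 * Unlike the NVRAM path, the EEPROM data register already has
	 * byte 0 in the low-order bits, so no byte swap is needed.
	 */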
729 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
730
731 return (0);
732 }
733
734 /*
735 * Read a sequence of bytes from the EEPROM.
736 */
737 static int
738 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
739 {
740 int i, error = 0;
741 uint8_t byte = 0;
742
743 for (i = 0; i < cnt; i++) {
744 error = bge_eeprom_getbyte(sc, off + i, &byte);
745 if (error)
746 break;
747 *(dest + i) = byte;
748 }
749
750 return (error ? 1 : 0);
751 }
752
753 static int
754 bge_miibus_readreg(device_t dev, int phy, int reg)
755 {
756 struct bge_softc *sc;
757 uint32_t val, autopoll;
758 int i;
759
760 sc = device_get_softc(dev);
761
762 /*
763 * Broadcom's own driver always assumes the internal
764 * PHY is at GMII address 1. On some chips, the PHY responds
765 * to accesses at all addresses, which could cause us to
766  * bogusly attach the PHY 32 times at probe time. Always
767  * restricting the lookup to address 1 is simpler than
768  * trying to figure out which chip revisions should be
769 * special-cased.
770 */
771 if (phy != 1)
772 return (0);
773
774 /* Reading with autopolling on may trigger PCI errors */
775 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
776 if (autopoll & BGE_MIMODE_AUTOPOLL) {
777 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
778 DELAY(40);
779 }
780
781 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
782 BGE_MIPHY(phy) | BGE_MIREG(reg));
783
784 for (i = 0; i < BGE_TIMEOUT; i++) {
785 DELAY(10);
786 val = CSR_READ_4(sc, BGE_MI_COMM);
787 if (!(val & BGE_MICOMM_BUSY))
788 break;
789 }
790
791 if (i == BGE_TIMEOUT) {
792 device_printf(sc->bge_dev,
793 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
794 phy, reg, val);
795 val = 0;
796 goto done;
797 }
798
799 DELAY(5);
800 val = CSR_READ_4(sc, BGE_MI_COMM);
801
802 done:
803 if (autopoll & BGE_MIMODE_AUTOPOLL) {
804 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
805 DELAY(40);
806 }
807
808 if (val & BGE_MICOMM_READFAIL)
809 return (0);
810
811 return (val & 0xFFFF);
812 }
813
814 static int
815 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
816 {
817 struct bge_softc *sc;
818 uint32_t autopoll;
819 int i;
820
821 sc = device_get_softc(dev);
822
823 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
824 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
825 		return (0);
826
827 /* Reading with autopolling on may trigger PCI errors */
828 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
829 if (autopoll & BGE_MIMODE_AUTOPOLL) {
830 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
831 DELAY(40);
832 }
833
834 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
835 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
836
837 for (i = 0; i < BGE_TIMEOUT; i++) {
838 DELAY(10);
839 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
840 DELAY(5);
841 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
842 break;
843 }
844 }
845
846 if (i == BGE_TIMEOUT) {
847 device_printf(sc->bge_dev,
848 "PHY write timed out (phy %d, reg %d, val %d)\n",
849 phy, reg, val);
850 return (0);
851 }
852
853 if (autopoll & BGE_MIMODE_AUTOPOLL) {
854 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
855 DELAY(40);
856 }
857
858 return (0);
859 }
860
861 static void
862 bge_miibus_statchg(device_t dev)
863 {
864 struct bge_softc *sc;
865 struct mii_data *mii;
866 sc = device_get_softc(dev);
867 mii = device_get_softc(sc->bge_miibus);
868
869 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
870 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
871 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
872 else
873 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
874
875 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
876 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
877 else
878 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
879 }
880
881 /*
882  * Initialize a standard receive ring descriptor.
883 */
884 static int
885 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
886 {
887 struct mbuf *m_new = NULL;
888 struct bge_rx_bd *r;
889 struct bge_dmamap_arg ctx;
890 int error;
891
892 if (m == NULL) {
893 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
894 if (m_new == NULL)
895 return (ENOBUFS);
896 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
897 } else {
898 m_new = m;
899 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
900 m_new->m_data = m_new->m_ext.ext_buf;
901 }
902
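	/*
	 * Offset the payload by ETHER_ALIGN (2 bytes) so the IP header
	 * is 32-bit aligned; chips with the RX alignment bug can't DMA
	 * to such offsets, so their buffers are left unadjusted.
	 */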
903 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
904 m_adj(m_new, ETHER_ALIGN);
905 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
906 r = &sc->bge_ldata.bge_rx_std_ring[i];
907 ctx.bge_maxsegs = 1;
908 ctx.sc = sc;
909 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
910 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
911 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
912 if (error || ctx.bge_maxsegs == 0) {
913 if (m == NULL) {
914 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
915 m_freem(m_new);
916 }
917 return (ENOMEM);
918 }
919 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
920 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
921 r->bge_flags = BGE_RXBDFLAG_END;
922 r->bge_len = m_new->m_len;
923 r->bge_idx = i;
924
925 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
926 sc->bge_cdata.bge_rx_std_dmamap[i],
927 BUS_DMASYNC_PREREAD);
928
929 return (0);
930 }
931
932 /*
933  * Initialize a jumbo receive ring descriptor. This allocates
934  * a 9k jumbo mbuf cluster from the kernel's jumbo cluster zone.
935 */
936 static int
937 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
938 {
939 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
940 struct bge_extrx_bd *r;
941 struct mbuf *m_new = NULL;
942 int nsegs;
943 int error;
944
945 if (m == NULL) {
946 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
947 if (m_new == NULL)
948 return (ENOBUFS);
949
950 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
951 if (!(m_new->m_flags & M_EXT)) {
952 m_freem(m_new);
953 return (ENOBUFS);
954 }
955 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
956 } else {
957 m_new = m;
958 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
959 m_new->m_data = m_new->m_ext.ext_buf;
960 }
961
962 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
963 m_adj(m_new, ETHER_ALIGN);
964
965 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
966 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
967 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
968 if (error) {
969 if (m == NULL)
970 m_freem(m_new);
971 return (error);
972 }
973 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
974
975 /*
976 * Fill in the extended RX buffer descriptor.
977 */
978 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
979 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
980 r->bge_idx = i;
981 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
982 switch (nsegs) {
983 case 4:
984 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
985 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
986 r->bge_len3 = segs[3].ds_len;
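		/* FALLTHROUGH */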
987 case 3:
988 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
989 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
990 r->bge_len2 = segs[2].ds_len;
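		/* FALLTHROUGH */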
991 case 2:
992 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
993 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
994 r->bge_len1 = segs[1].ds_len;
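		/* FALLTHROUGH */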
995 case 1:
996 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
997 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
998 r->bge_len0 = segs[0].ds_len;
999 break;
1000 default:
1001 panic("%s: %d segments\n", __func__, nsegs);
1002 }
1003
1004 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1005 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1006 BUS_DMASYNC_PREREAD);
1007
1008 return (0);
1009 }
1010
1011 /*
1012 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1013  * that's 1MB of memory, which is a lot. For now, we fill only the first
1014 * 256 ring entries and hope that our CPU is fast enough to keep up with
1015 * the NIC.
1016 */
1017 static int
1018 bge_init_rx_ring_std(struct bge_softc *sc)
1019 {
1020 int i;
1021
1022 for (i = 0; i < BGE_SSLOTS; i++) {
1023 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1024 return (ENOBUFS);
1025 	}
1026
1027 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1028 sc->bge_cdata.bge_rx_std_ring_map,
1029 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1030
1031 sc->bge_std = i - 1;
1032 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1033
1034 return (0);
1035 }
1036
1037 static void
1038 bge_free_rx_ring_std(struct bge_softc *sc)
1039 {
1040 int i;
1041
1042 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1043 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1044 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1045 sc->bge_cdata.bge_rx_std_dmamap[i],
1046 BUS_DMASYNC_POSTREAD);
1047 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1048 sc->bge_cdata.bge_rx_std_dmamap[i]);
1049 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1050 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1051 }
1052 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1053 sizeof(struct bge_rx_bd));
1054 }
1055 }
1056
1057 static int
1058 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1059 {
1060 struct bge_rcb *rcb;
1061 int i;
1062
1063 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1064 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1065 return (ENOBUFS);
1066 	}
1067
1068 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1069 sc->bge_cdata.bge_rx_jumbo_ring_map,
1070 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1071
1072 sc->bge_jumbo = i - 1;
1073
1074 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1075 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1076 BGE_RCB_FLAG_USE_EXT_RX_BD);
1077 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1078
1079 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1080
1081 return (0);
1082 }
1083
1084 static void
1085 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1086 {
1087 int i;
1088
1089 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1090 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1091 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1092 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1093 BUS_DMASYNC_POSTREAD);
1094 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1095 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1096 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1097 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1098 }
1099 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1100 sizeof(struct bge_extrx_bd));
1101 }
1102 }
1103
1104 static void
1105 bge_free_tx_ring(struct bge_softc *sc)
1106 {
1107 int i;
1108
1109 if (sc->bge_ldata.bge_tx_ring == NULL)
1110 return;
1111
1112 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1113 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1114 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
1115 sc->bge_cdata.bge_tx_dmamap[i],
1116 BUS_DMASYNC_POSTWRITE);
1117 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1118 sc->bge_cdata.bge_tx_dmamap[i]);
1119 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1120 sc->bge_cdata.bge_tx_chain[i] = NULL;
1121 }
1122 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1123 sizeof(struct bge_tx_bd));
1124 }
1125 }
1126
1127 static int
1128 bge_init_tx_ring(struct bge_softc *sc)
1129 {
1130 sc->bge_txcnt = 0;
1131 sc->bge_tx_saved_considx = 0;
1132
1133 /* Initialize transmit producer index for host-memory send ring. */
1134 sc->bge_tx_prodidx = 0;
1135 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1136
1137 	/* 5700 BX errata */
1138 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1139 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1140
1141 /* NIC-memory send ring not used; initialize to zero. */
1142 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1143 	/* 5700 BX errata */
1144 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1145 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1146
1147 return (0);
1148 }
1149
1150 static void
1151 bge_setpromisc(struct bge_softc *sc)
1152 {
1153 struct ifnet *ifp;
1154
1155 BGE_LOCK_ASSERT(sc);
1156
1157 ifp = sc->bge_ifp;
1158
1159 /* Enable or disable promiscuous mode as needed. */
1160 if (ifp->if_flags & IFF_PROMISC)
1161 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1162 else
1163 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1164 }
1165
1166 static void
1167 bge_setmulti(struct bge_softc *sc)
1168 {
1169 struct ifnet *ifp;
1170 struct ifmultiaddr *ifma;
1171 uint32_t hashes[4] = { 0, 0, 0, 0 };
1172 int h, i;
1173
1174 BGE_LOCK_ASSERT(sc);
1175
1176 ifp = sc->bge_ifp;
1177
1178 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1179 for (i = 0; i < 4; i++)
1180 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1181 return;
1182 }
1183
1184 /* First, zot all the existing filters. */
1185 for (i = 0; i < 4; i++)
1186 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1187
1188 /* Now program new ones. */
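	/*
	 * The hash filter is 128 bits wide, spread across the four
	 * 32-bit BGE_MAR registers. The low 7 bits of the little-endian
	 * CRC32 of each address select the filter bit: bits 6-5 pick
	 * the register, bits 4-0 the bit within it.
	 */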
1189 if_maddr_rlock(ifp);
1190 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1191 if (ifma->ifma_addr->sa_family != AF_LINK)
1192 continue;
1193 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1194 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1195 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1196 }
1197 if_maddr_runlock(ifp);
1198
1199 for (i = 0; i < 4; i++)
1200 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1201 }
1202
1203 static void
1204 bge_setvlan(struct bge_softc *sc)
1205 {
1206 struct ifnet *ifp;
1207
1208 BGE_LOCK_ASSERT(sc);
1209
1210 ifp = sc->bge_ifp;
1211
1212 /* Enable or disable VLAN tag stripping as needed. */
1213 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1214 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1215 else
1216 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1217 }
1218
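/*
 * ASF firmware handshake. When management firmware is active, the
 * driver announces reset and unload events through mailbox locations
 * in NIC memory so the firmware can quiesce and restart cleanly.
 */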
1219 static void
1220 bge_sig_pre_reset(struct bge_softc *sc, int type)
1221 {
1224 /*
1225 	 * Some chips don't like this, so only do it when ASF is enabled.
1226 */
1227 if (sc->bge_asf_mode)
1228 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1229
1230 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1231 switch (type) {
1232 case BGE_RESET_START:
1233 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1234 break;
1235 case BGE_RESET_STOP:
1236 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1237 break;
1238 }
1239 }
1240 }
1241
1242 static void
1243 bge_sig_post_reset(struct bge_softc *sc, int type)
1244 {
1247 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1248 switch (type) {
1249 case BGE_RESET_START:
1250 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1251 /* START DONE */
1252 break;
1253 case BGE_RESET_STOP:
1254 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
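			/* UNLOAD DONE */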
1255 break;
1256 }
1257 }
1258 }
1259
1260 static void
1261 bge_sig_legacy(struct bge_softc *sc, int type)
1262 {
1265 if (sc->bge_asf_mode) {
1266 switch (type) {
1267 case BGE_RESET_START:
1268 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1269 break;
1270 case BGE_RESET_STOP:
1271 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1272 break;
1273 }
1274 }
1275 }
1276
1277 void bge_stop_fw(struct bge_softc *);
1278 void
1279 bge_stop_fw(struct bge_softc *sc)
1280 {
1282 int i;
1283
1284 if (sc->bge_asf_mode) {
1285 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1286 CSR_WRITE_4(sc, BGE_CPU_EVENT,
1287 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1288
1289 		for (i = 0; i < 100; i++) {
1290 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1291 break;
1292 DELAY(10);
1293 }
1294 }
1295 }
1296
1297 /*
1298 * Do endian, PCI and DMA initialization.
1299 */
1300 static int
1301 bge_chipinit(struct bge_softc *sc)
1302 {
1303 uint32_t dma_rw_ctl;
1304 int i;
1305
1306 /* Set endianness before we access any non-PCI registers. */
1307 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1308
1309 /* Clear the MAC control register */
1310 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1311
1312 /*
1313 * Clear the MAC statistics block in the NIC's
1314 * internal memory.
1315 */
1316 for (i = BGE_STATS_BLOCK;
1317 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1318 BGE_MEMWIN_WRITE(sc, i, 0);
1319
1320 for (i = BGE_STATUS_BLOCK;
1321 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1322 BGE_MEMWIN_WRITE(sc, i, 0);
1323
1324 /*
1325 * Set up the PCI DMA control register.
1326 */
1327 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1328 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1329 if (sc->bge_flags & BGE_FLAG_PCIE) {
1330 /* Read watermark not used, 128 bytes for write. */
1331 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1332 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1333 if (BGE_IS_5714_FAMILY(sc)) {
1334 /* 256 bytes for read and write. */
1335 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1336 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1337 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1338 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1339 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1340 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1341 /* 1536 bytes for read, 384 bytes for write. */
1342 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1343 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1344 } else {
1345 /* 384 bytes for read and write. */
1346 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1347 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1348 0x0F;
1349 }
1350 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1351 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1352 uint32_t tmp;
1353
1354 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1355 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1356 if (tmp == 6 || tmp == 7)
1357 dma_rw_ctl |=
1358 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1359
1360 /* Set PCI-X DMA write workaround. */
1361 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1362 }
1363 } else {
1364 /* Conventional PCI bus: 256 bytes for read and write. */
1365 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1366 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1367
1368 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1369 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1370 dma_rw_ctl |= 0x0F;
1371 }
1372 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1373 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1374 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1375 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1376 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1377 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1378 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1379 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1380
1381 /*
1382 * Set up general mode register.
1383 */
1384 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1385 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1386 BGE_MODECTL_TX_NO_PHDR_CSUM);
1387
1388 /*
1389  * BCM5701 B5 chips have a bug causing data corruption when using
1390 * 64-bit DMA reads, which can be terminated early and then
1391 * completed later as 32-bit accesses, in combination with
1392 * certain bridges.
1393 */
1394 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1395 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1396 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1397
1398 /*
1399 * Tell the firmware the driver is running
1400 */
1401 if (sc->bge_asf_mode & ASF_STACKUP)
1402 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1403
1404 /*
1405 * Disable memory write invalidate. Apparently it is not supported
1406 * properly by these devices. Also ensure that INTx isn't disabled,
1407 * as these chips need it even when using MSI.
1408 */
1409 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1410 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1411
1412 	/* Set the timer prescaler (always 66MHz) */
1413 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1414
1415 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1416 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1417 DELAY(40); /* XXX */
1418
1419 /* Put PHY into ready state */
1420 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1421 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1422 DELAY(40);
1423 }
1424
1425 return (0);
1426 }
1427
1428 static int
1429 bge_blockinit(struct bge_softc *sc)
1430 {
1431 struct bge_rcb *rcb;
1432 bus_size_t vrcb;
1433 bge_hostaddr taddr;
1434 uint32_t val;
1435 int i;
1436
1437 /*
1438 * Initialize the memory window pointer register so that
1439 * we can access the first 32K of internal NIC RAM. This will
1440 * allow us to set up the TX send ring RCBs and the RX return
1441 * ring RCBs, plus other things which live in NIC memory.
1442 */
1443 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1444
1445 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1446
1447 if (!(BGE_IS_5705_PLUS(sc))) {
1448 /* Configure mbuf memory pool */
1449 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1450 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1451 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1452 else
1453 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1454
1455 /* Configure DMA resource pool */
1456 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1457 BGE_DMA_DESCRIPTORS);
1458 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1459 }
1460
1461 /* Configure mbuf pool watermarks */
1462 if (!BGE_IS_5705_PLUS(sc)) {
1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1466 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1469 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1470 } else {
1471 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1472 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1474 }
1475
1476 /* Configure DMA resource watermarks */
1477 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1478 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1479
1480 /* Enable buffer manager */
1481 if (!(BGE_IS_5705_PLUS(sc))) {
1482 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1483 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);
1484
1485 /* Poll for buffer manager start indication */
1486 for (i = 0; i < BGE_TIMEOUT; i++) {
1487 DELAY(10);
1488 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1489 break;
1490 }
1491
1492 if (i == BGE_TIMEOUT) {
1493 device_printf(sc->bge_dev,
1494 "buffer manager failed to start\n");
1495 return (ENXIO);
1496 }
1497 }
1498
1499 /* Enable flow-through queues */
1500 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1501 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1502
1503 /* Wait until queue initialization is complete */
1504 for (i = 0; i < BGE_TIMEOUT; i++) {
1505 DELAY(10);
1506 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1507 break;
1508 }
1509
1510 if (i == BGE_TIMEOUT) {
1511 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1512 return (ENXIO);
1513 }
1514
1515 /* Initialize the standard RX ring control block */
1516 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1517 rcb->bge_hostaddr.bge_addr_lo =
1518 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1519 rcb->bge_hostaddr.bge_addr_hi =
1520 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1521 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1522 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1523 if (BGE_IS_5705_PLUS(sc))
1524 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1525 else
1526 rcb->bge_maxlen_flags =
1527 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1528 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1529 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1530 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1531
1532 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1533 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1534
1535 /*
1536 * Initialize the jumbo RX ring control block
1537 * We set the 'ring disabled' bit in the flags
1538 * field until we're actually ready to start
1539 * using this ring (i.e. once we set the MTU
1540 * high enough to require it).
1541 */
1542 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1543 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1544
1545 rcb->bge_hostaddr.bge_addr_lo =
1546 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1547 rcb->bge_hostaddr.bge_addr_hi =
1548 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1549 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1550 sc->bge_cdata.bge_rx_jumbo_ring_map,
1551 BUS_DMASYNC_PREREAD);
1552 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1553 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1554 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1555 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1556 rcb->bge_hostaddr.bge_addr_hi);
1557 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1558 rcb->bge_hostaddr.bge_addr_lo);
1559
1560 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1561 rcb->bge_maxlen_flags);
1562 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1563
1564 /* Set up dummy disabled mini ring RCB */
1565 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1566 rcb->bge_maxlen_flags =
1567 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1568 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1569 rcb->bge_maxlen_flags);
1570 }
1571
1572 /*
1573 	 * Set the BD ring replenish thresholds. The recommended
1574 * values are 1/8th the number of descriptors allocated to
1575 * each ring.
1576 * XXX The 5754 requires a lower threshold, so it might be a
1577 * requirement of all 575x family chips. The Linux driver sets
1578 * the lower threshold for all 5705 family chips as well, but there
1579 * are reports that it might not need to be so strict.
1580 *
1581 * XXX Linux does some extra fiddling here for the 5906 parts as
1582 * well.
1583 */
1584 if (BGE_IS_5705_PLUS(sc))
1585 val = 8;
1586 else
1587 val = BGE_STD_RX_RING_CNT / 8;
1588 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1589 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1590
1591 /*
1592 * Disable all unused send rings by setting the 'ring disabled'
1593 * bit in the flags field of all the TX send ring control blocks.
1594 * These are located in NIC memory.
1595 */
1596 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1597 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1598 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1599 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1600 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1601 vrcb += sizeof(struct bge_rcb);
1602 }
1603
1604 /* Configure TX RCB 0 (we use only the first ring) */
1605 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1606 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1607 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1608 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1609 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1610 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1611 if (!(BGE_IS_5705_PLUS(sc)))
1612 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1613 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1614
1615 /* Disable all unused RX return rings */
1616 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1617 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1618 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1619 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1620 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1621 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1622 BGE_RCB_FLAG_RING_DISABLED));
1623 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1624 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1625 (i * (sizeof(uint64_t))), 0);
1626 vrcb += sizeof(struct bge_rcb);
1627 }
1628
1629 /* Initialize RX ring indexes */
1630 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1631 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1632 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1633
1634 /*
1635 * Set up RX return ring 0
1636 * Note that the NIC address for RX return rings is 0x00000000.
1637 * The return rings live entirely within the host, so the
1638 * nicaddr field in the RCB isn't used.
1639 */
1640 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1641 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1642 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1643 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1644 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1645 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1646 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1647
1648 /* Set random backoff seed for TX */
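	/*
	 * Summing the MAC address bytes gives each station a distinct,
	 * stable seed, so stations that collide back off differently.
	 */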
1649 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1650 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1651 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1652 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1653 BGE_TX_BACKOFF_SEED_MASK);
1654
1655 /* Set inter-packet gap */
1656 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1657
1658 /*
1659 * Specify which ring to use for packets that don't match
1660 * any RX rules.
1661 */
1662 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1663
1664 /*
1665 * Configure number of RX lists. One interrupt distribution
1666 * list, sixteen active lists, one bad frames class.
1667 */
1668 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1669
1670 	/* Initialize RX list placement stats mask. */
1671 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1672 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1673
1674 /* Disable host coalescing until we get it set up */
1675 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1676
1677 /* Poll to make sure it's shut down. */
1678 for (i = 0; i < BGE_TIMEOUT; i++) {
1679 DELAY(10);
1680 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1681 break;
1682 }
1683
1684 if (i == BGE_TIMEOUT) {
1685 device_printf(sc->bge_dev,
1686 "host coalescing engine failed to idle\n");
1687 return (ENXIO);
1688 }
1689
1690 /* Set up host coalescing defaults */
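	/*
	 * An interrupt is posted when either the coalescing tick timer
	 * expires or the BD count threshold is reached, whichever
	 * happens first.
	 */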
1691 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1692 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1693 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1694 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1695 if (!(BGE_IS_5705_PLUS(sc))) {
1696 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1697 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1698 }
1699 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1700 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1701
1702 /* Set up address of statistics block */
1703 if (!(BGE_IS_5705_PLUS(sc))) {
1704 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1705 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1706 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1707 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1708 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1709 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1710 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1711 }
1712
1713 /* Set up address of status block */
1714 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1715 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1716 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1717 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1718 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1719 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1720
1721 /* Turn on host coalescing state machine */
1722 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1723
1724 /* Turn on RX BD completion state machine and enable attentions */
1725 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1726 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
1727
1728 /* Turn on RX list placement state machine */
1729 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1730
1731 /* Turn on RX list selector state machine. */
1732 if (!(BGE_IS_5705_PLUS(sc)))
1733 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1734
1735 /* Turn on DMA, clear stats */
1736 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
1737 BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
1738 BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
1739 BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
1740 ((sc->bge_flags & BGE_FLAG_TBI) ?
1741 BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1742
1743 /* Set misc. local control, enable interrupts on attentions */
1744 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1745
1746 #ifdef notdef
1747 /* Assert GPIO pins for PHY reset */
1748 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
1749 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
1750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
1751 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
1752 #endif
1753
1754 /* Turn on DMA completion state machine */
1755 if (!(BGE_IS_5705_PLUS(sc)))
1756 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1757
1758 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
1759
1760 /* Enable host coalescing bug fix. */
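/*
 * Bit 29 of the write DMA mode register is what later revisions of
 * this driver name BGE_WDMAMODE_STATUS_TAG_FIX; it reportedly works
 * around stale status block updates on these chips.
 */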
1761 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1762 sc->bge_asicrev == BGE_ASICREV_BCM5787)
1763 val |= 1 << 29;
1764
1765 /* Turn on write DMA state machine */
1766 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1767 DELAY(40);
1768
1769 /* Turn on read DMA state machine */
1770 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1771 if (sc->bge_flags & BGE_FLAG_PCIE)
1772 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1773 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1774 DELAY(40);
1775
1776 /* Turn on RX data completion state machine */
1777 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1778
1779 /* Turn on RX BD initiator state machine */
1780 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1781
1782 /* Turn on RX data and RX BD initiator state machine */
1783 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1784
1785 /* Turn on Mbuf cluster free state machine */
1786 if (!(BGE_IS_5705_PLUS(sc)))
1787 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1788
1789 /* Turn on send BD completion state machine */
1790 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1791
1792 /* Turn on send data completion state machine */
1793 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1794
1795 /* Turn on send data initiator state machine */
1796 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1797
1798 /* Turn on send BD initiator state machine */
1799 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1800
1801 /* Turn on send BD selector state machine */
1802 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1803
1804 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1805 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1806 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
1807
1808 /* ack/clear link change events */
1809 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1810 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1811 BGE_MACSTAT_LINK_CHANGED);
1812 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1813
1814 /* Enable PHY auto polling (for MII/GMII only) */
1815 if (sc->bge_flags & BGE_FLAG_TBI) {
1816 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1817 } else {
1818 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1819 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1820 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1821 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1822 BGE_EVTENB_MI_INTERRUPT);
1823 }
1824
1825 /*
1826 * Clear any pending link state attention.
1827 * Otherwise some link state change events may be lost until attention
1828 * is cleared by the bge_intr() -> bge_link_upd() sequence.
1829 * It's not necessary on newer BCM chips; perhaps enabling link
1830 * state change attentions implies clearing pending attention.
1831 */
1832 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
1833 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
1834 BGE_MACSTAT_LINK_CHANGED);
1835
1836 /* Enable link state change attentions. */
1837 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1838
1839 return (0);
1840 }
1841
1842 const struct bge_revision *
1843 bge_lookup_rev(uint32_t chipid)
1844 {
1845 const struct bge_revision *br;
1846
1847 for (br = bge_revisions; br->br_name != NULL; br++) {
1848 if (br->br_chipid == chipid)
1849 return (br);
1850 }
1851
1852 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1853 if (br->br_chipid == BGE_ASICREV(chipid))
1854 return (br);
1855 }
1856
1857 return (NULL);
1858 }
1859
1860 const struct bge_vendor *
1861 bge_lookup_vendor(uint16_t vid)
1862 {
1863 const struct bge_vendor *v;
1864
1865 for (v = bge_vendors; v->v_name != NULL; v++)
1866 if (v->v_id == vid)
1867 return (v);
1868
1869 panic("%s: unknown vendor %d", __func__, vid);
1870 return (NULL);
1871 }
1872
1873 /*
1874 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1875 * against our list and return its name if we find a match.
1876 *
1877 * Note that since the Broadcom controller contains VPD support, we
1878 * try to get the device name string from the controller itself instead
1879 * of the compiled-in string. It guarantees we'll always announce the
1880 * right product name. We fall back to the compiled-in string when
1881 * VPD is unavailable or corrupt.
1882 */
1883 static int
1884 bge_probe(device_t dev)
1885 {
1886 const struct bge_type *t = bge_devs;
1887 struct bge_softc *sc = device_get_softc(dev);
1888 uint16_t vid, did;
1889
1890 sc->bge_dev = dev;
1891 vid = pci_get_vendor(dev);
1892 did = pci_get_device(dev);
1893 while (t->bge_vid != 0) {
1894 if ((vid == t->bge_vid) && (did == t->bge_did)) {
1895 char model[64], buf[96];
1896 const struct bge_revision *br;
1897 const struct bge_vendor *v;
1898 uint32_t id;
1899
1900 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1901 BGE_PCIMISCCTL_ASICREV;
1902 br = bge_lookup_rev(id);
1903 v = bge_lookup_vendor(vid);
1904 {
1905 #if __FreeBSD_version > 700024
1906 const char *pname;
1907
1908 if (bge_has_eaddr(sc) &&
1909 pci_get_vpd_ident(dev, &pname) == 0)
1910 snprintf(model, 64, "%s", pname);
1911 else
1912 #endif
1913 snprintf(model, 64, "%s %s",
1914 v->v_name,
1915 br != NULL ? br->br_name :
1916 "NetXtreme Ethernet Controller");
1917 }
1918 snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1919 br != NULL ? "" : "unknown ", id >> 16);
1920 device_set_desc_copy(dev, buf);
1921 if (pci_get_subvendor(dev) == DELL_VENDORID)
1922 sc->bge_flags |= BGE_FLAG_NO_3LED;
1923 if (did == BCOM_DEVICEID_BCM5755M)
1924 sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1925 return (0);
1926 }
1927 t++;
1928 }
1929
1930 return (ENXIO);
1931 }
1932
1933 static void
1934 bge_dma_free(struct bge_softc *sc)
1935 {
1936 int i;
1937
1938 /* Destroy DMA maps for RX buffers. */
1939 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1940 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1941 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1942 sc->bge_cdata.bge_rx_std_dmamap[i]);
1943 }
1944
1945 /* Destroy DMA maps for jumbo RX buffers. */
1946 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1947 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1948 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1949 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1950 }
1951
1952 /* Destroy DMA maps for TX buffers. */
1953 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1954 if (sc->bge_cdata.bge_tx_dmamap[i])
1955 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1956 sc->bge_cdata.bge_tx_dmamap[i]);
1957 }
1958
1959 if (sc->bge_cdata.bge_mtag)
1960 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1961
1963 /* Destroy standard RX ring. */
1964 if (sc->bge_cdata.bge_rx_std_ring_map)
1965 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1966 sc->bge_cdata.bge_rx_std_ring_map);
1967 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1968 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1969 sc->bge_ldata.bge_rx_std_ring,
1970 sc->bge_cdata.bge_rx_std_ring_map);
1971
1972 if (sc->bge_cdata.bge_rx_std_ring_tag)
1973 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1974
1975 /* Destroy jumbo RX ring. */
1976 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1977 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1978 sc->bge_cdata.bge_rx_jumbo_ring_map);
1979
1980 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1981 sc->bge_ldata.bge_rx_jumbo_ring)
1982 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1983 sc->bge_ldata.bge_rx_jumbo_ring,
1984 sc->bge_cdata.bge_rx_jumbo_ring_map);
1985
1986 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1987 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1988
1989 /* Destroy RX return ring. */
1990 if (sc->bge_cdata.bge_rx_return_ring_map)
1991 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1992 sc->bge_cdata.bge_rx_return_ring_map);
1993
1994 if (sc->bge_cdata.bge_rx_return_ring_map &&
1995 sc->bge_ldata.bge_rx_return_ring)
1996 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1997 sc->bge_ldata.bge_rx_return_ring,
1998 sc->bge_cdata.bge_rx_return_ring_map);
1999
2000 if (sc->bge_cdata.bge_rx_return_ring_tag)
2001 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2002
2003 /* Destroy TX ring. */
2004 if (sc->bge_cdata.bge_tx_ring_map)
2005 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2006 sc->bge_cdata.bge_tx_ring_map);
2007
2008 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2009 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2010 sc->bge_ldata.bge_tx_ring,
2011 sc->bge_cdata.bge_tx_ring_map);
2012
2013 if (sc->bge_cdata.bge_tx_ring_tag)
2014 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2015
2016 /* Destroy status block. */
2017 if (sc->bge_cdata.bge_status_map)
2018 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2019 sc->bge_cdata.bge_status_map);
2020
2021 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2022 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2023 sc->bge_ldata.bge_status_block,
2024 sc->bge_cdata.bge_status_map);
2025
2026 if (sc->bge_cdata.bge_status_tag)
2027 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2028
2029 /* Destroy statistics block. */
2030 if (sc->bge_cdata.bge_stats_map)
2031 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2032 sc->bge_cdata.bge_stats_map);
2033
2034 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2035 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2036 sc->bge_ldata.bge_stats,
2037 sc->bge_cdata.bge_stats_map);
2038
2039 if (sc->bge_cdata.bge_stats_tag)
2040 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2041
2042 /* Destroy the parent tag. */
2043 if (sc->bge_cdata.bge_parent_tag)
2044 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2045 }
2046
2047 static int
2048 bge_dma_alloc(device_t dev)
2049 {
2050 struct bge_dmamap_arg ctx;
2051 struct bge_softc *sc;
2052 int i, error;
2053
2054 sc = device_get_softc(dev);
2055
2056 /*
2057 * Allocate the parent bus DMA tag appropriate for PCI.
2058 */
2059 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2060 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2061 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2062 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2063
2064 if (error != 0) {
2065 device_printf(sc->bge_dev,
2066 "could not allocate parent dma tag\n");
2067 return (ENOMEM);
2068 }
2069
2070 /*
2071 * Create tag for mbufs.
2072 */
2073 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
2074 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2075 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
2076 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
2077
2078 if (error) {
2079 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2080 return (ENOMEM);
2081 }
2082
2083 /* Create DMA maps for RX buffers. */
2084 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2085 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2086 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2087 if (error) {
2088 device_printf(sc->bge_dev,
2089 "can't create DMA map for RX\n");
2090 return (ENOMEM);
2091 }
2092 }
2093
2094 /* Create DMA maps for TX buffers. */
2095 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2096 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
2097 &sc->bge_cdata.bge_tx_dmamap[i]);
2098 if (error) {
2099 device_printf(sc->bge_dev,
2100 "can't create DMA map for RX\n");
2101 return (ENOMEM);
2102 }
2103 }
2104
2105 /* Create tag for standard RX ring. */
2106 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2107 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2108 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
2109 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
2110
2111 if (error) {
2112 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2113 return (ENOMEM);
2114 }
2115
2116 /* Allocate DMA'able memory for standard RX ring. */
2117 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2118 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2119 &sc->bge_cdata.bge_rx_std_ring_map);
2120 if (error)
2121 return (ENOMEM);
2122
2123 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2124
2125 /* Load the address of the standard RX ring. */
2126 ctx.bge_maxsegs = 1;
2127 ctx.sc = sc;
2128
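/*
 * bus_dmamap_load() hands the bus address to the bge_dma_map_addr()
 * callback, which records it in ctx.bge_busaddr; with BUS_DMA_NOWAIT
 * the load never defers, so ctx is valid as soon as the call returns
 * without error.  The same pattern repeats for every ring below.
 */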
2129 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2130 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2131 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2132
2133 if (error)
2134 return (ENOMEM);
2135
2136 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2137
2138 /* Create tags for jumbo mbufs. */
2139 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2140 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2141 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2142 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2143 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2144 if (error) {
2145 device_printf(sc->bge_dev,
2146 "could not allocate jumbo dma tag\n");
2147 return (ENOMEM);
2148 }
2149
2150 /* Create tag for jumbo RX ring. */
2151 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2152 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2153 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2154 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2155
2156 if (error) {
2157 device_printf(sc->bge_dev,
2158 "could not allocate jumbo ring dma tag\n");
2159 return (ENOMEM);
2160 }
2161
2162 /* Allocate DMA'able memory for jumbo RX ring. */
2163 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2164 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
2165 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2166 &sc->bge_cdata.bge_rx_jumbo_ring_map);
2167 if (error)
2168 return (ENOMEM);
2169
2170 /* Load the address of the jumbo RX ring. */
2171 ctx.bge_maxsegs = 1;
2172 ctx.sc = sc;
2173
2174 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2175 sc->bge_cdata.bge_rx_jumbo_ring_map,
2176 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2177 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2178
2179 if (error)
2180 return (ENOMEM);
2181
2182 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2183
2184 /* Create DMA maps for jumbo RX buffers. */
2185 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2186 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2187 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2188 if (error) {
2189 device_printf(sc->bge_dev,
2190 "can't create DMA map for jumbo RX\n");
2191 return (ENOMEM);
2192 }
2193 }
2194
2195 }
2196
2197 /* Create tag for RX return ring. */
2198 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2199 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2200 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2201 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2202
2203 if (error) {
2204 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2205 return (ENOMEM);
2206 }
2207
2208 /* Allocate DMA'able memory for RX return ring. */
2209 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2210 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2211 &sc->bge_cdata.bge_rx_return_ring_map);
2212 if (error)
2213 return (ENOMEM);
2214
2215 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2216 BGE_RX_RTN_RING_SZ(sc));
2217
2218 /* Load the address of the RX return ring. */
2219 ctx.bge_maxsegs = 1;
2220 ctx.sc = sc;
2221
2222 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2223 sc->bge_cdata.bge_rx_return_ring_map,
2224 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2225 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2226
2227 if (error)
2228 return (ENOMEM);
2229
2230 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2231
2232 /* Create tag for TX ring. */
2233 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2234 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2235 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2236 &sc->bge_cdata.bge_tx_ring_tag);
2237
2238 if (error) {
2239 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2240 return (ENOMEM);
2241 }
2242
2243 /* Allocate DMA'able memory for TX ring. */
2244 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2245 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2246 &sc->bge_cdata.bge_tx_ring_map);
2247 if (error)
2248 return (ENOMEM);
2249
2250 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2251
2252 /* Load the address of the TX ring. */
2253 ctx.bge_maxsegs = 1;
2254 ctx.sc = sc;
2255
2256 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2257 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2258 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2259
2260 if (error)
2261 return (ENOMEM);
2262
2263 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2264
2265 /* Create tag for status block. */
2266 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2267 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2268 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2269 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2270
2271 if (error) {
2272 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2273 return (ENOMEM);
2274 }
2275
2276 /* Allocate DMA'able memory for status block. */
2277 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2278 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2279 &sc->bge_cdata.bge_status_map);
2280 if (error)
2281 return (ENOMEM);
2282
2283 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2284
2285 /* Load the address of the status block. */
2286 ctx.sc = sc;
2287 ctx.bge_maxsegs = 1;
2288
2289 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2290 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2291 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2292
2293 if (error)
2294 return (ENOMEM);
2295
2296 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2297
2298 /* Create tag for statistics block. */
2299 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2300 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2301 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2302 &sc->bge_cdata.bge_stats_tag);
2303
2304 if (error) {
2305 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2306 return (ENOMEM);
2307 }
2308
2309 /* Allocate DMA'able memory for statistics block. */
2310 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2311 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2312 &sc->bge_cdata.bge_stats_map);
2313 if (error)
2314 return (ENOMEM);
2315
2316 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2317
2318 /* Load the address of the statistics block. */
2319 ctx.sc = sc;
2320 ctx.bge_maxsegs = 1;
2321
2322 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2323 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2324 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2325
2326 if (error)
2327 return (ENOMEM);
2328
2329 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2330
2331 return (0);
2332 }
2333
2334 #if __FreeBSD_version > 602105
2335 /*
2336 * Return true if this device has more than one port.
2337 */
2338 static int
2339 bge_has_multiple_ports(struct bge_softc *sc)
2340 {
2341 device_t dev = sc->bge_dev;
2342 u_int b, d, f, fscan, s;
2343
2344 d = pci_get_domain(dev);
2345 b = pci_get_bus(dev);
2346 s = pci_get_slot(dev);
2347 f = pci_get_function(dev);
2348 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2349 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2350 return (1);
2351 return (0);
2352 }
2353
2354 /*
2355 * Return true if MSI can be used with this device.
2356 */
2357 static int
2358 bge_can_use_msi(struct bge_softc *sc)
2359 {
2360 int can_use_msi = 0;
2361
2362 switch (sc->bge_asicrev) {
2363 case BGE_ASICREV_BCM5714_A0:
2364 case BGE_ASICREV_BCM5714:
2365 /*
2366 * Apparently, MSI doesn't work when these chips are
2367 * configured in single-port mode.
2368 */
2369 if (bge_has_multiple_ports(sc))
2370 can_use_msi = 1;
2371 break;
2372 case BGE_ASICREV_BCM5750:
2373 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2374 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2375 can_use_msi = 1;
2376 break;
2377 default:
2378 if (BGE_IS_575X_PLUS(sc))
2379 can_use_msi = 1;
2380 }
2381 return (can_use_msi);
2382 }
2383 #endif
2384
2385 static int
2386 bge_attach(device_t dev)
2387 {
2388 struct ifnet *ifp;
2389 struct bge_softc *sc;
2390 uint32_t hwcfg = 0, misccfg;
2391 u_char eaddr[ETHER_ADDR_LEN];
2392 int error, reg, rid, trys;
2393
2394 sc = device_get_softc(dev);
2395 sc->bge_dev = dev;
2396
2397 /*
2398 * Map control/status registers.
2399 */
2400 pci_enable_busmaster(dev);
2401
2402 rid = BGE_PCI_BAR0;
2403 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2404 RF_ACTIVE);
2405
2406 if (sc->bge_res == NULL) {
2407 device_printf(sc->bge_dev, "couldn't map memory\n");
2408 error = ENXIO;
2409 goto fail;
2410 }
2411
2412 /* Save various chip information. */
2413 sc->bge_chipid =
2414 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2415 BGE_PCIMISCCTL_ASICREV;
2416 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2417 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2418
2419 /*
2420 * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
2421 * 5705 A0 and A1 chips.
2422 */
2423 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2424 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
2425 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2426 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2427 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2428
2429 if (bge_has_eaddr(sc))
2430 sc->bge_flags |= BGE_FLAG_EADDR;
2431
2432 /* Save chipset family. */
2433 switch (sc->bge_asicrev) {
2434 case BGE_ASICREV_BCM5700:
2435 case BGE_ASICREV_BCM5701:
2436 case BGE_ASICREV_BCM5703:
2437 case BGE_ASICREV_BCM5704:
2438 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2439 break;
2440 case BGE_ASICREV_BCM5714_A0:
2441 case BGE_ASICREV_BCM5780:
2442 case BGE_ASICREV_BCM5714:
2443 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2444 /* FALLTHROUGH */
2445 case BGE_ASICREV_BCM5750:
2446 case BGE_ASICREV_BCM5752:
2447 case BGE_ASICREV_BCM5755:
2448 case BGE_ASICREV_BCM5787:
2449 case BGE_ASICREV_BCM5906:
2450 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2451 /* FALLTHROUGH */
2452 case BGE_ASICREV_BCM5705:
2453 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2454 break;
2455 }
2456
2457 /* Set various bug flags. */
2458 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2459 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2460 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2461 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2462 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2463 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2464 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2465 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2466 if (BGE_IS_5705_PLUS(sc) &&
2467 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2468 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2469 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2470 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2471 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2472 } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
2473 sc->bge_flags |= BGE_FLAG_BER_BUG;
2474 }
2475
2477 /*
2478 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
2479 * but I do not know the DEVICEID for the 5788M.
2480 */
2481 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
2482 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2483 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
2484 sc->bge_flags |= BGE_FLAG_5788;
2485
2486 /*
2487 * Check if this is a PCI-X or PCI Express device.
2488 */
2489 #if __FreeBSD_version > 602101
2490 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2491 /*
2492 * Found a PCI Express capabilities register, this
2493 * must be a PCI Express device.
2494 */
2495 if (reg != 0) {
2496 sc->bge_flags |= BGE_FLAG_PCIE;
2497 #else
2498 if (BGE_IS_5705_PLUS(sc)) {
2499 reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2500 if ((reg & 0xFF) == BGE_PCIE_CAPID) {
2501 sc->bge_flags |= BGE_FLAG_PCIE;
2502 reg = BGE_PCIE_CAPID;
2503 #endif
2504 bge_set_max_readrq(sc, reg);
2505 }
2506 } else {
2507 /*
2508 * Check if the device is in PCI-X Mode.
2509 * (This bit is not valid on PCI Express controllers.)
2510 */
2511 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2512 BGE_PCISTATE_PCI_BUSMODE) == 0)
2513 sc->bge_flags |= BGE_FLAG_PCIX;
2514 }
2515
2516 #if __FreeBSD_version > 602105
2517 {
2518 int msicount;
2519
2520 /*
2521 * Allocate the interrupt, using MSI if possible. These devices
2522 * support 8 MSI messages, but only the first one is used in
2523 * normal operation.
2524 */
2525 if (bge_can_use_msi(sc)) {
2526 msicount = pci_msi_count(dev);
2527 if (msicount > 1)
2528 msicount = 1;
2529 } else
2530 msicount = 0;
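/* MSI vectors use rid 1 and up; the legacy INTx line is rid 0. */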
2531 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2532 rid = 1;
2533 sc->bge_flags |= BGE_FLAG_MSI;
2534 } else
2535 rid = 0;
2536 }
2537 #else
2538 rid = 0;
2539 #endif
2540
2541 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2542 RF_SHAREABLE | RF_ACTIVE);
2543
2544 if (sc->bge_irq == NULL) {
2545 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2546 error = ENXIO;
2547 goto fail;
2548 }
2549
2550 if (bootverbose)
2551 device_printf(dev,
2552 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
2553 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
2554 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
2555 ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
2556
2557 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2558
2559 /* Try to reset the chip. */
2560 if (bge_reset(sc)) {
2561 device_printf(sc->bge_dev, "chip reset failed\n");
2562 error = ENXIO;
2563 goto fail;
2564 }
2565
2566 sc->bge_asf_mode = 0;
2567 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2568 == BGE_MAGIC_NUMBER)) {
2569 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2570 & BGE_HWCFG_ASF) {
2571 sc->bge_asf_mode |= ASF_ENABLE;
2572 sc->bge_asf_mode |= ASF_STACKUP;
2573 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2574 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2575 }
2576 }
2577 }
2578
2579 /* Try to reset the chip again the nice way. */
2580 bge_stop_fw(sc);
2581 bge_sig_pre_reset(sc, BGE_RESET_STOP);
2582 if (bge_reset(sc)) {
2583 device_printf(sc->bge_dev, "chip reset failed\n");
2584 error = ENXIO;
2585 goto fail;
2586 }
2587
2588 bge_sig_legacy(sc, BGE_RESET_STOP);
2589 bge_sig_post_reset(sc, BGE_RESET_STOP);
2590
2591 if (bge_chipinit(sc)) {
2592 device_printf(sc->bge_dev, "chip initialization failed\n");
2593 error = ENXIO;
2594 goto fail;
2595 }
2596
2597 error = bge_get_eaddr(sc, eaddr);
2598 if (error) {
2599 device_printf(sc->bge_dev,
2600 "failed to read station address\n");
2601 error = ENXIO;
2602 goto fail;
2603 }
2604
2605 /* 5705 limits RX return ring to 512 entries. */
2606 if (BGE_IS_5705_PLUS(sc))
2607 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2608 else
2609 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2610
2611 if (bge_dma_alloc(dev)) {
2612 device_printf(sc->bge_dev,
2613 "failed to allocate DMA resources\n");
2614 error = ENXIO;
2615 goto fail;
2616 }
2617
2618 /* Set default tuneable values. */
2619 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2620 sc->bge_rx_coal_ticks = 150;
2621 sc->bge_tx_coal_ticks = 150;
2622 sc->bge_rx_max_coal_bds = 10;
2623 sc->bge_tx_max_coal_bds = 10;
2624
2625 /* Set up ifnet structure */
2626 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2627 if (ifp == NULL) {
2628 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2629 error = ENXIO;
2630 goto fail;
2631 }
2632 ifp->if_softc = sc;
2633 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2634 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2635 ifp->if_ioctl = bge_ioctl;
2636 ifp->if_start = bge_start;
2637 ifp->if_init = bge_init;
2638 ifp->if_mtu = ETHERMTU;
2639 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2640 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2641 IFQ_SET_READY(&ifp->if_snd);
2642 ifp->if_hwassist = BGE_CSUM_FEATURES;
2643 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2644 IFCAP_VLAN_MTU;
2645 #ifdef IFCAP_VLAN_HWCSUM
2646 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2647 #endif
2648 ifp->if_capenable = ifp->if_capabilities;
2649 #ifdef DEVICE_POLLING
2650 ifp->if_capabilities |= IFCAP_POLLING;
2651 #endif
2652
2653 /*
2654 * 5700 B0 chips do not support checksumming correctly due
2655 * to hardware bugs.
2656 */
2657 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2658 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2659 ifp->if_capenable &= ~IFCAP_HWCSUM;
2660 ifp->if_hwassist = 0;
2661 }
2662
2663 /*
2664 * Figure out what sort of media we have by checking the
2665 * hardware config word in the first 32k of NIC internal memory,
2666 * or fall back to examining the EEPROM if necessary.
2667 * Note: on some BCM5700 cards, this value appears to be unset.
2668 * If that's the case, we have to rely on identifying the NIC
2669 * by its PCI subsystem ID, as we do below for the SysKonnect
2670 * SK-9D41.
2671 */
2672 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2673 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2674 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
2675 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2676 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2677 sizeof(hwcfg))) {
2678 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2679 error = ENXIO;
2680 goto fail;
2681 }
2682 hwcfg = ntohl(hwcfg);
2683 }
2684
2685 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2686 sc->bge_flags |= BGE_FLAG_TBI;
2687
2688 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
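/* The subsystem device ID occupies the upper 16 bits of the register. */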
2689 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2690 sc->bge_flags |= BGE_FLAG_TBI;
2691
2692 if (sc->bge_flags & BGE_FLAG_TBI) {
2693 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2694 bge_ifmedia_sts);
2695 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2696 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2697 0, NULL);
2698 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2699 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2700 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2701 } else {
2702 /*
2703 * Do transceiver setup and tell the firmware the
2704 * driver is down so we can try to get access to the PHY
2705 * during the probe if ASF is running. Retry a couple of times
2706 * if we get a conflict with the ASF firmware accessing
2707 * the PHY.
2708 */
2709 trys = 0;
2710 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2711 again:
2712 bge_asf_driver_up(sc);
2713
2714 if (mii_phy_probe(dev, &sc->bge_miibus,
2715 bge_ifmedia_upd, bge_ifmedia_sts)) {
2716 if (trys++ < 4) {
2717 device_printf(sc->bge_dev, "Try again\n");
2718 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2719 BMCR_RESET);
2720 goto again;
2721 }
2722
2723 device_printf(sc->bge_dev, "MII without any PHY!\n");
2724 error = ENXIO;
2725 goto fail;
2726 }
2727
2728 /*
2729 * Now tell the firmware we are going up after probing the PHY.
2730 */
2731 if (sc->bge_asf_mode & ASF_STACKUP)
2732 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2733 }
2734
2735 /*
2736 * When using the BCM5701 in PCI-X mode, data corruption has
2737 * been observed in the first few bytes of some received packets.
2738 * Aligning the packet buffer in memory eliminates the corruption.
2739 * Unfortunately, this misaligns the packet payloads. On platforms
2740 * which do not support unaligned accesses, we will realign the
2741 * payloads by copying the received packets.
2742 */
2743 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2744 sc->bge_flags & BGE_FLAG_PCIX)
2745 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2746
2747 /*
2748 * Call MI attach routine.
2749 */
2750 ether_ifattach(ifp, eaddr);
2751 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2752
2753 /*
2754 * Hookup IRQ last.
2755 */
2756 #if __FreeBSD_version > 700030
2757 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2758 NULL, bge_intr, sc, &sc->bge_intrhand);
2759 #else
2760 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2761 bge_intr, sc, &sc->bge_intrhand);
2762 #endif
2763
2764 if (error) {
2765 bge_detach(dev);
2766 device_printf(sc->bge_dev, "couldn't set up irq\n");
return (error); /* don't report a torn-down attach as success */
2767 }
2768
2769 bge_add_sysctls(sc);
2770
2771 return (0);
2772
2773 fail:
2774 bge_release_resources(sc);
2775
2776 return (error);
2777 }
2778
2779 static int
2780 bge_detach(device_t dev)
2781 {
2782 struct bge_softc *sc;
2783 struct ifnet *ifp;
2784
2785 sc = device_get_softc(dev);
2786 ifp = sc->bge_ifp;
2787
2788 #ifdef DEVICE_POLLING
2789 if (ifp->if_capenable & IFCAP_POLLING)
2790 ether_poll_deregister(ifp);
2791 #endif
2792
2793 BGE_LOCK(sc);
2794 bge_stop(sc);
2795 bge_reset(sc);
2796 BGE_UNLOCK(sc);
2797
2798 callout_drain(&sc->bge_stat_ch);
2799
2800 ether_ifdetach(ifp);
2801
2802 if (sc->bge_flags & BGE_FLAG_TBI) {
2803 ifmedia_removeall(&sc->bge_ifmedia);
2804 } else {
2805 bus_generic_detach(dev);
2806 device_delete_child(dev, sc->bge_miibus);
2807 }
2808
2809 bge_release_resources(sc);
2810
2811 return (0);
2812 }
2813
2814 static void
2815 bge_release_resources(struct bge_softc *sc)
2816 {
2817 device_t dev;
2818
2819 dev = sc->bge_dev;
2820
2821 if (sc->bge_intrhand != NULL)
2822 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2823
2824 if (sc->bge_irq != NULL)
2825 bus_release_resource(dev, SYS_RES_IRQ,
2826 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2827
2828 #if __FreeBSD_version > 602105
2829 if (sc->bge_flags & BGE_FLAG_MSI)
2830 pci_release_msi(dev);
2831 #endif
2832
2833 if (sc->bge_res != NULL)
2834 bus_release_resource(dev, SYS_RES_MEMORY,
2835 BGE_PCI_BAR0, sc->bge_res);
2836
2837 if (sc->bge_ifp != NULL)
2838 if_free(sc->bge_ifp);
2839
2840 bge_dma_free(sc);
2841
2842 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2843 BGE_LOCK_DESTROY(sc);
2844 }
2845
2846 static int
2847 bge_reset(struct bge_softc *sc)
2848 {
2849 device_t dev;
2850 uint32_t cachesize, command, pcistate, reset, val;
2851 void (*write_op)(struct bge_softc *, int, int);
2852 int i;
2853
2854 dev = sc->bge_dev;
2855
2856 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
2857 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2858 if (sc->bge_flags & BGE_FLAG_PCIE)
2859 write_op = bge_writemem_direct;
2860 else
2861 write_op = bge_writemem_ind;
2862 } else
2863 write_op = bge_writereg_ind;
2864
2865 /* Save some important PCI state. */
2866 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2867 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2868 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2869
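/*
 * Enable indirect register access and mask the PCI interrupt before
 * resetting so the chip remains reachable through config space while
 * the core is down.
 */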
2870 pci_write_config(dev, BGE_PCI_MISC_CTL,
2871 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2872 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2873
2874 /* Disable fastboot on controllers that support it. */
2875 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2876 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2877 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2878 if (bootverbose)
2879 device_printf(sc->bge_dev, "Disabling fastboot\n");
2880 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2881 }
2882
2883 /*
2884 * Write the magic number to SRAM at offset 0xB50.
2885 * When firmware finishes its initialization it will
2886 * write ~BGE_MAGIC_NUMBER to the same location.
2887 */
2888 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2889
2890 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
2891
2892 /* XXX: Broadcom Linux driver. */
2893 if (sc->bge_flags & BGE_FLAG_PCIE) {
2894 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
2895 CSR_WRITE_4(sc, 0x7E2C, 0x20);
2896 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2897 /* Prevent PCIE link training during global reset */
2898 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2899 reset |= 1 << 29;
2900 }
2901 }
2902
2903 /*
2904 * Set GPHY Power Down Override to leave GPHY
2905 * powered up in D0 uninitialized.
2906 */
2907 if (BGE_IS_5705_PLUS(sc))
2908 reset |= 0x04000000;
2909
2910 /* Issue global reset */
2911 write_op(sc, BGE_MISC_CFG, reset);
2912
2913 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2914 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2915 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2916 val | BGE_VCPU_STATUS_DRV_RESET);
2917 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2918 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2919 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2920 }
2921
2922 DELAY(1000);
2923
2924 /* XXX: Broadcom Linux driver. */
2925 if (sc->bge_flags & BGE_FLAG_PCIE) {
2926 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2927 DELAY(500000); /* wait for link training to complete */
2928 val = pci_read_config(dev, 0xC4, 4);
2929 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
2930 }
2931 /*
2932 * Set PCIE max payload size to 128 bytes and clear error
2933 * status.
2934 */
2935 pci_write_config(dev, 0xD8, 0xF5000, 4);
2936 }
2937
2938 /* Reset some of the PCI state that got zapped by reset. */
2939 pci_write_config(dev, BGE_PCI_MISC_CTL,
2940 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2941 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2942 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2943 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2944 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
2945
2946 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
2947 if (BGE_IS_5714_FAMILY(sc)) {
2948 /* This chip disables MSI on reset. */
2949 if (sc->bge_flags & BGE_FLAG_MSI) {
2950 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2951 pci_write_config(dev, BGE_PCI_MSI_CTL,
2952 val | PCIM_MSICTRL_MSI_ENABLE, 2);
2953 val = CSR_READ_4(sc, BGE_MSI_MODE);
2954 CSR_WRITE_4(sc, BGE_MSI_MODE,
2955 val | BGE_MSIMODE_ENABLE);
2956 }
2957 val = CSR_READ_4(sc, BGE_MARB_MODE);
2958 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2959 } else
2960 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2961
2962 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2963 for (i = 0; i < BGE_TIMEOUT; i++) {
2964 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2965 if (val & BGE_VCPU_STATUS_INIT_DONE)
2966 break;
2967 DELAY(100);
2968 }
2969 if (i == BGE_TIMEOUT) {
2970 device_printf(sc->bge_dev, "reset timed out\n");
2971 return (1);
2972 }
2973 } else {
2974 /*
2975 * Poll until we see the 1's complement of the magic number.
2976 * This indicates that the firmware initialization is complete.
2977 * We expect this to fail if no chip containing the Ethernet
2978 * address is fitted, though.
2979 */
2980 for (i = 0; i < BGE_TIMEOUT; i++) {
2981 DELAY(10);
2982 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2983 if (val == ~BGE_MAGIC_NUMBER)
2984 break;
2985 }
2986
2987 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
2988 device_printf(sc->bge_dev, "firmware handshake timed out, "
2989 "found 0x%08x\n", val);
2990 }
2991
2992 /*
2993 * XXX Wait for the value of the PCISTATE register to
2994 * return to its original pre-reset state. This is a
2995 * fairly good indicator of reset completion. If we don't
2996 * wait for the reset to fully complete, trying to read
2997 * from the device's non-PCI registers may yield garbage
2998 * results.
2999 */
3000 for (i = 0; i < BGE_TIMEOUT; i++) {
3001 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3002 break;
3003 DELAY(10);
3004 }
3005
3006 if (sc->bge_flags & BGE_FLAG_PCIE) {
3007 reset = bge_readmem_ind(sc, 0x7C00);
3008 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25));
3009 }
3010
3011 /* Fix up byte swapping. */
3012 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
3013 BGE_MODECTL_BYTESWAP_DATA);
3014
3015 /* Tell the ASF firmware we are up */
3016 if (sc->bge_asf_mode & ASF_STACKUP)
3017 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3018
3019 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3020
3021 /*
3022 * The 5704 in TBI mode apparently needs some special
3023 * adjustment to ensure the SERDES drive level is set
3024 * to 1.2V.
3025 */
3026 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3027 sc->bge_flags & BGE_FLAG_TBI) {
3028 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3029 val = (val & ~0xFFF) | 0x880;
3030 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3031 }
3032
3033 /* XXX: Broadcom Linux driver. */
3034 if (sc->bge_flags & BGE_FLAG_PCIE &&
3035 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3036 val = CSR_READ_4(sc, 0x7C00);
3037 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3038 }
3039 DELAY(10000);
3040
3041 return (0);
3042 }
3043
3044 /*
3045 * Frame reception handling. This is called if there's a frame
3046 * on the receive return list.
3047 *
3048 * Note: we have to be able to handle two possibilities here:
3049 * 1) the frame is from the jumbo receive ring
3050 * 2) the frame is from the standard receive ring
3051 */
3052
3053 static int
3054 bge_rxeof(struct bge_softc *sc)
3055 {
3056 struct ifnet *ifp;
3057 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
3058 uint16_t rx_prod, rx_cons;
3059
3060 BGE_LOCK_ASSERT(sc);
3061 rx_cons = sc->bge_rx_saved_considx;
3062 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3063
3064 /* Nothing to do. */
3065 if (rx_cons == rx_prod)
3066 return (rx_npkts);
3067
3068 ifp = sc->bge_ifp;
3069
3070 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3071 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3072 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3073 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
3074 if (BGE_IS_JUMBO_CAPABLE(sc))
3075 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3076 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
3077
3078 while (rx_cons != rx_prod) {
3079 struct bge_rx_bd *cur_rx;
3080 uint32_t rxidx;
3081 struct mbuf *m = NULL;
3082 uint16_t vlan_tag = 0;
3083 int have_tag = 0;
3084
3085 #ifdef DEVICE_POLLING
3086 if (ifp->if_capenable & IFCAP_POLLING) {
3087 if (sc->rxcycles <= 0)
3088 break;
3089 sc->rxcycles--;
3090 }
3091 #endif
3092
3093 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3094
3095 rxidx = cur_rx->bge_idx;
3096 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
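/* BGE_INC() advances an index modulo the given ring size. */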
3097
3098 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3099 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3100 have_tag = 1;
3101 vlan_tag = cur_rx->bge_vlan_tag;
3102 }
3103
3104 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3105 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3106 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
3107 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
3108 BUS_DMASYNC_POSTREAD);
3109 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
3110 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
3111 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3112 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3113 jumbocnt++;
3114 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3115 ifp->if_ierrors++;
3116 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3117 continue;
3118 }
3119 if (bge_newbuf_jumbo(sc,
3120 sc->bge_jumbo, NULL) == ENOBUFS) {
3121 ifp->if_ierrors++;
3122 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3123 continue;
3124 }
3125 } else {
3126 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3127 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3128 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
3129 BUS_DMASYNC_POSTREAD);
3130 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3131 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
3132 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3133 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3134 stdcnt++;
3135 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3136 ifp->if_ierrors++;
3137 bge_newbuf_std(sc, sc->bge_std, m);
3138 continue;
3139 }
3140 if (bge_newbuf_std(sc, sc->bge_std,
3141 NULL) == ENOBUFS) {
3142 ifp->if_ierrors++;
3143 bge_newbuf_std(sc, sc->bge_std, m);
3144 continue;
3145 }
3146 }
3147
3148 ifp->if_ipackets++;
3149 #ifndef __NO_STRICT_ALIGNMENT
3150 /*
3151 * For architectures with strict alignment we must make sure
3152 * the payload is aligned.
3153 */
3154 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
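/*
 * Shift the frame up by ETHER_ALIGN (2) bytes so the IP header
 * lands on a 32-bit boundary; bcopy() handles the overlapping move.
 */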
3155 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3156 cur_rx->bge_len);
3157 m->m_data += ETHER_ALIGN;
3158 }
3159 #endif
3160 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3161 m->m_pkthdr.rcvif = ifp;
3162
3163 if (ifp->if_capenable & IFCAP_RXCSUM) {
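/*
 * The chip reports raw ones-complement sums, so XORing with 0xFFFF
 * yields zero exactly when the IP header checksum verified.
 */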
3164 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
3165 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3166 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
3167 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3168 }
3169 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3170 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
3171 m->m_pkthdr.csum_data =
3172 cur_rx->bge_tcp_udp_csum;
3173 m->m_pkthdr.csum_flags |=
3174 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
3175 }
3176 }
3177
3178 /*
3179 * If we received a packet with a vlan tag,
3180 * attach that information to the packet.
3181 */
3182 if (have_tag) {
3183 #if __FreeBSD_version > 700022
3184 m->m_pkthdr.ether_vtag = vlan_tag;
3185 m->m_flags |= M_VLANTAG;
3186 #else
3187 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
3188 if (m == NULL)
3189 continue;
3190 #endif
3191 }
3192
3193 BGE_UNLOCK(sc);
3194 (*ifp->if_input)(ifp, m);
3195 BGE_LOCK(sc);
3196 rx_npkts++;
3197
3198 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3199 return (rx_npkts);
3200 }
3201
3202 if (stdcnt > 0)
3203 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3204 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3205
3206 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
3207 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3208 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3209
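/*
 * Tell the chip how far the return ring has been consumed and,
 * where buffers were replenished, the new producer indices.
 */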
3210 sc->bge_rx_saved_considx = rx_cons;
3211 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3212 if (stdcnt)
3213 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3214 if (jumbocnt)
3215 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3216 #ifdef notyet
3217 /*
3218 * This register wraps very quickly under heavy packet drops.
3219 * If you need correct statistics, you can enable this check.
3220 */
3221 if (BGE_IS_5705_PLUS(sc))
3222 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3223 #endif
3224 return (rx_npkts);
3225 }
3226
3227 static void
3228 bge_txeof(struct bge_softc *sc)
3229 {
3230 struct bge_tx_bd *cur_tx = NULL;
3231 struct ifnet *ifp;
3232
3233 BGE_LOCK_ASSERT(sc);
3234
3235 /* Nothing to do. */
3236 if (sc->bge_tx_saved_considx ==
3237 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
3238 return;
3239
3240 ifp = sc->bge_ifp;
3241
3242 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
3243 sc->bge_cdata.bge_tx_ring_map,
3244 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3245 /*
3246 * Go through our tx ring and free mbufs for those
3247 * frames that have been sent.
3248 */
3249 while (sc->bge_tx_saved_considx !=
3250 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
3251 uint32_t idx = 0;
3252
3253 idx = sc->bge_tx_saved_considx;
3254 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3255 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3256 ifp->if_opackets++;
3257 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3258 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3259 sc->bge_cdata.bge_tx_dmamap[idx],
3260 BUS_DMASYNC_POSTWRITE);
3261 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3262 sc->bge_cdata.bge_tx_dmamap[idx]);
3263 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3264 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3265 }
3266 sc->bge_txcnt--;
3267 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3268 }
3269
3270 if (cur_tx != NULL)
3271 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3272 if (sc->bge_txcnt == 0)
3273 sc->bge_timer = 0;
3274 }
3275
3276 #ifdef DEVICE_POLLING
3277 static int
3278 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3279 {
3280 struct bge_softc *sc = ifp->if_softc;
3281 uint32_t statusword;
3282 int rx_npkts = 0;
3283
3284 BGE_LOCK(sc);
3285 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3286 BGE_UNLOCK(sc);
3287 return (rx_npkts);
3288 }
3289
3290 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3291 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3292
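/*
 * Read and clear the status word in a single atomic operation so a
 * link-change flag cannot be consumed twice or dropped between a
 * separate read and clear.
 */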
3293 statusword = atomic_readandclear_32(
3294 &sc->bge_ldata.bge_status_block->bge_status);
3295
3296 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3297 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3298
3299 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3300 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3301 sc->bge_link_evt++;
3302
3303 if (cmd == POLL_AND_CHECK_STATUS)
3304 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3305 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3306 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3307 bge_link_upd(sc);
3308
3309 sc->rxcycles = count;
3310 rx_npkts = bge_rxeof(sc);
3311 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3312 BGE_UNLOCK(sc);
3313 return (rx_npkts);
3314 }
3315 bge_txeof(sc);
3316 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3317 bge_start_locked(ifp);
3318
3319 BGE_UNLOCK(sc);
3320 return (rx_npkts);
3321 }
3322 #endif /* DEVICE_POLLING */
3323
3324 static void
3325 bge_intr(void *xsc)
3326 {
3327 struct bge_softc *sc;
3328 struct ifnet *ifp;
3329 uint32_t statusword;
3330
3331 sc = xsc;
3332
3333 BGE_LOCK(sc);
3334
3335 ifp = sc->bge_ifp;
3336
3337 #ifdef DEVICE_POLLING
3338 if (ifp->if_capenable & IFCAP_POLLING) {
3339 BGE_UNLOCK(sc);
3340 return;
3341 }
3342 #endif
3343
3344 /*
3345 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3346 * disable interrupts by writing nonzero like we used to, since with
3347 * our current organization this just gives complications and
3348 * pessimizations for re-enabling interrupts. We used to have races
3349 * instead of the necessary complications. Disabling interrupts
3350 * would just reduce the chance of a status update while we are
3351 * running (by switching to the interrupt-mode coalescence
3352 * parameters), but this chance is already very low so it is more
3353 * efficient to get another interrupt than prevent it.
3354 *
3355 * We do the ack first to ensure another interrupt if there is a
3356 * status update after the ack. We don't check for the status
3357 * changing later because it is more efficient to get another
3358 * interrupt than prevent it, not quite as above (not checking is
3359 * a smaller optimization than not toggling the interrupt enable,
3360 * since checking doesn't involve PCI accesses and toggling requires
3361 * the status check). So toggling would probably be a pessimization
3362 * even with MSI. It would only be needed for using a task queue.
3363 */
3364 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3365
3366 /*
3367 * Do the mandatory PCI flush as well as get the link status.
3368 */
3369 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3370
3371 /* Make sure the descriptor ring indexes are coherent. */
3372 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3373 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3374 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3375 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3376
3377 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3378 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3379 statusword || sc->bge_link_evt)
3380 bge_link_upd(sc);
3381
3382 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3383 /* Check RX return ring producer/consumer. */
3384 bge_rxeof(sc);
3385 }
3386
3387 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3388 /* Check TX ring producer/consumer. */
3389 bge_txeof(sc);
3390 }
3391
3392 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3393 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3394 bge_start_locked(ifp);
3395
3396 BGE_UNLOCK(sc);
3397 }
3398
3399 static void
3400 bge_asf_driver_up(struct bge_softc *sc)
3401 {
3402 if (sc->bge_asf_mode & ASF_STACKUP) {
3403 /* Send ASF heartbeat approx. every 2s. */
3404 if (sc->bge_asf_count)
3405 sc->bge_asf_count--;
3406 else {
3407 sc->bge_asf_count = 5;
3408 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3409 BGE_FW_DRV_ALIVE);
3410 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3411 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3412 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3413 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3414 }
3415 }
3416 }
3417
3418 static void
3419 bge_tick(void *xsc)
3420 {
3421 struct bge_softc *sc = xsc;
3422 struct mii_data *mii = NULL;
3423
3424 BGE_LOCK_ASSERT(sc);
3425
3426 /* Synchronize with possible callout reset/stop. */
3427 if (callout_pending(&sc->bge_stat_ch) ||
3428 !callout_active(&sc->bge_stat_ch))
3429 return;
3430
3431 if (BGE_IS_5705_PLUS(sc))
3432 bge_stats_update_regs(sc);
3433 else
3434 bge_stats_update(sc);
3435
3436 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3437 mii = device_get_softc(sc->bge_miibus);
3438 /*
3439 * Do not touch PHY if we have link up. This could break
3440 * IPMI/ASF mode or produce extra input errors
3441 * (extra errors were reported for bcm5701 & bcm5704).
3442 */
3443 if (!sc->bge_link)
3444 mii_tick(mii);
3445 } else {
3446 /*
3447 * Since auto-polling can't be used in TBI mode, we poll the
3448 * link status manually. Here we register a pending link event
3449 * and trigger an interrupt.
3450 */
3451 #ifdef DEVICE_POLLING
3452 /* In polling mode we poll link state in bge_poll(). */
3453 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3454 #endif
3455 {
3456 sc->bge_link_evt++;
3457 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3458 sc->bge_flags & BGE_FLAG_5788)
3459 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3460 else
3461 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3462 }
3463 }
3464
3465 bge_asf_driver_up(sc);
3466 bge_watchdog(sc);
3467
3468 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3469 }
3470
3471 static void
3472 bge_stats_update_regs(struct bge_softc *sc)
3473 {
3474 struct ifnet *ifp;
3475
3476 ifp = sc->bge_ifp;
3477
3478 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3479 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3480
3481 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3482 }
3483
3484 static void
3485 bge_stats_update(struct bge_softc *sc)
3486 {
3487 struct ifnet *ifp;
3488 bus_size_t stats;
3489 uint32_t cnt; /* current register value */
3490
3491 ifp = sc->bge_ifp;
3492
3493 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3494
3495 #define READ_STAT(sc, stats, stat) \
3496 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3497
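/*
 * The NIC's counters are cumulative, so fold only the delta since
 * the previous tick into the ifnet statistics and remember the new
 * running value.
 */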
3498 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3499 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3500 sc->bge_tx_collisions = cnt;
3501
3502 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3503 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3504 sc->bge_rx_discards = cnt;
3505
3506 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3507 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3508 sc->bge_tx_discards = cnt;
3509
3510 #undef READ_STAT
3511 }
3512
3513 /*
3514 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3515 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3516 * but when such padded frames employ the bge IP/TCP checksum offload,
3517 * the hardware checksum assist gives incorrect results (possibly
3518 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3519 * If we pad such runts with zeros, the onboard checksum comes out correct.
3520 */
3521 static __inline int
3522 bge_cksum_pad(struct mbuf *m)
3523 {
3524 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3525 struct mbuf *last;
3526
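/*
 * ETHER_MIN_NOPAD is ETHER_MIN_LEN (64) minus ETHER_CRC_LEN (4),
 * i.e. 60 bytes; a 42-byte runt therefore gets 18 bytes of zeros
 * appended before the checksum engine sees it.
 */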
3527 /* If there's only the packet-header mbuf and we can pad there, use it. */
3528 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3529 M_TRAILINGSPACE(m) >= padlen) {
3530 last = m;
3531 } else {
3532 /*
3533 * Walk packet chain to find last mbuf. We will either
3534 * pad there, or append a new mbuf and pad it.
3535 */
3536 for (last = m; last->m_next != NULL; last = last->m_next);
3537 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3538 /* Allocate new empty mbuf, pad it. Compact later. */
3539 struct mbuf *n;
3540
3541 MGET(n, M_DONTWAIT, MT_DATA);
3542 if (n == NULL)
3543 return (ENOBUFS);
3544 n->m_len = 0;
3545 last->m_next = n;
3546 last = n;
3547 }
3548 }
3549
3550 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
3551 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3552 last->m_len += padlen;
3553 m->m_pkthdr.len += padlen;
3554
3555 return (0);
3556 }
3557
3558 /*
3559 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3560 * pointers to descriptors.
3561 */
3562 static int
3563 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3564 {
3565 bus_dma_segment_t segs[BGE_NSEG_NEW];
3566 bus_dmamap_t map;
3567 struct bge_tx_bd *d;
3568 struct mbuf *m = *m_head;
3569 uint32_t idx = *txidx;
3570 uint16_t csum_flags;
3571 int nsegs, i, error;
3572
3573 csum_flags = 0;
3574 if (m->m_pkthdr.csum_flags) {
3575 if (m->m_pkthdr.csum_flags & CSUM_IP)
3576 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3577 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3578 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3579 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3580 (error = bge_cksum_pad(m)) != 0) {
3581 m_freem(m);
3582 *m_head = NULL;
3583 return (error);
3584 }
3585 }
3586 if (m->m_flags & M_LASTFRAG)
3587 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3588 else if (m->m_flags & M_FRAG)
3589 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3590 }
3591
3592 map = sc->bge_cdata.bge_tx_dmamap[idx];
3593 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3594 &nsegs, BUS_DMA_NOWAIT);
3595 if (error == EFBIG) {
3596 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
3597 if (m == NULL) {
3598 m_freem(*m_head);
3599 *m_head = NULL;
3600 return (ENOBUFS);
3601 }
3602 *m_head = m;
3603 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3604 segs, &nsegs, BUS_DMA_NOWAIT);
3605 if (error) {
3606 m_freem(m);
3607 *m_head = NULL;
3608 return (error);
3609 }
3610 } else if (error != 0)
3611 return (error);
3612
3613 /*
3614 * Sanity check: avoid coming within 16 descriptors
3615 * of the end of the ring.
3616 */
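/*
 * E.g. with BGE_TX_RING_CNT (512) descriptors of which bge_txcnt
 * are in use, a chain of nsegs segments is accepted only while at
 * least nsegs + 16 slots remain free.
 */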
3617 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3618 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3619 return (ENOBUFS);
3620 }
3621
3622 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3623
3624 for (i = 0; ; i++) {
3625 d = &sc->bge_ldata.bge_tx_ring[idx];
3626 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3627 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3628 d->bge_len = segs[i].ds_len;
3629 d->bge_flags = csum_flags;
3630 if (i == nsegs - 1)
3631 break;
3632 BGE_INC(idx, BGE_TX_RING_CNT);
3633 }
3634
3635 /* Mark the last segment as end of packet... */
3636 d->bge_flags |= BGE_TXBDFLAG_END;
3637
3638 /* ... and put VLAN tag into first segment. */
3639 d = &sc->bge_ldata.bge_tx_ring[*txidx];
3640 #if __FreeBSD_version > 700022
3641 if (m->m_flags & M_VLANTAG) {
3642 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3643 d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3644 } else
3645 d->bge_vlan_tag = 0;
3646 #else
3647 {
3648 struct m_tag *mtag;
3649
3650 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) {
3651 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3652 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3653 } else
3654 d->bge_vlan_tag = 0;
3655 }
3656 #endif
3657
3658 /*
3659 * Ensure that the map for this transmission
3660 * is placed at the array index of the last descriptor
3661 * in this chain.
3662 */
3663 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3664 sc->bge_cdata.bge_tx_dmamap[idx] = map;
3665 sc->bge_cdata.bge_tx_chain[idx] = m;
3666 sc->bge_txcnt += nsegs;
3667
3668 BGE_INC(idx, BGE_TX_RING_CNT);
3669 *txidx = idx;
3670
3671 return (0);
3672 }
3673
3674 /*
3675 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3676 * to the mbuf data regions directly in the transmit descriptors.
3677 */
3678 static void
3679 bge_start_locked(struct ifnet *ifp)
3680 {
3681 struct bge_softc *sc;
3682 struct mbuf *m_head = NULL;
3683 uint32_t prodidx;
3684 int count = 0;
3685
3686 sc = ifp->if_softc;
3687
3688 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3689 return;
3690
3691 prodidx = sc->bge_tx_prodidx;
3692
3693 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3694 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3695 if (m_head == NULL)
3696 break;
3697
3698 /*
3699 * XXX
3700 * The code inside the if() block below is never reached, since
3701 * we would have to set CSUM_IP_FRAGS in our if_hwassist before
3702 * the stack would ask us to checksum TCP/UDP in a fragmented packet.
3703 *
3704 * XXX
3705 * safety overkill. If this is a fragmented packet chain
3706 * with delayed TCP/UDP checksums, then only encapsulate
3707 * it if we have enough descriptors to handle the entire
3708 * chain at once.
3709 * (paranoia -- may not actually be needed)
3710 */
3711 if (m_head->m_flags & M_FIRSTFRAG &&
3712 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3713 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3714 m_head->m_pkthdr.csum_data + 16) {
3715 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3716 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3717 break;
3718 }
3719 }
3720
3721 /*
3722 * Pack the data into the transmit ring. If we
3723 * don't have room, set the OACTIVE flag and wait
3724 * for the NIC to drain the ring.
3725 */
3726 if (bge_encap(sc, &m_head, &prodidx)) {
3727 if (m_head == NULL)
3728 break;
3729 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3730 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3731 break;
3732 }
3733 ++count;
3734
3735 /*
3736 * If there's a BPF listener, bounce a copy of this frame
3737 * to him.
3738 */
3739 #ifdef ETHER_BPF_MTAP
3740 ETHER_BPF_MTAP(ifp, m_head);
3741 #else
3742 BPF_MTAP(ifp, m_head);
3743 #endif
3744 }
3745
3746 if (count == 0)
3747 /* No packets were dequeued. */
3748 return;
3749
3750 /* Transmit. */
3751 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3752 /* 5700 b2 errata */
3753 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3754 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3755
3756 sc->bge_tx_prodidx = prodidx;
3757
3758 /*
3759 * Set a timeout in case the chip goes out to lunch.
3760 */
3761 sc->bge_timer = 5;
3762 }
3763
3764 /*
3765 * ifnet if_start entry point: acquire the driver lock and hand
3766 * the send queue to bge_start_locked().
3767 */
3768 static void
3769 bge_start(struct ifnet *ifp)
3770 {
3771 struct bge_softc *sc;
3772
3773 sc = ifp->if_softc;
3774 BGE_LOCK(sc);
3775 bge_start_locked(ifp);
3776 BGE_UNLOCK(sc);
3777 }
3778
3779 static void
3780 bge_init_locked(struct bge_softc *sc)
3781 {
3782 struct ifnet *ifp;
3783 uint16_t *m;
3784
3785 BGE_LOCK_ASSERT(sc);
3786
3787 ifp = sc->bge_ifp;
3788
3789 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3790 return;
3791
3792 /* Cancel pending I/O and flush buffers. */
3793 bge_stop(sc);
3794
3795 bge_stop_fw(sc);
3796 bge_sig_pre_reset(sc, BGE_RESET_START);
3797 bge_reset(sc);
3798 bge_sig_legacy(sc, BGE_RESET_START);
3799 bge_sig_post_reset(sc, BGE_RESET_START);
3800
3801 bge_chipinit(sc);
3802
3803 /*
3804 * Init the various state machines, ring
3805 * control blocks and firmware.
3806 */
3807 if (bge_blockinit(sc)) {
3808 device_printf(sc->bge_dev, "initialization failure\n");
3809 return;
3810 }
3811
3812 ifp = sc->bge_ifp;
3813
3814 /* Specify MTU. */
3815 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3816 ETHER_HDR_LEN + ETHER_CRC_LEN +
3817 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
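/*
 * E.g. the default 1500-byte MTU programs 1500 + 14 + 4 = 1518,
 * or 1522 when room for the 4-byte 802.1Q tag is included.
 */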
3818
3819 /* Load our MAC address. */
3820 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3821 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3822 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
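/*
 * The address is packed as three 16-bit words; htons() makes the
 * packing independent of host endianness. A hypothetical
 * 00:11:22:33:44:55 yields ADDR1_LO = 0x0011 and
 * ADDR1_HI = 0x22334455.
 */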
3823
3824 /* Program promiscuous mode. */
3825 bge_setpromisc(sc);
3826
3827 /* Program multicast filter. */
3828 bge_setmulti(sc);
3829
3830 /* Program VLAN tag stripping. */
3831 bge_setvlan(sc);
3832
3833 /* Init RX ring. */
3834 bge_init_rx_ring_std(sc);
3835
3836 /*
3837 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3838 * memory to ensure that the chip has in fact read the first
3839 * entry of the ring.
3840 */
3841 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3842 uint32_t v, i;
3843 for (i = 0; i < 10; i++) {
3844 DELAY(20);
3845 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3846 if (v == (MCLBYTES - ETHER_ALIGN))
3847 break;
3848 }
3849 if (i == 10)
3850 device_printf(sc->bge_dev,
3851 "5705 A0 chip failed to load RX ring\n");
3852 }
3853
3854 /* Init jumbo RX ring. */
3855 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3856 bge_init_rx_ring_jumbo(sc);
3857
3858 /* Init our RX return ring index. */
3859 sc->bge_rx_saved_considx = 0;
3860
3861 /* Init our RX/TX stat counters. */
3862 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3863
3864 /* Init TX ring. */
3865 bge_init_tx_ring(sc);
3866
3867 /* Turn on transmitter. */
3868 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3869
3870 /* Turn on receiver. */
3871 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3872
3873 /* Tell firmware we're alive. */
3874 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3875
3876 #ifdef DEVICE_POLLING
3877 /* Disable interrupts if we are polling. */
3878 if (ifp->if_capenable & IFCAP_POLLING) {
3879 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3880 BGE_PCIMISCCTL_MASK_PCI_INTR);
3881 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3882 } else
3883 #endif
3884
3885 /* Enable host interrupts. */
3886 {
3887 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3888 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3889 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3890 }
3891
3892 bge_ifmedia_upd_locked(ifp);
3893
3894 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3895 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3896
3897 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3898 }
3899
3900 static void
3901 bge_init(void *xsc)
3902 {
3903 struct bge_softc *sc = xsc;
3904
3905 BGE_LOCK(sc);
3906 bge_init_locked(sc);
3907 BGE_UNLOCK(sc);
3908 }
3909
3910 /*
3911 * Set media options.
3912 */
3913 static int
3914 bge_ifmedia_upd(struct ifnet *ifp)
3915 {
3916 struct bge_softc *sc = ifp->if_softc;
3917 int res;
3918
3919 BGE_LOCK(sc);
3920 res = bge_ifmedia_upd_locked(ifp);
3921 BGE_UNLOCK(sc);
3922
3923 return (res);
3924 }
3925
3926 static int
3927 bge_ifmedia_upd_locked(struct ifnet *ifp)
3928 {
3929 struct bge_softc *sc = ifp->if_softc;
3930 struct mii_data *mii;
3931 struct mii_softc *miisc;
3932 struct ifmedia *ifm;
3933
3934 BGE_LOCK_ASSERT(sc);
3935
3936 ifm = &sc->bge_ifmedia;
3937
3938 /* If this is a 1000baseX NIC, enable the TBI port. */
3939 if (sc->bge_flags & BGE_FLAG_TBI) {
3940 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3941 return (EINVAL);
3942 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3943 case IFM_AUTO:
3944 /*
3945 * The BCM5704 ASIC appears to have a special
3946 * mechanism for programming the autoneg
3947 * advertisement registers in TBI mode.
3948 */
3949 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3950 uint32_t sgdig;
3951 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3952 if (sgdig & BGE_SGDIGSTS_DONE) {
3953 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3954 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3955 sgdig |= BGE_SGDIGCFG_AUTO |
3956 BGE_SGDIGCFG_PAUSE_CAP |
3957 BGE_SGDIGCFG_ASYM_PAUSE;
3958 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3959 sgdig | BGE_SGDIGCFG_SEND);
3960 DELAY(5);
3961 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3962 }
3963 }
3964 break;
3965 case IFM_1000_SX:
3966 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3967 BGE_CLRBIT(sc, BGE_MAC_MODE,
3968 BGE_MACMODE_HALF_DUPLEX);
3969 } else {
3970 BGE_SETBIT(sc, BGE_MAC_MODE,
3971 BGE_MACMODE_HALF_DUPLEX);
3972 }
3973 break;
3974 default:
3975 return (EINVAL);
3976 }
3977 return (0);
3978 }
3979
3980 sc->bge_link_evt++;
3981 mii = device_get_softc(sc->bge_miibus);
3982 if (mii->mii_instance)
3983 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3984 mii_phy_reset(miisc);
3985 mii_mediachg(mii);
3986
3987 /*
3988 * Force an interrupt so that we will call bge_link_upd
3989 * if needed and clear any pending link state attention.
3990 * Without this we get no further interrupts for link
3991 * state changes, and thus we never mark the link UP and
3992 * cannot send in bge_start_locked. The only other
3993 * way to get things working was to receive a packet and
3994 * get an RX intr.
3995 * bge_tick should help for fiber cards, and we might not
3996 * need to do this here if BGE_FLAG_TBI is set, but since
3997 * we poll for fiber anyway it does no harm.
3998 */
3999 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4000 sc->bge_flags & BGE_FLAG_5788)
4001 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4002 else
4003 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4004
4005 return (0);
4006 }
4007
4008 /*
4009 * Report current media status.
4010 */
4011 static void
4012 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4013 {
4014 struct bge_softc *sc = ifp->if_softc;
4015 struct mii_data *mii;
4016
4017 BGE_LOCK(sc);
4018
4019 if (sc->bge_flags & BGE_FLAG_TBI) {
4020 ifmr->ifm_status = IFM_AVALID;
4021 ifmr->ifm_active = IFM_ETHER;
4022 if (CSR_READ_4(sc, BGE_MAC_STS) &
4023 BGE_MACSTAT_TBI_PCS_SYNCHED)
4024 ifmr->ifm_status |= IFM_ACTIVE;
4025 else {
4026 ifmr->ifm_active |= IFM_NONE;
4027 BGE_UNLOCK(sc);
4028 return;
4029 }
4030 ifmr->ifm_active |= IFM_1000_SX;
4031 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4032 ifmr->ifm_active |= IFM_HDX;
4033 else
4034 ifmr->ifm_active |= IFM_FDX;
4035 BGE_UNLOCK(sc);
4036 return;
4037 }
4038
4039 mii = device_get_softc(sc->bge_miibus);
4040 mii_pollstat(mii);
4041 ifmr->ifm_active = mii->mii_media_active;
4042 ifmr->ifm_status = mii->mii_media_status;
4043
4044 BGE_UNLOCK(sc);
4045 }
4046
4047 static int
4048 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4049 {
4050 struct bge_softc *sc = ifp->if_softc;
4051 struct ifreq *ifr = (struct ifreq *) data;
4052 struct mii_data *mii;
4053 int flags, mask, error = 0;
4054
4055 switch (command) {
4056 case SIOCSIFMTU:
4057 if (ifr->ifr_mtu < ETHERMIN ||
4058 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
4059 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
4060 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
4061 ifr->ifr_mtu > ETHERMTU))
4062 error = EINVAL;
4063 else if (ifp->if_mtu != ifr->ifr_mtu) {
4064 ifp->if_mtu = ifr->ifr_mtu;
4065 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4066 bge_init(sc);
4067 }
4068 break;
4069 case SIOCSIFFLAGS:
4070 BGE_LOCK(sc);
4071 if (ifp->if_flags & IFF_UP) {
4072 /*
4073 * If only the state of the PROMISC flag changed,
4074 * then just use the 'set promisc mode' command
4075 * instead of reinitializing the entire NIC. Doing
4076 * a full re-init means reloading the firmware and
4077 * waiting for it to start up, which may take a
4078 * second or two. Similarly for ALLMULTI.
4079 */
4080 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4081 flags = ifp->if_flags ^ sc->bge_if_flags;
4082 if (flags & IFF_PROMISC)
4083 bge_setpromisc(sc);
4084 if (flags & IFF_ALLMULTI)
4085 bge_setmulti(sc);
4086 } else
4087 bge_init_locked(sc);
4088 } else {
4089 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4090 bge_stop(sc);
4091 }
4092 }
4093 sc->bge_if_flags = ifp->if_flags;
4094 BGE_UNLOCK(sc);
4095 error = 0;
4096 break;
4097 case SIOCADDMULTI:
4098 case SIOCDELMULTI:
4099 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4100 BGE_LOCK(sc);
4101 bge_setmulti(sc);
4102 BGE_UNLOCK(sc);
4103 error = 0;
4104 }
4105 break;
4106 case SIOCSIFMEDIA:
4107 case SIOCGIFMEDIA:
4108 if (sc->bge_flags & BGE_FLAG_TBI) {
4109 error = ifmedia_ioctl(ifp, ifr,
4110 &sc->bge_ifmedia, command);
4111 } else {
4112 mii = device_get_softc(sc->bge_miibus);
4113 error = ifmedia_ioctl(ifp, ifr,
4114 &mii->mii_media, command);
4115 }
4116 break;
4117 case SIOCSIFCAP:
4118 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4119 #ifdef DEVICE_POLLING
4120 if (mask & IFCAP_POLLING) {
4121 if (ifr->ifr_reqcap & IFCAP_POLLING) {
4122 error = ether_poll_register(bge_poll, ifp);
4123 if (error)
4124 return (error);
4125 BGE_LOCK(sc);
4126 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
4127 BGE_PCIMISCCTL_MASK_PCI_INTR);
4128 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4129 ifp->if_capenable |= IFCAP_POLLING;
4130 BGE_UNLOCK(sc);
4131 } else {
4132 error = ether_poll_deregister(ifp);
4133 /* Enable interrupts even in the error case. */
4134 BGE_LOCK(sc);
4135 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
4136 BGE_PCIMISCCTL_MASK_PCI_INTR);
4137 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4138 ifp->if_capenable &= ~IFCAP_POLLING;
4139 BGE_UNLOCK(sc);
4140 }
4141 }
4142 #endif
4143 if (mask & IFCAP_HWCSUM) {
4144 ifp->if_capenable ^= IFCAP_HWCSUM;
4145 if (IFCAP_HWCSUM & ifp->if_capenable &&
4146 IFCAP_HWCSUM & ifp->if_capabilities)
4147 ifp->if_hwassist = BGE_CSUM_FEATURES;
4148 else
4149 ifp->if_hwassist = 0;
4150 #ifdef VLAN_CAPABILITIES
4151 VLAN_CAPABILITIES(ifp);
4152 #endif
4153 }
4154
4155 if (mask & IFCAP_VLAN_MTU) {
4156 ifp->if_capenable ^= IFCAP_VLAN_MTU;
4157 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4158 bge_init(sc);
4159 }
4160
4161 if (mask & IFCAP_VLAN_HWTAGGING) {
4162 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4163 BGE_LOCK(sc);
4164 bge_setvlan(sc);
4165 BGE_UNLOCK(sc);
4166 #ifdef VLAN_CAPABILITIES
4167 VLAN_CAPABILITIES(ifp);
4168 #endif
4169 }
4170
4171 break;
4172 default:
4173 error = ether_ioctl(ifp, command, data);
4174 break;
4175 }
4176
4177 return (error);
4178 }
4179
4180 static void
4181 bge_watchdog(struct bge_softc *sc)
4182 {
4183 struct ifnet *ifp;
4184
4185 BGE_LOCK_ASSERT(sc);
4186
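/*
 * bge_timer == 0 means the watchdog is disarmed; otherwise it is
 * decremented once per second by bge_tick() and we only act when
 * it reaches zero.
 */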
4187 if (sc->bge_timer == 0 || --sc->bge_timer)
4188 return;
4189
4190 ifp = sc->bge_ifp;
4191
4192 if_printf(ifp, "watchdog timeout -- resetting\n");
4193
4194 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4195 bge_init_locked(sc);
4196
4197 ifp->if_oerrors++;
4198 }
4199
4200 /*
4201 * Stop the adapter and free any mbufs allocated to the
4202 * RX and TX lists.
4203 */
4204 static void
4205 bge_stop(struct bge_softc *sc)
4206 {
4207 struct ifnet *ifp;
4208 struct ifmedia_entry *ifm;
4209 struct mii_data *mii = NULL;
4210 int mtmp, itmp;
4211
4212 BGE_LOCK_ASSERT(sc);
4213
4214 ifp = sc->bge_ifp;
4215
4216 if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
4217 mii = device_get_softc(sc->bge_miibus);
4218
4219 callout_stop(&sc->bge_stat_ch);
4220
4221 /*
4222 * Disable all of the receiver blocks.
4223 */
4224 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4225 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4226 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4227 if (!(BGE_IS_5705_PLUS(sc)))
4228 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4229 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4230 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4231 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4232
4233 /*
4234 * Disable all of the transmit blocks.
4235 */
4236 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4237 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4238 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4239 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4240 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4241 if (!(BGE_IS_5705_PLUS(sc)))
4242 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4243 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4244
4245 /*
4246 * Shut down all of the memory managers and related
4247 * state machines.
4248 */
4249 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4250 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4251 if (!(BGE_IS_5705_PLUS(sc)))
4252 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4253 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4254 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4255 if (!(BGE_IS_5705_PLUS(sc))) {
4256 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4257 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4258 }
4259
4260 /* Disable host interrupts. */
4261 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4262 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4263
4264 /*
4265 * Tell firmware we're shutting down.
4266 */
4267
4268 bge_stop_fw(sc);
4269 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4270 bge_reset(sc);
4271 bge_sig_legacy(sc, BGE_RESET_STOP);
4272 bge_sig_post_reset(sc, BGE_RESET_STOP);
4273
4274 /*
4275 * Keep the ASF firmware running if up.
4276 */
4277 if (sc->bge_asf_mode & ASF_STACKUP)
4278 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4279 else
4280 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4281
4282 /* Free the RX lists. */
4283 bge_free_rx_ring_std(sc);
4284
4285 /* Free jumbo RX list. */
4286 if (BGE_IS_JUMBO_CAPABLE(sc))
4287 bge_free_rx_ring_jumbo(sc);
4288
4289 /* Free TX buffers. */
4290 bge_free_tx_ring(sc);
4291
4292 /*
4293 * Isolate/power down the PHY, but leave the media selection
4294 * unchanged so that things will be put back to normal when
4295 * we bring the interface back up.
4296 */
4297 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4298 itmp = ifp->if_flags;
4299 ifp->if_flags |= IFF_UP;
4300 /*
4301 * If we are called from bge_detach(), mii is already NULL.
4302 */
4303 if (mii != NULL) {
4304 ifm = mii->mii_media.ifm_cur;
4305 mtmp = ifm->ifm_media;
4306 ifm->ifm_media = IFM_ETHER | IFM_NONE;
4307 mii_mediachg(mii);
4308 ifm->ifm_media = mtmp;
4309 }
4310 ifp->if_flags = itmp;
4311 }
4312
4313 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4314
4315 /* Clear MAC's link state (PHY may still have link UP). */
4316 if (bootverbose && sc->bge_link)
4317 if_printf(sc->bge_ifp, "link DOWN\n");
4318 sc->bge_link = 0;
4319
4320 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4321 }
4322
4323 /*
4324 * Stop all chip I/O so that the kernel's probe routines don't
4325 * get confused by errant DMAs when rebooting.
4326 */
4327 static int
4328 bge_shutdown(device_t dev)
4329 {
4330 struct bge_softc *sc;
4331
4332 sc = device_get_softc(dev);
4333 BGE_LOCK(sc);
4334 bge_stop(sc);
4335 bge_reset(sc);
4336 BGE_UNLOCK(sc);
4337
4338 return (0);
4339 }
4340
4341 static int
4342 bge_suspend(device_t dev)
4343 {
4344 struct bge_softc *sc;
4345
4346 sc = device_get_softc(dev);
4347 BGE_LOCK(sc);
4348 bge_stop(sc);
4349 BGE_UNLOCK(sc);
4350
4351 return (0);
4352 }
4353
4354 static int
4355 bge_resume(device_t dev)
4356 {
4357 struct bge_softc *sc;
4358 struct ifnet *ifp;
4359
4360 sc = device_get_softc(dev);
4361 BGE_LOCK(sc);
4362 ifp = sc->bge_ifp;
4363 if (ifp->if_flags & IFF_UP) {
4364 bge_init_locked(sc);
4365 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4366 bge_start_locked(ifp);
4367 }
4368 BGE_UNLOCK(sc);
4369
4370 return (0);
4371 }
4372
4373 static void
4374 bge_link_upd(struct bge_softc *sc)
4375 {
4376 struct mii_data *mii;
4377 uint32_t link, status;
4378
4379 BGE_LOCK_ASSERT(sc);
4380
4381 /* Clear 'pending link event' flag. */
4382 sc->bge_link_evt = 0;
4383
4384 /*
4385 * Process link state changes.
4386 * Grrr. The link status word in the status block does
4387 * not work correctly on the BCM5700 rev AX and BX chips,
4388 * according to all available information. Hence, we have
4389 * to enable MII interrupts in order to properly obtain
4390 * async link changes. Unfortunately, this also means that
4391 * we have to read the MAC status register to detect link
4392 * changes, thereby adding an additional register access to
4393 * the interrupt handler.
4394 *
4395 * XXX: perhaps the link state detection procedure used for
4396 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4397 */
4398
4399 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4400 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4401 status = CSR_READ_4(sc, BGE_MAC_STS);
4402 if (status & BGE_MACSTAT_MI_INTERRUPT) {
4403 mii = device_get_softc(sc->bge_miibus);
4404 mii_pollstat(mii);
4405 if (!sc->bge_link &&
4406 mii->mii_media_status & IFM_ACTIVE &&
4407 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4408 sc->bge_link++;
4409 if (bootverbose)
4410 if_printf(sc->bge_ifp, "link UP\n");
4411 } else if (sc->bge_link &&
4412 (!(mii->mii_media_status & IFM_ACTIVE) ||
4413 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4414 sc->bge_link = 0;
4415 if (bootverbose)
4416 if_printf(sc->bge_ifp, "link DOWN\n");
4417 }
4418
4419 /* Clear the interrupt. */
4420 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4421 BGE_EVTENB_MI_INTERRUPT);
4422 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4423 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4424 BRGPHY_INTRS);
4425 }
4426 return;
4427 }
4428
4429 if (sc->bge_flags & BGE_FLAG_TBI) {
4430 status = CSR_READ_4(sc, BGE_MAC_STS);
4431 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4432 if (!sc->bge_link) {
4433 sc->bge_link++;
4434 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4435 BGE_CLRBIT(sc, BGE_MAC_MODE,
4436 BGE_MACMODE_TBI_SEND_CFGS);
4437 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4438 if (bootverbose)
4439 if_printf(sc->bge_ifp, "link UP\n");
4440 if_link_state_change(sc->bge_ifp,
4441 LINK_STATE_UP);
4442 }
4443 } else if (sc->bge_link) {
4444 sc->bge_link = 0;
4445 if (bootverbose)
4446 if_printf(sc->bge_ifp, "link DOWN\n");
4447 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4448 }
4449 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4450 /*
4451 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4452 * bit in the status word always set. Work around this bug by
4453 * reading the PHY link status directly.
4454 */
4455 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4456
4457 if (link != sc->bge_link ||
4458 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4459 mii = device_get_softc(sc->bge_miibus);
4460 mii_pollstat(mii);
4461 if (!sc->bge_link &&
4462 mii->mii_media_status & IFM_ACTIVE &&
4463 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4464 sc->bge_link++;
4465 if (bootverbose)
4466 if_printf(sc->bge_ifp, "link UP\n");
4467 } else if (sc->bge_link &&
4468 (!(mii->mii_media_status & IFM_ACTIVE) ||
4469 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4470 sc->bge_link = 0;
4471 if (bootverbose)
4472 if_printf(sc->bge_ifp, "link DOWN\n");
4473 }
4474 }
4475 } else {
4476 /*
4477 * Discard link events for MII/GMII controllers
4478 * if MI auto-polling is disabled.
4479 */
4480 }
4481
4482 /* Clear the attention. */
4483 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
4484 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
4485 BGE_MACSTAT_LINK_CHANGED);
4486 }
4487
4488 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
4489 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
4490 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
4491 desc)
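/*
 * All statistics nodes share bge_sysctl_stats() as their handler;
 * arg2 carries the byte offset of the counter within struct
 * bge_stats, so the macro only has to name the field.
 */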
4492
4493 static void
4494 bge_add_sysctls(struct bge_softc *sc)
4495 {
4496 struct sysctl_ctx_list *ctx;
4497 struct sysctl_oid_list *children, *schildren;
4498 struct sysctl_oid *tree;
4499
4500 ctx = device_get_sysctl_ctx(sc->bge_dev);
4501 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4502
4503 #ifdef BGE_REGISTER_DEBUG
4504 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4505 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4506 "Debug Information");
4507
4508 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4509 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4510 "Register Read");
4511
4512 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4513 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4514 "Memory Read");
4515
4516 #endif
4517
4518 if (BGE_IS_5705_PLUS(sc))
4519 return;
4520
4521 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4522 NULL, "BGE Statistics");
4523 schildren = children = SYSCTL_CHILDREN(tree);
4524 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4525 children, COSFramesDroppedDueToFilters,
4526 "FramesDroppedDueToFilters");
4527 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4528 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4529 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4530 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4531 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4532 children, nicNoMoreRxBDs, "NoMoreRxBDs");
4533 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
4534 children, ifInDiscards, "InputDiscards");
4535 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
4536 children, ifInErrors, "InputErrors");
4537 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4538 children, nicRecvThresholdHit, "RecvThresholdHit");
4539 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4540 children, nicDmaReadQueueFull, "DmaReadQueueFull");
4541 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4542 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4543 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4544 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4545 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4546 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4547 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4548 children, nicRingStatusUpdate, "RingStatusUpdate");
4549 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4550 children, nicInterrupts, "Interrupts");
4551 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4552 children, nicAvoidedInterrupts, "AvoidedInterrupts");
4553 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4554 children, nicSendThresholdHit, "SendThresholdHit");
4555
4556 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4557 NULL, "BGE RX Statistics");
4558 children = SYSCTL_CHILDREN(tree);
4559 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4560 children, rxstats.ifHCInOctets, "Octets");
4561 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4562 children, rxstats.etherStatsFragments, "Fragments");
4563 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4564 children, rxstats.ifHCInUcastPkts, "UcastPkts");
4565 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4566 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4567 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4568 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4569 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4570 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4571 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4572 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4573 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4574 children, rxstats.xoffPauseFramesReceived,
4575 "xoffPauseFramesReceived");
4576 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4577 children, rxstats.macControlFramesReceived,
4578 "ControlFramesReceived");
4579 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4580 children, rxstats.xoffStateEntered, "xoffStateEntered");
4581 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4582 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4583 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4584 children, rxstats.etherStatsJabbers, "Jabbers");
4585 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4586 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4587 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
4588 children, rxstats.inRangeLengthError, "inRangeLengthError");
4589 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
4590 children, rxstats.outRangeLengthError, "outRangeLengthError");
4591
4592 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4593 NULL, "BGE TX Statistics");
4594 children = SYSCTL_CHILDREN(tree);
4595 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4596 children, txstats.ifHCOutOctets, "Octets");
4597 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4598 children, txstats.etherStatsCollisions, "Collisions");
4599 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4600 children, txstats.outXonSent, "XonSent");
4601 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4602 children, txstats.outXoffSent, "XoffSent");
4603 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4604 children, txstats.flowControlDone, "flowControlDone");
4605 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4606 children, txstats.dot3StatsInternalMacTransmitErrors,
4607 "InternalMacTransmitErrors");
4608 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4609 children, txstats.dot3StatsSingleCollisionFrames,
4610 "SingleCollisionFrames");
4611 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4612 children, txstats.dot3StatsMultipleCollisionFrames,
4613 "MultipleCollisionFrames");
4614 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4615 children, txstats.dot3StatsDeferredTransmissions,
4616 "DeferredTransmissions");
4617 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4618 children, txstats.dot3StatsExcessiveCollisions,
4619 "ExcessiveCollisions");
4620 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
4621 children, txstats.dot3StatsLateCollisions,
4622 "LateCollisions");
4623 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4624 children, txstats.ifHCOutUcastPkts, "UcastPkts");
4625 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4626 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4627 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4628 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4629 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4630 children, txstats.dot3StatsCarrierSenseErrors,
4631 "CarrierSenseErrors");
4632 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4633 children, txstats.ifOutDiscards, "Discards");
4634 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4635 children, txstats.ifOutErrors, "Errors");
4636 }
4637
4638 static int
4639 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
4640 {
4641 struct bge_softc *sc;
4642 uint32_t result;
4643 int offset;
4644
4645 sc = (struct bge_softc *)arg1;
4646 offset = arg2;
4647 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
4648 offsetof(bge_hostaddr, bge_addr_lo));
4649 return (sysctl_handle_int(oidp, &result, 0, req));
4650 }
4651
4652 #ifdef BGE_REGISTER_DEBUG
4653 static int
4654 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4655 {
4656 struct bge_softc *sc;
4657 uint16_t *sbdata;
4658 int error;
4659 int result;
4660 int i, j;
4661
4662 result = -1;
4663 error = sysctl_handle_int(oidp, &result, 0, req);
4664 if (error || (req->newptr == NULL))
4665 return (error);
4666
4667 if (result == 1) {
4668 sc = (struct bge_softc *)arg1;
4669
4670 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4671 printf("Status Block:\n");
4672 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
4673 printf("%06x:", i);
4674 for (j = 0; j < 8; j++) {
4675 printf(" %04x", sbdata[i]);
4676 i += 4;
4677 }
4678 printf("\n");
4679 }
4680
4681 printf("Registers:\n");
4682 for (i = 0x800; i < 0xA00; ) {
4683 printf("%06x:", i);
4684 for (j = 0; j < 8; j++) {
4685 printf(" %08x", CSR_READ_4(sc, i));
4686 i += 4;
4687 }
4688 printf("\n");
4689 }
4690
4691 printf("Hardware Flags:\n");
4692 if (BGE_IS_575X_PLUS(sc))
4693 printf(" - 575X Plus\n");
4694 if (BGE_IS_5705_PLUS(sc))
4695 printf(" - 5705 Plus\n");
4696 if (BGE_IS_5714_FAMILY(sc))
4697 printf(" - 5714 Family\n");
4698 if (BGE_IS_5700_FAMILY(sc))
4699 printf(" - 5700 Family\n");
4700 if (sc->bge_flags & BGE_FLAG_JUMBO)
4701 printf(" - Supports Jumbo Frames\n");
4702 if (sc->bge_flags & BGE_FLAG_PCIX)
4703 printf(" - PCI-X Bus\n");
4704 if (sc->bge_flags & BGE_FLAG_PCIE)
4705 printf(" - PCI Express Bus\n");
4706 if (sc->bge_flags & BGE_FLAG_NO_3LED)
4707 printf(" - No 3 LEDs\n");
4708 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4709 printf(" - RX Alignment Bug\n");
4710 }
4711
4712 return (error);
4713 }
4714
4715 static int
4716 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4717 {
4718 struct bge_softc *sc;
4719 int error;
4720 int result; /* sysctl_handle_int() stores a full int */
4721 uint32_t val;
4722
4723 result = -1;
4724 error = sysctl_handle_int(oidp, &result, 0, req);
4725 if (error || (req->newptr == NULL))
4726 return (error);
4727
4728 if (result >= 0 && result < 0x8000) {
4729 sc = (struct bge_softc *)arg1;
4730 val = CSR_READ_4(sc, result);
4731 printf("reg 0x%06X = 0x%08X\n", result, val);
4732 }
4733
4734 return (error);
4735 }
4736
4737 static int
4738 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4739 {
4740 struct bge_softc *sc;
4741 int error;
4742 int result; /* sysctl_handle_int() stores a full int */
4743 uint32_t val;
4744
4745 result = -1;
4746 error = sysctl_handle_int(oidp, &result, 0, req);
4747 if (error || (req->newptr == NULL))
4748 return (error);
4749
4750 if (result >= 0 && result < 0x8000) {
4751 sc = (struct bge_softc *)arg1;
4752 val = bge_readmem_ind(sc, result);
4753 printf("mem 0x%06X = 0x%08X\n", result, val);
4754 }
4755
4756 return (error);
4757 }
4758 #endif
4759
4760 static int
4761 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
4762 {
4763
4764 if (sc->bge_flags & BGE_FLAG_EADDR)
4765 return (1);
4766
4767 #ifdef __sparc64__
4768 OF_getetheraddr(sc->bge_dev, ether_addr);
4769 return (0);
4770 #endif
4771 return (1);
4772 }
4773
4774 static int
4775 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
4776 {
4777 uint32_t mac_addr;
4778
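/*
 * If the bootcode has stored a valid MAC address in NIC SRAM, it
 * tags the word at 0x0c14 with the ASCII signature "HK" (0x484b)
 * in the upper 16 bits; the first two address octets occupy the
 * low half and the remaining four sit at 0x0c18.
 */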
4779 mac_addr = bge_readmem_ind(sc, 0x0c14);
4780 if ((mac_addr >> 16) == 0x484b) {
4781 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4782 ether_addr[1] = (uint8_t)mac_addr;
4783 mac_addr = bge_readmem_ind(sc, 0x0c18);
4784 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4785 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4786 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4787 ether_addr[5] = (uint8_t)mac_addr;
4788 return (0);
4789 }
4790 return (1);
4791 }
4792
4793 static int
4794 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
4795 {
4796 int mac_offset = BGE_EE_MAC_OFFSET;
4797
4798 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4799 mac_offset = BGE_EE_MAC_OFFSET_5906;
4800
4801 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4802 ETHER_ADDR_LEN));
4803 }
4804
4805 static int
4806 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
4807 {
4808
4809 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
4810 return (1);
4811
4812 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4813 ETHER_ADDR_LEN));
4814 }
4815
4816 static int
4817 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
4818 {
4819 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4820 /* NOTE: Order is critical */
4821 bge_get_eaddr_fw,
4822 bge_get_eaddr_mem,
4823 bge_get_eaddr_nvram,
4824 bge_get_eaddr_eeprom,
4825 NULL
4826 };
4827 const bge_eaddr_fcn_t *func;
4828
4829 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4830 if ((*func)(sc, eaddr) == 0)
4831 break;
4832 }
4833 return (*func == NULL ? ENXIO : 0);
4834 }