FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c
1 /*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 /*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84
85 #include <net/if.h>
86 #include <net/if_arp.h>
87 #include <net/ethernet.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90
91 #include <net/bpf.h>
92
93 #include <net/if_types.h>
94 #include <net/if_vlan_var.h>
95
96 #include <netinet/in_systm.h>
97 #include <netinet/in.h>
98 #include <netinet/ip.h>
99 #include <netinet/tcp.h>
100
101 #include <machine/bus.h>
102 #include <machine/resource.h>
103 #include <sys/bus.h>
104 #include <sys/rman.h>
105
106 #include <dev/mii/mii.h>
107 #include <dev/mii/miivar.h>
108 #include "miidevs.h"
109 #include <dev/mii/brgphyreg.h>
110
111 #ifdef __sparc64__
112 #include <dev/ofw/ofw_bus.h>
113 #include <dev/ofw/openfirm.h>
114 #include <machine/ofw_machdep.h>
115 #include <machine/ver.h>
116 #endif
117
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120
121 #include <dev/bge/if_bgereg.h>
122
123 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
124 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125
126 MODULE_DEPEND(bge, pci, 1, 1, 1);
127 MODULE_DEPEND(bge, ether, 1, 1, 1);
128 MODULE_DEPEND(bge, miibus, 1, 1, 1);
129
130 /* "device miibus" required. See GENERIC if you get errors here. */
131 #include "miibus_if.h"
132
133 /*
134 * Various supported device vendors/types and their names. Note: the
135 * spec seems to indicate that the hardware still has Alteon's vendor
136 * ID burned into it, though it will always be overridden by the vendor
137 * ID in the EEPROM. Just to be safe, we cover all possibilities.
138 */
139 static const struct bge_type {
140 uint16_t bge_vid;
141 uint16_t bge_did;
142 } bge_devs[] = {
143 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
144 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
145
146 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
147 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
148 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
149
150 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
151
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
227
228 { SK_VENDORID, SK_DEVICEID_ALTIMA },
229
230 { TC_VENDORID, TC_DEVICEID_3C996 },
231
232 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
233 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
234 { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 },
235
236 { 0, 0 }
237 };
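
/*
 * bge_probe() (declared below) walks the table above until it hits the
 * { 0, 0 } terminator, looking for an entry matching the PCI vendor and
 * device IDs of the candidate device. A minimal sketch of that walk
 * (the real bge_probe() also builds a description string from
 * bge_vendors[] and the revision tables):
 */
#ifdef notdef
static int
bge_probe_sketch(device_t dev)
{
	const struct bge_type *t;

	for (t = bge_devs; t->bge_vid != 0; t++) {
		if (pci_get_vendor(dev) == t->bge_vid &&
		    pci_get_device(dev) == t->bge_did)
			return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
#endif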
238
239 static const struct bge_vendor {
240 uint16_t v_id;
241 const char *v_name;
242 } bge_vendors[] = {
243 { ALTEON_VENDORID, "Alteon" },
244 { ALTIMA_VENDORID, "Altima" },
245 { APPLE_VENDORID, "Apple" },
246 { BCOM_VENDORID, "Broadcom" },
247 { SK_VENDORID, "SysKonnect" },
248 { TC_VENDORID, "3Com" },
249 { FJTSU_VENDORID, "Fujitsu" },
250
251 { 0, NULL }
252 };
253
254 static const struct bge_revision {
255 uint32_t br_chipid;
256 const char *br_name;
257 } bge_revisions[] = {
258 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
259 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
260 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
261 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
262 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
263 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
264 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
265 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
266 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
267 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
268 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
269 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
270 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
271 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
272 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
273 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
274 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
275 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
276 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
277 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
278 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
279 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
280 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
281 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
282 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
283 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
284 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
285 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
286 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
287 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
288 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
289 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
290 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
291 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
292 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
293 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
294 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
295 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
296 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
297 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
298 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
299 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
300 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
301 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
302 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
303 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
304 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
305 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
306 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
307 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
308 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
309 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
310 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
311 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
312 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
313 /* 5754 and 5787 share the same ASIC ID */
314 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
315 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
316 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
317 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
318 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
319 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
320 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
321 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
322 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
323
324 { 0, NULL }
325 };
326
327 /*
328 * Some defaults for major revisions, so that newer steppings
329 * that we don't know about have a shot at working.
330 */
331 static const struct bge_revision bge_majorrevs[] = {
332 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
333 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
334 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
335 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
336 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
337 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
338 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
339 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
340 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
341 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
342 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
343 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
344 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
345 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
346 /* 5754 and 5787 share the same ASIC ID */
347 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
348 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
349 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
350 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
351 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
352 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
353 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
354
355 { 0, NULL }
356 };
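
/*
 * bge_lookup_rev() (declared below) resolves a chip ID to a name by
 * first searching bge_revisions[] for an exact match and then falling
 * back to bge_majorrevs[], keyed on the ASIC revision portion of the
 * chip ID, so newer steppings still get a usable name. A sketch of
 * that two-pass lookup, assuming BGE_ASICREV() from if_bgereg.h
 * extracts the ASIC revision from a chip ID:
 */
#ifdef notdef
static const struct bge_revision *
bge_lookup_rev_sketch(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++)
		if (br->br_chipid == chipid)
			return (br);
	for (br = bge_majorrevs; br->br_name != NULL; br++)
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	return (NULL);
}
#endif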
357
358 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
359 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
360 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
361 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
362 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
363 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
364 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
365
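/*
 * The BGE_IS_* macros above test capability bits in sc->bge_flags;
 * bge_attach() derives those bits once from the ASIC revision so the
 * rest of the driver can ask "family" questions, e.g.
 * "if (BGE_IS_5705_PLUS(sc))", without re-deriving them from
 * sc->bge_asicrev at every call site.
 */
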
366 const struct bge_revision * bge_lookup_rev(uint32_t);
367 const struct bge_vendor * bge_lookup_vendor(uint16_t);
368
369 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
370
371 static int bge_probe(device_t);
372 static int bge_attach(device_t);
373 static int bge_detach(device_t);
374 static int bge_suspend(device_t);
375 static int bge_resume(device_t);
376 static void bge_release_resources(struct bge_softc *);
377 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
378 static int bge_dma_alloc(struct bge_softc *);
379 static void bge_dma_free(struct bge_softc *);
380 static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
381 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
382
383 static void bge_devinfo(struct bge_softc *);
384 static int bge_mbox_reorder(struct bge_softc *);
385
386 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
387 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
388 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
389 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
390 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
391
392 static void bge_txeof(struct bge_softc *, uint16_t);
393 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
394 static void bge_rxeof(struct bge_softc *, uint16_t, int);
395
396 static void bge_asf_driver_up (struct bge_softc *);
397 static void bge_tick(void *);
398 static void bge_stats_clear_regs(struct bge_softc *);
399 static void bge_stats_update(struct bge_softc *);
400 static void bge_stats_update_regs(struct bge_softc *);
401 static struct mbuf *bge_check_short_dma(struct mbuf *);
402 static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
403 uint16_t *, uint16_t *);
404 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
405
406 static void bge_intr(void *);
407 static int bge_msi_intr(void *);
408 static void bge_intr_task(void *, int);
409 static void bge_start_locked(struct ifnet *);
410 static void bge_start(struct ifnet *);
411 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
412 static void bge_init_locked(struct bge_softc *);
413 static void bge_init(void *);
414 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
415 static void bge_stop(struct bge_softc *);
416 static void bge_watchdog(struct bge_softc *);
417 static int bge_shutdown(device_t);
418 static int bge_ifmedia_upd_locked(struct ifnet *);
419 static int bge_ifmedia_upd(struct ifnet *);
420 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
421
422 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
423 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
424
425 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
426 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
427
428 static void bge_setpromisc(struct bge_softc *);
429 static void bge_setmulti(struct bge_softc *);
430 static void bge_setvlan(struct bge_softc *);
431
432 static __inline void bge_rxreuse_std(struct bge_softc *, int);
433 static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
434 static int bge_newbuf_std(struct bge_softc *, int);
435 static int bge_newbuf_jumbo(struct bge_softc *, int);
436 static int bge_init_rx_ring_std(struct bge_softc *);
437 static void bge_free_rx_ring_std(struct bge_softc *);
438 static int bge_init_rx_ring_jumbo(struct bge_softc *);
439 static void bge_free_rx_ring_jumbo(struct bge_softc *);
440 static void bge_free_tx_ring(struct bge_softc *);
441 static int bge_init_tx_ring(struct bge_softc *);
442
443 static int bge_chipinit(struct bge_softc *);
444 static int bge_blockinit(struct bge_softc *);
445 static uint32_t bge_dma_swap_options(struct bge_softc *);
446
447 static int bge_has_eaddr(struct bge_softc *);
448 static uint32_t bge_readmem_ind(struct bge_softc *, int);
449 static void bge_writemem_ind(struct bge_softc *, int, int);
450 static void bge_writembx(struct bge_softc *, int, int);
451 #ifdef notdef
452 static uint32_t bge_readreg_ind(struct bge_softc *, int);
453 #endif
454 static void bge_writemem_direct(struct bge_softc *, int, int);
455 static void bge_writereg_ind(struct bge_softc *, int, int);
456
457 static int bge_miibus_readreg(device_t, int, int);
458 static int bge_miibus_writereg(device_t, int, int, int);
459 static void bge_miibus_statchg(device_t);
460 #ifdef DEVICE_POLLING
461 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
462 #endif
463
464 #define BGE_RESET_START 1
465 #define BGE_RESET_STOP 2
466 static void bge_sig_post_reset(struct bge_softc *, int);
467 static void bge_sig_legacy(struct bge_softc *, int);
468 static void bge_sig_pre_reset(struct bge_softc *, int);
469 static void bge_stop_fw(struct bge_softc *);
470 static int bge_reset(struct bge_softc *);
471 static void bge_link_upd(struct bge_softc *);
472
473 /*
474 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
475 * leak information to untrusted users. It is also known to cause alignment
476 * traps on certain architectures.
477 */
478 #ifdef BGE_REGISTER_DEBUG
479 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
480 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
481 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
482 #endif
483 static void bge_add_sysctls(struct bge_softc *);
484 static void bge_add_sysctl_stats_regs(struct bge_softc *,
485 struct sysctl_ctx_list *, struct sysctl_oid_list *);
486 static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
487 struct sysctl_oid_list *);
488 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
489
490 static device_method_t bge_methods[] = {
491 /* Device interface */
492 DEVMETHOD(device_probe, bge_probe),
493 DEVMETHOD(device_attach, bge_attach),
494 DEVMETHOD(device_detach, bge_detach),
495 DEVMETHOD(device_shutdown, bge_shutdown),
496 DEVMETHOD(device_suspend, bge_suspend),
497 DEVMETHOD(device_resume, bge_resume),
498
499 /* bus interface */
500 DEVMETHOD(bus_print_child, bus_generic_print_child),
501 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
502
503 /* MII interface */
504 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
505 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
506 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
507
508 { 0, 0 }
509 };
510
511 static driver_t bge_driver = {
512 "bge",
513 bge_methods,
514 sizeof(struct bge_softc)
515 };
516
517 static devclass_t bge_devclass;
518
519 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
520 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
521
522 static int bge_allow_asf = 0;
523
524 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
525
526 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
527 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
528 "Allow ASF mode if available");
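
/*
 * Since allow_asf is a read-only tunable, it has to be set from the
 * loader, e.g. by adding the following line to /boot/loader.conf:
 *
 *	hw.bge.allow_asf="1"
 */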
529
530 #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500"
531 #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2"
532 #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500"
533 #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3"
534 #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id"
535
536 static int
537 bge_has_eaddr(struct bge_softc *sc)
538 {
539 #ifdef __sparc64__
540 char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
541 device_t dev;
542 uint32_t subvendor;
543
544 dev = sc->bge_dev;
545
546 /*
547 * The on-board BGEs found in sun4u machines aren't fitted with
548 * an EEPROM which means that we have to obtain the MAC address
549 * via OFW and that some tests will always fail. We distinguish
550 * such BGEs by the subvendor ID, which also has to be obtained
551 * from OFW instead of the PCI configuration space as the latter
552 * indicates Broadcom as the subvendor of the netboot interface.
553 * For early Blade 1500 and 2500 we even have to check the OFW
554 * device path as the subvendor ID always defaults to Broadcom
555 * there.
556 */
557 if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
558 &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
559 (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID))
560 return (0);
561 memset(buf, 0, sizeof(buf));
562 if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
563 if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
564 strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
565 return (0);
566 if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
567 strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
568 return (0);
569 }
570 #endif
571 return (1);
572 }
573
574 static uint32_t
575 bge_readmem_ind(struct bge_softc *sc, int off)
576 {
577 device_t dev;
578 uint32_t val;
579
580 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
581 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
582 return (0);
583
584 dev = sc->bge_dev;
585
586 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
587 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
588 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
589 return (val);
590 }
591
592 static void
593 bge_writemem_ind(struct bge_softc *sc, int off, int val)
594 {
595 device_t dev;
596
597 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
598 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
599 return;
600
601 dev = sc->bge_dev;
602
603 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
604 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
605 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
606 }
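
/*
 * The two helpers above implement the usual memory-window indirection:
 * the window base register in PCI config space selects which chunk of
 * NIC-internal memory is visible through the data register, and the
 * base is reset to 0 afterwards so the window is left in a known
 * state. For example, the ASF handshake later in this file posts a
 * magic value into the firmware mailbox in NIC SRAM with:
 *
 *	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
 */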
607
608 #ifdef notdef
609 static uint32_t
610 bge_readreg_ind(struct bge_softc *sc, int off)
611 {
612 device_t dev;
613
614 dev = sc->bge_dev;
615
616 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
617 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
618 }
619 #endif
620
621 static void
622 bge_writereg_ind(struct bge_softc *sc, int off, int val)
623 {
624 device_t dev;
625
626 dev = sc->bge_dev;
627
628 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
629 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
630 }
631
632 static void
633 bge_writemem_direct(struct bge_softc *sc, int off, int val)
634 {
635 CSR_WRITE_4(sc, off, val);
636 }
637
638 static void
639 bge_writembx(struct bge_softc *sc, int off, int val)
640 {
641 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
642 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
643
644 CSR_WRITE_4(sc, off, val);
645 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
646 CSR_READ_4(sc, off);
647 }
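
/*
 * The conditional read-back in bge_writembx() is a workaround for host
 * bridges that reorder posted writes (BGE_FLAG_MBOX_REORDER): reading
 * the mailbox back forces the index update out to the chip before the
 * driver proceeds. A typical call posts a ring producer index, e.g.:
 *
 *	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
 */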
648
649 /*
650 * Map a single buffer address.
651 */
652
653 static void
654 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
655 {
656 struct bge_dmamap_arg *ctx;
657
658 if (error)
659 return;
660
661 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
662
663 ctx = arg;
664 ctx->bge_busaddr = segs->ds_addr;
665 }
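
/*
 * bge_dma_map_addr() is the usual single-segment bus_dma(9) callback:
 * the caller hands in a struct bge_dmamap_arg and reads the bus
 * address back out of it once the load completes. A sketch of the
 * calling side (hypothetical helper, assuming a tag that was created
 * to allow exactly one segment):
 */
#ifdef notdef
static bus_addr_t
bge_busaddr_sketch(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
    bus_size_t len)
{
	struct bge_dmamap_arg ctx;

	ctx.bge_busaddr = 0;
	if (bus_dmamap_load(tag, map, vaddr, len, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT) != 0)
		return (0);
	return (ctx.bge_busaddr);
}
#endif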
666
667 static uint8_t
668 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
669 {
670 uint32_t access, byte = 0;
671 int i;
672
673 /* Lock. */
674 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
675 for (i = 0; i < 8000; i++) {
676 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
677 break;
678 DELAY(20);
679 }
680 if (i == 8000)
681 return (1);
682
683 /* Enable access. */
684 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
685 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
686
687 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
688 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
689 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
690 DELAY(10);
691 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
692 DELAY(10);
693 break;
694 }
695 }
696
697 if (i == BGE_TIMEOUT * 10) {
698 if_printf(sc->bge_ifp, "nvram read timed out\n");
699 return (1);
700 }
701
702 /* Get result. */
703 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
704
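/*
 * BGE_NVRAM_RDDATA returns the aligned 32-bit word with the
 * lowest-addressed byte in its most significant bits; after the
 * bswap32() the shift by (addr % 4) * 8 selects the requested byte.
 */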
705 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
706
707 /* Disable access. */
708 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
709
710 /* Unlock. */
711 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
712 CSR_READ_4(sc, BGE_NVRAM_SWARB);
713
714 return (0);
715 }
716
717 /*
718 * Read a sequence of bytes from NVRAM.
719 */
720 static int
721 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
722 {
723 int err = 0, i;
724 uint8_t byte = 0;
725
726 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
727 return (1);
728
729 for (i = 0; i < cnt; i++) {
730 err = bge_nvram_getbyte(sc, off + i, &byte);
731 if (err)
732 break;
733 *(dest + i) = byte;
734 }
735
736 return (err ? 1 : 0);
737 }
738
739 /*
740 * Read a byte of data stored in the EEPROM at address 'addr'. The
741 * BCM570x supports both the traditional bitbang interface and an
742 * auto access interface for reading the EEPROM. We use the auto
743 * access method.
744 */
745 static uint8_t
746 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
747 {
748 int i;
749 uint32_t byte = 0;
750
751 /*
752 * Enable use of auto EEPROM access so we can avoid
753 * having to use the bitbang method.
754 */
755 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
756
757 /* Reset the EEPROM, load the clock period. */
758 CSR_WRITE_4(sc, BGE_EE_ADDR,
759 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
760 DELAY(20);
761
762 /* Issue the read EEPROM command. */
763 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
764
765 /* Wait for completion */
766 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
767 DELAY(10);
768 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
769 break;
770 }
771
772 if (i == BGE_TIMEOUT * 10) {
773 device_printf(sc->bge_dev, "EEPROM read timed out\n");
774 return (1);
775 }
776
777 /* Get result. */
778 byte = CSR_READ_4(sc, BGE_EE_DATA);
779
780 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
781
782 return (0);
783 }
784
785 /*
786 * Read a sequence of bytes from the EEPROM.
787 */
788 static int
789 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
790 {
791 int i, error = 0;
792 uint8_t byte = 0;
793
794 for (i = 0; i < cnt; i++) {
795 error = bge_eeprom_getbyte(sc, off + i, &byte);
796 if (error)
797 break;
798 *(dest + i) = byte;
799 }
800
801 return (error ? 1 : 0);
802 }
803
804 static int
805 bge_miibus_readreg(device_t dev, int phy, int reg)
806 {
807 struct bge_softc *sc;
808 uint32_t val;
809 int i;
810
811 sc = device_get_softc(dev);
812
813 /* Clear the autopoll bit if set; otherwise it may trigger PCI errors. */
814 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
815 CSR_WRITE_4(sc, BGE_MI_MODE,
816 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
817 DELAY(80);
818 }
819
820 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
821 BGE_MIPHY(phy) | BGE_MIREG(reg));
822
823 /* Poll for the PHY register access to complete. */
824 for (i = 0; i < BGE_TIMEOUT; i++) {
825 DELAY(10);
826 val = CSR_READ_4(sc, BGE_MI_COMM);
827 if ((val & BGE_MICOMM_BUSY) == 0) {
828 DELAY(5);
829 val = CSR_READ_4(sc, BGE_MI_COMM);
830 break;
831 }
832 }
833
834 if (i == BGE_TIMEOUT) {
835 device_printf(sc->bge_dev,
836 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
837 phy, reg, val);
838 val = 0;
839 }
840
841 /* Restore the autopoll bit if necessary. */
842 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
843 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
844 DELAY(80);
845 }
846
847 if (val & BGE_MICOMM_READFAIL)
848 return (0);
849
850 return (val & 0xFFFF);
851 }
852
853 static int
854 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
855 {
856 struct bge_softc *sc;
857 int i;
858
859 sc = device_get_softc(dev);
860
861 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
862 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
863 return (0);
864
865 /* Clear the autopoll bit if set; otherwise it may trigger PCI errors. */
866 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
867 CSR_WRITE_4(sc, BGE_MI_MODE,
868 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
869 DELAY(80);
870 }
871
872 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
873 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
874
875 for (i = 0; i < BGE_TIMEOUT; i++) {
876 DELAY(10);
877 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
878 DELAY(5);
879 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
880 break;
881 }
882 }
883
884 /* Restore the autopoll bit if necessary. */
885 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
886 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
887 DELAY(80);
888 }
889
890 if (i == BGE_TIMEOUT)
891 device_printf(sc->bge_dev,
892 "PHY write timed out (phy %d, reg %d, val %d)\n",
893 phy, reg, val);
894
895 return (0);
896 }
897
898 static void
899 bge_miibus_statchg(device_t dev)
900 {
901 struct bge_softc *sc;
902 struct mii_data *mii;
903 sc = device_get_softc(dev);
904 mii = device_get_softc(sc->bge_miibus);
905
906 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
907 (IFM_ACTIVE | IFM_AVALID)) {
908 switch (IFM_SUBTYPE(mii->mii_media_active)) {
909 case IFM_10_T:
910 case IFM_100_TX:
911 sc->bge_link = 1;
912 break;
913 case IFM_1000_T:
914 case IFM_1000_SX:
915 case IFM_2500_SX:
916 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
917 sc->bge_link = 1;
918 else
919 sc->bge_link = 0;
920 break;
921 default:
922 sc->bge_link = 0;
923 break;
924 }
925 } else
926 sc->bge_link = 0;
927 if (sc->bge_link == 0)
928 return;
929 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
930 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
931 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
932 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
933 else
934 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
935
936 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
937 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
938 if ((IFM_OPTIONS(mii->mii_media_active) &
939 IFM_ETH_TXPAUSE) != 0)
940 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
941 else
942 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
943 if ((IFM_OPTIONS(mii->mii_media_active) &
944 IFM_ETH_RXPAUSE) != 0)
945 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
946 else
947 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
948 } else {
949 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
950 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
951 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
952 }
953 }
954
955 /*
956 * Initialize a standard receive ring descriptor.
957 */
958 static int
959 bge_newbuf_std(struct bge_softc *sc, int i)
960 {
961 struct mbuf *m;
962 struct bge_rx_bd *r;
963 bus_dma_segment_t segs[1];
964 bus_dmamap_t map;
965 int error, nsegs;
966
967 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
968 (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
969 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
970 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
971 if (m == NULL)
972 return (ENOBUFS);
973 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
974 } else {
975 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
976 if (m == NULL)
977 return (ENOBUFS);
978 m->m_len = m->m_pkthdr.len = MCLBYTES;
979 }
980 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
981 m_adj(m, ETHER_ALIGN);
982
983 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
984 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
985 if (error != 0) {
986 m_freem(m);
987 return (error);
988 }
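/*
 * The new mbuf was loaded into the spare map above, so unload any old
 * mapping at slot i and swap the slot's map with the spare. A failed
 * load therefore never leaves the slot without a valid map, and the
 * RX path never has to allocate a map.
 */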
989 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
990 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
991 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
992 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
993 sc->bge_cdata.bge_rx_std_dmamap[i]);
994 }
995 map = sc->bge_cdata.bge_rx_std_dmamap[i];
996 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
997 sc->bge_cdata.bge_rx_std_sparemap = map;
998 sc->bge_cdata.bge_rx_std_chain[i] = m;
999 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1000 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1001 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1002 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1003 r->bge_flags = BGE_RXBDFLAG_END;
1004 r->bge_len = segs[0].ds_len;
1005 r->bge_idx = i;
1006
1007 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1008 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1009
1010 return (0);
1011 }
1012
1013 /*
1014 * Initialize a jumbo receive ring descriptor. This allocates
1015 * a jumbo buffer from the pool managed internally by the driver.
1016 */
1017 static int
1018 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1019 {
1020 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1021 bus_dmamap_t map;
1022 struct bge_extrx_bd *r;
1023 struct mbuf *m;
1024 int error, nsegs;
1025
1026 MGETHDR(m, M_DONTWAIT, MT_DATA);
1027 if (m == NULL)
1028 return (ENOBUFS);
1029
1030 m_cljget(m, M_DONTWAIT, MJUM9BYTES);
1031 if (!(m->m_flags & M_EXT)) {
1032 m_freem(m);
1033 return (ENOBUFS);
1034 }
1035 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1036 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1037 m_adj(m, ETHER_ALIGN);
1038
1039 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1040 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1041 if (error != 0) {
1042 m_freem(m);
1043 return (error);
1044 }
1045
1046 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1047 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1048 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1049 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1050 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1051 }
1052 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1053 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1054 sc->bge_cdata.bge_rx_jumbo_sparemap;
1055 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1056 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1057 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1058 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1059 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1060 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1061
1062 /*
1063 * Fill in the extended RX buffer descriptor.
1064 */
1065 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1066 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1067 r->bge_idx = i;
1068 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1069 switch (nsegs) {
1070 case 4:
1071 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1072 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1073 r->bge_len3 = segs[3].ds_len;
1074 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1075 case 3:
1076 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1077 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1078 r->bge_len2 = segs[2].ds_len;
1079 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1080 case 2:
1081 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1082 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1083 r->bge_len1 = segs[1].ds_len;
1084 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1085 case 1:
1086 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1087 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1088 r->bge_len0 = segs[0].ds_len;
1089 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1090 break;
1091 default:
1092 panic("%s: %d segments\n", __func__, nsegs);
1093 }
1094
1095 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1096 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1097
1098 return (0);
1099 }
1100
1101 static int
1102 bge_init_rx_ring_std(struct bge_softc *sc)
1103 {
1104 int error, i;
1105
1106 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1107 sc->bge_std = 0;
1108 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1109 if ((error = bge_newbuf_std(sc, i)) != 0)
1110 return (error);
1111 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1112 }
1113
1114 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1115 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1116
1117 sc->bge_std = 0;
1118 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1119
1120 return (0);
1121 }
1122
1123 static void
1124 bge_free_rx_ring_std(struct bge_softc *sc)
1125 {
1126 int i;
1127
1128 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1129 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1130 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1131 sc->bge_cdata.bge_rx_std_dmamap[i],
1132 BUS_DMASYNC_POSTREAD);
1133 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1134 sc->bge_cdata.bge_rx_std_dmamap[i]);
1135 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1136 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1137 }
1138 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1139 sizeof(struct bge_rx_bd));
1140 }
1141 }
1142
1143 static int
1144 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1145 {
1146 struct bge_rcb *rcb;
1147 int error, i;
1148
1149 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1150 sc->bge_jumbo = 0;
1151 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1152 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1153 return (error);
1154 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1155 }
1156
1157 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1158 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1159
1160 sc->bge_jumbo = 0;
1161
1162 /* Enable the jumbo receive producer ring. */
1163 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1164 rcb->bge_maxlen_flags =
1165 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1166 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1167
1168 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1169
1170 return (0);
1171 }
1172
1173 static void
1174 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1175 {
1176 int i;
1177
1178 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1179 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1180 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1181 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1182 BUS_DMASYNC_POSTREAD);
1183 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1184 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1185 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1186 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1187 }
1188 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1189 sizeof(struct bge_extrx_bd));
1190 }
1191 }
1192
1193 static void
1194 bge_free_tx_ring(struct bge_softc *sc)
1195 {
1196 int i;
1197
1198 if (sc->bge_ldata.bge_tx_ring == NULL)
1199 return;
1200
1201 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1202 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1203 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1204 sc->bge_cdata.bge_tx_dmamap[i],
1205 BUS_DMASYNC_POSTWRITE);
1206 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1207 sc->bge_cdata.bge_tx_dmamap[i]);
1208 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1209 sc->bge_cdata.bge_tx_chain[i] = NULL;
1210 }
1211 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1212 sizeof(struct bge_tx_bd));
1213 }
1214 }
1215
1216 static int
1217 bge_init_tx_ring(struct bge_softc *sc)
1218 {
1219 sc->bge_txcnt = 0;
1220 sc->bge_tx_saved_considx = 0;
1221
1222 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1223 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1224 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1225
1226 /* Initialize transmit producer index for host-memory send ring. */
1227 sc->bge_tx_prodidx = 0;
1228 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1229
1230 /* 5700 b2 errata */
1231 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1232 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1233
1234 /* NIC-memory send ring not used; initialize to zero. */
1235 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1236 /* 5700 b2 errata */
1237 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1238 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1239
1240 return (0);
1241 }
1242
1243 static void
1244 bge_setpromisc(struct bge_softc *sc)
1245 {
1246 struct ifnet *ifp;
1247
1248 BGE_LOCK_ASSERT(sc);
1249
1250 ifp = sc->bge_ifp;
1251
1252 /* Enable or disable promiscuous mode as needed. */
1253 if (ifp->if_flags & IFF_PROMISC)
1254 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1255 else
1256 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1257 }
1258
1259 static void
1260 bge_setmulti(struct bge_softc *sc)
1261 {
1262 struct ifnet *ifp;
1263 struct ifmultiaddr *ifma;
1264 uint32_t hashes[4] = { 0, 0, 0, 0 };
1265 int h, i;
1266
1267 BGE_LOCK_ASSERT(sc);
1268
1269 ifp = sc->bge_ifp;
1270
1271 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1272 for (i = 0; i < 4; i++)
1273 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1274 return;
1275 }
1276
1277 /* First, zot all the existing filters. */
1278 for (i = 0; i < 4; i++)
1279 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1280
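/*
 * The hash filter is 128 bits wide, spread across the four MAR
 * registers. For each address, the low 7 bits of the little-endian
 * CRC32 select one of those bits: bits 6-5 pick the register and
 * bits 4-0 the bit within it.
 */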
1281 /* Now program new ones. */
1282 IF_ADDR_LOCK(ifp);
1283 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1284 if (ifma->ifma_addr->sa_family != AF_LINK)
1285 continue;
1286 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1287 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1288 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1289 }
1290 IF_ADDR_UNLOCK(ifp);
1291
1292 for (i = 0; i < 4; i++)
1293 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1294 }
1295
1296 static void
1297 bge_setvlan(struct bge_softc *sc)
1298 {
1299 struct ifnet *ifp;
1300
1301 BGE_LOCK_ASSERT(sc);
1302
1303 ifp = sc->bge_ifp;
1304
1305 /* Enable or disable VLAN tag stripping as needed. */
1306 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1307 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1308 else
1309 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1310 }
1311
1312 static void
1313 bge_sig_pre_reset(struct bge_softc *sc, int type)
1314 {
1315
1316 /*
1317 * Some chips don't like this, so only do it if ASF is enabled.
1318 */
1319 if (sc->bge_asf_mode)
1320 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1321
1322 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1323 switch (type) {
1324 case BGE_RESET_START:
1325 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1326 BGE_FW_DRV_STATE_START);
1327 break;
1328 case BGE_RESET_STOP:
1329 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1330 BGE_FW_DRV_STATE_UNLOAD);
1331 break;
1332 }
1333 }
1334 }
1335
1336 static void
1337 bge_sig_post_reset(struct bge_softc *sc, int type)
1338 {
1339
1340 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1341 switch (type) {
1342 case BGE_RESET_START:
1343 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1344 BGE_FW_DRV_STATE_START_DONE);
1345 /* START DONE */
1346 break;
1347 case BGE_RESET_STOP:
1348 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1349 BGE_FW_DRV_STATE_UNLOAD_DONE);
1350 break;
1351 }
1352 }
1353 }
1354
1355 static void
1356 bge_sig_legacy(struct bge_softc *sc, int type)
1357 {
1358
1359 if (sc->bge_asf_mode) {
1360 switch (type) {
1361 case BGE_RESET_START:
1362 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1363 BGE_FW_DRV_STATE_START);
1364 break;
1365 case BGE_RESET_STOP:
1366 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1367 BGE_FW_DRV_STATE_UNLOAD);
1368 break;
1369 }
1370 }
1371 }
1372
1373 static void
1374 bge_stop_fw(struct bge_softc *sc)
1375 {
1376 int i;
1377
1378 if (sc->bge_asf_mode) {
1379 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1380 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1381 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1382
1383 for (i = 0; i < 100; i++ ) {
1384 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1385 BGE_RX_CPU_DRV_EVENT))
1386 break;
1387 DELAY(10);
1388 }
1389 }
1390 }
1391
1392 static uint32_t
1393 bge_dma_swap_options(struct bge_softc *sc)
1394 {
1395 uint32_t dma_options;
1396
1397 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1398 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1399 #if BYTE_ORDER == BIG_ENDIAN
1400 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1401 #endif
1402 if ((sc)->bge_asicrev == BGE_ASICREV_BCM5720)
1403 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1404 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1405 BGE_MODECTL_HTX2B_ENABLE;
1406
1407 return (dma_options);
1408 }
1409
1410 /*
1411 * Do endian, PCI and DMA initialization.
1412 */
1413 static int
1414 bge_chipinit(struct bge_softc *sc)
1415 {
1416 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1417 uint16_t val;
1418 int i;
1419
1420 /* Set endianness before we access any non-PCI registers. */
1421 misc_ctl = BGE_INIT;
1422 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1423 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1424 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1425
1426 /* Clear the MAC control register */
1427 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1428
1429 /*
1430 * Clear the MAC statistics block in the NIC's
1431 * internal memory.
1432 */
1433 for (i = BGE_STATS_BLOCK;
1434 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1435 BGE_MEMWIN_WRITE(sc, i, 0);
1436
1437 for (i = BGE_STATUS_BLOCK;
1438 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1439 BGE_MEMWIN_WRITE(sc, i, 0);
1440
1441 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1442 /*
1443 * Fix data corruption caused by non-qword write with WB.
1444 * Fix master abort in PCI mode.
1445 * Fix PCI latency timer.
1446 */
1447 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1448 val |= (1 << 10) | (1 << 12) | (1 << 13);
1449 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1450 }
1451
1452 /*
1453 * Set up the PCI DMA control register.
1454 */
1455 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1456 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1457 if (sc->bge_flags & BGE_FLAG_PCIE) {
1458 /* Read watermark not used, 128 bytes for write. */
1459 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1460 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1461 if (BGE_IS_5714_FAMILY(sc)) {
1462 /* 256 bytes for read and write. */
1463 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1464 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1465 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1466 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1467 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1468 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1469 /*
1470 * In the BCM5703, the DMA read watermark should
1471 * be set to less than or equal to the maximum
1472 * memory read byte count of the PCI-X command
1473 * register.
1474 */
1475 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1476 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1477 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1478 /* 1536 bytes for read, 384 bytes for write. */
1479 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1480 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1481 } else {
1482 /* 384 bytes for read and write. */
1483 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1484 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1485 0x0F;
1486 }
1487 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1488 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1489 uint32_t tmp;
1490
1491 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1492 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1493 if (tmp == 6 || tmp == 7)
1494 dma_rw_ctl |=
1495 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1496
1497 /* Set PCI-X DMA write workaround. */
1498 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1499 }
1500 } else {
1501 /* Conventional PCI bus: 256 bytes for read and write. */
1502 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1503 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1504
1505 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1506 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1507 dma_rw_ctl |= 0x0F;
1508 }
1509 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1510 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1511 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1512 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1513 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1514 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1515 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1516 if (BGE_IS_5717_PLUS(sc)) {
1517 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1518 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1519 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1520 /*
1521 * Enable HW workaround for controllers that misinterpret
1522 * a status tag update and leave interrupts permanently
1523 * disabled.
1524 */
1525 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1526 sc->bge_asicrev != BGE_ASICREV_BCM57765)
1527 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1528 }
1529 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1530
1531 /*
1532 * Set up general mode register.
1533 */
1534 mode_ctl = bge_dma_swap_options(sc) | BGE_MODECTL_MAC_ATTN_INTR |
1535 BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM;
1536
1537 /*
1538 * The BCM5701 B5 has a bug causing data corruption when using
1539 * 64-bit DMA reads, which can be terminated early and then
1540 * completed later as 32-bit accesses, in combination with
1541 * certain bridges.
1542 */
1543 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1544 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1545 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1546
1547 /*
1548 * Tell the firmware the driver is running
1549 */
1550 if (sc->bge_asf_mode & ASF_STACKUP)
1551 mode_ctl |= BGE_MODECTL_STACKUP;
1552
1553 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1554
1555 /*
1556 * Disable memory write invalidate. Apparently it is not supported
1557 * properly by these devices. Also ensure that INTx isn't disabled,
1558 * as these chips need it even when using MSI.
1559 */
1560 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1561 PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
1562
1563 /* Set the timer prescaler (always 66MHz) */
1564 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1565
1566 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1567 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1568 DELAY(40); /* XXX */
1569
1570 /* Put PHY into ready state */
1571 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1572 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1573 DELAY(40);
1574 }
1575
1576 return (0);
1577 }
1578
1579 static int
1580 bge_blockinit(struct bge_softc *sc)
1581 {
1582 struct bge_rcb *rcb;
1583 bus_size_t vrcb;
1584 bge_hostaddr taddr;
1585 uint32_t dmactl, val;
1586 int i, limit;
1587
1588 /*
1589 * Initialize the memory window pointer register so that
1590 * we can access the first 32K of internal NIC RAM. This will
1591 * allow us to set up the TX send ring RCBs and the RX return
1592 * ring RCBs, plus other things which live in NIC memory.
1593 */
1594 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1595
1596 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1597
1598 if (!(BGE_IS_5705_PLUS(sc))) {
1599 /* Configure mbuf memory pool */
1600 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1601 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1602 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1603 else
1604 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1605
1606 /* Configure DMA resource pool */
1607 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1608 BGE_DMA_DESCRIPTORS);
1609 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1610 }
1611
1612 /* Configure mbuf pool watermarks */
1613 if (BGE_IS_5717_PLUS(sc)) {
1614 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1615 if (sc->bge_ifp->if_mtu > ETHERMTU) {
1616 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1617 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1618 } else {
1619 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1620 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1621 }
1622 } else if (!BGE_IS_5705_PLUS(sc)) {
1623 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1624 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1625 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1626 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1627 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1628 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1629 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1630 } else {
1631 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1632 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1633 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1634 }
1635
1636 /* Configure DMA resource watermarks */
1637 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1638 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1639
1640 /* Enable buffer manager */
1641 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1642 /*
1643 * Change the arbitration algorithm of TXMBUF read request to
1644 * round-robin instead of priority based for BCM5719. When
1645 * TXFIFO is almost empty, RDMA will hold its request until
1646 * TXFIFO is not almost empty.
1647 */
1648 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1649 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1650 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1651
1652 /* Poll for buffer manager start indication */
1653 for (i = 0; i < BGE_TIMEOUT; i++) {
1654 DELAY(10);
1655 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1656 break;
1657 }
1658
1659 if (i == BGE_TIMEOUT) {
1660 device_printf(sc->bge_dev, "buffer manager failed to start\n");
1661 return (ENXIO);
1662 }
1663
1664 /* Enable flow-through queues */
1665 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1666 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1667
1668 /* Wait until queue initialization is complete */
1669 for (i = 0; i < BGE_TIMEOUT; i++) {
1670 DELAY(10);
1671 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1672 break;
1673 }
1674
1675 if (i == BGE_TIMEOUT) {
1676 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1677 return (ENXIO);
1678 }
1679
1680 /*
1681 * Summary of rings supported by the controller:
1682 *
1683 * Standard Receive Producer Ring
1684 * - This ring is used to feed receive buffers for "standard"
1685 * sized frames (typically 1536 bytes) to the controller.
1686 *
1687 * Jumbo Receive Producer Ring
1688 * - This ring is used to feed receive buffers for jumbo sized
1689 * frames (i.e. anything bigger than the "standard" frames)
1690 * to the controller.
1691 *
1692 * Mini Receive Producer Ring
1693 * - This ring is used to feed receive buffers for "mini"
1694 * sized frames to the controller.
1695 * - This feature required external memory for the controller
1696 * but was never used in a production system. Should always
1697 * be disabled.
1698 *
1699 * Receive Return Ring
1700 * - After the controller has placed an incoming frame into a
1701 * receive buffer, that buffer is moved into a receive return
1702 * ring. The driver is then responsible for passing the
1703 * buffer up to the stack. Many versions of the controller
1704 * support multiple RR rings.
1705 *
1706 * Send Ring
1707 * - This ring is used for outgoing frames. Many versions of
1708 * the controller support multiple send rings.
1709 */
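/*
 * Each ring is handed to the chip through a ring control block
 * (RCB). For reference, the structure manipulated below looks
 * roughly like this; see if_bgereg.h for the authoritative
 * definition:
 *
 * struct bge_rcb {
 *         bge_hostaddr bge_hostaddr;       (64-bit host ring address)
 *         uint32_t     bge_maxlen_flags;   (size/flags word)
 *         uint32_t     bge_nicaddr;        (ring address in NIC memory)
 * };
 */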
1710
1711 /* Initialize the standard receive producer ring control block. */
1712 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1713 rcb->bge_hostaddr.bge_addr_lo =
1714 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1715 rcb->bge_hostaddr.bge_addr_hi =
1716 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1717 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1718 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1719 if (BGE_IS_5717_PLUS(sc)) {
1720 /*
1721 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1722 * Bits 15-2 : Maximum RX frame size
1723 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1724 * Bit 0 : Reserved
1725 */
1726 rcb->bge_maxlen_flags =
1727 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
1728 } else if (BGE_IS_5705_PLUS(sc)) {
1729 /*
1730 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1731 * Bits 15-2 : Reserved (should be 0)
1732 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1733 * Bit 0 : Reserved
1734 */
1735 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1736 } else {
1737 /*
1738 * Ring size is always XXX entries
1739 * Bits 31-16: Maximum RX frame size
1740 * Bits 15-2 : Reserved (should be 0)
1741 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
1742 * Bit 0 : Reserved
1743 */
1744 rcb->bge_maxlen_flags =
1745 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1746 }
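/*
 * BGE_RCB_MAXLEN_FLAGS() packs the two halves documented in the
 * bit layouts above into a single register word; a sketch of the
 * macro (the authoritative definition is in if_bgereg.h):
 *
 * #define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) \
 *         ((maxlen) << 16 | (flags))
 *
 * This is also why the 5717+ case shifts BGE_MAX_FRAMELEN left by
 * 2: the maximum RX frame size occupies bits 15-2 of the low half.
 */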
1747 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1748 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1749 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1750 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1751 else
1752 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1753 /* Write the standard receive producer ring control block. */
1754 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1755 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1756 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1757 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1758
1759 /* Reset the standard receive producer ring producer index. */
1760 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1761
1762 /*
1763 * Initialize the jumbo RX producer ring control
1764 * block. We set the 'ring disabled' bit in the
1765 * flags field until we're actually ready to start
1766 * using this ring (i.e. once we set the MTU
1767 * high enough to require it).
1768 */
1769 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1770 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1771 /* Get the jumbo receive producer ring RCB parameters. */
1772 rcb->bge_hostaddr.bge_addr_lo =
1773 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1774 rcb->bge_hostaddr.bge_addr_hi =
1775 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1776 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1777 sc->bge_cdata.bge_rx_jumbo_ring_map,
1778 BUS_DMASYNC_PREREAD);
1779 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1780 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1781 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1782 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1783 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1784 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
1785 else
1786 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1787 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1788 rcb->bge_hostaddr.bge_addr_hi);
1789 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1790 rcb->bge_hostaddr.bge_addr_lo);
1791 /* Program the jumbo receive producer ring RCB parameters. */
1792 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1793 rcb->bge_maxlen_flags);
1794 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1795 /* Reset the jumbo receive producer ring producer index. */
1796 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1797 }
1798
1799 /* Disable the mini receive producer ring RCB. */
1800 if (BGE_IS_5700_FAMILY(sc)) {
1801 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1802 rcb->bge_maxlen_flags =
1803 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1804 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1805 rcb->bge_maxlen_flags);
1806 /* Reset the mini receive producer ring producer index. */
1807 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1808 }
1809
1810 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
1811 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1812 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
1813 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
1814 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
1815 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
1816 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
1817 }
1818 /*
1819 * The BD ring replenish thresholds control how often the
1820 * hardware fetches new BD's from the producer rings in host
1821 * memory. Setting the value too low on a busy system can
1822 * starve the hardware and reduce the throughput.
1823 *
1824 * Set the BD ring replenish thresholds. The recommended
1825 * values are 1/8th the number of descriptors allocated to
1826 * each ring.
1827 * XXX The 5754 requires a lower threshold, so it might be a
1828 * requirement of all 575x family chips. The Linux driver sets
1829 * the lower threshold for all 5705 family chips as well, but there
1830 * are reports that it might not need to be so strict.
1831 *
1832 * XXX Linux does some extra fiddling here for the 5906 parts as
1833 * well.
1834 */
1835 if (BGE_IS_5705_PLUS(sc))
1836 val = 8;
1837 else
1838 val = BGE_STD_RX_RING_CNT / 8;
1839 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1840 if (BGE_IS_JUMBO_CAPABLE(sc))
1841 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
1842 BGE_JUMBO_RX_RING_CNT/8);
1843 if (BGE_IS_5717_PLUS(sc)) {
1844 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
1845 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
1846 }
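/*
 * Worked example of the 1/8th rule above: assuming the stock ring
 * sizes (BGE_STD_RX_RING_CNT of 512 and BGE_JUMBO_RX_RING_CNT of
 * 256), the pre-5705 standard threshold comes out to 512 / 8 = 64
 * BDs and the jumbo threshold to 256 / 8 = 32 BDs per fetch.
 */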
1847
1848 /*
1849 * Disable all send rings by setting the 'ring disabled' bit
1850 * in the flags field of all the TX send ring control blocks,
1851 * located in NIC memory.
1852 */
1853 if (!BGE_IS_5705_PLUS(sc))
1854 /* 5700 to 5704 had 16 send rings. */
1855 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
1856 else
1857 limit = 1;
1858 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1859 for (i = 0; i < limit; i++) {
1860 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1861 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1862 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1863 vrcb += sizeof(struct bge_rcb);
1864 }
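/*
 * Unlike the standard RX RCB above, which is programmed through
 * dedicated registers, the send and receive return RCBs live in
 * NIC-internal memory and are written through the memory window.
 * RCB_WRITE_4() presumably reduces to an indirect write at the
 * field's offset, along these lines (see the macro's definition
 * earlier in this file for the authoritative form):
 *
 * #define RCB_WRITE_4(sc, rcb, offset, val) \
 *         bge_writemem_ind(sc, rcb + offsetof(struct bge_rcb, offset), val)
 */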
1865
1866 /* Configure send ring RCB 0 (we use only the first ring) */
1867 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1868 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1869 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1870 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1871 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1872 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1873 sc->bge_asicrev == BGE_ASICREV_BCM5720)
1874 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
1875 else
1876 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1877 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1878 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1879 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1880
1881 /*
1882 * Disable all receive return rings by setting the
1883 * 'ring disabled' bit in the flags field of all the receive
1884 * return ring control blocks, located in NIC memory.
1885 */
1886 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
1887 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
1888 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
1889 /* Should be 17, use 16 until we get an SRAM map. */
1890 limit = 16;
1891 } else if (!BGE_IS_5705_PLUS(sc))
1892 limit = BGE_RX_RINGS_MAX;
1893 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1894 sc->bge_asicrev == BGE_ASICREV_BCM57765)
1895 limit = 4;
1896 else
1897 limit = 1;
1898 /* Disable all receive return rings. */
1899 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1900 for (i = 0; i < limit; i++) {
1901 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1902 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1903 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1904 BGE_RCB_FLAG_RING_DISABLED);
1905 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1906 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1907 (i * (sizeof(uint64_t))), 0);
1908 vrcb += sizeof(struct bge_rcb);
1909 }
1910
1911 /*
1912 * Set up receive return ring 0. Note that the NIC address
1913 * for RX return rings is 0x0. The return rings live entirely
1914 * within the host, so the nicaddr field in the RCB isn't used.
1915 */
1916 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1917 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1918 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1919 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1920 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1921 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1922 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1923
1924 /* Set random backoff seed for TX */
1925 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1926 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1927 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1928 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1929 BGE_TX_BACKOFF_SEED_MASK);
1930
1931 /* Set inter-packet gap */
1932 val = 0x2620;
1933 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
1934 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
1935 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
1936 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
1937
1938 /*
1939 * Specify which ring to use for packets that don't match
1940 * any RX rules.
1941 */
1942 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1943
1944 /*
1945 * Configure number of RX lists. One interrupt distribution
1946 * list, sixteen active lists, one bad frames class.
1947 */
1948 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1949
1950 /* Initialize RX list placement stats mask. */
1951 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1952 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1953
1954 /* Disable host coalescing until we get it set up */
1955 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1956
1957 /* Poll to make sure it's shut down. */
1958 for (i = 0; i < BGE_TIMEOUT; i++) {
1959 DELAY(10);
1960 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1961 break;
1962 }
1963
1964 if (i == BGE_TIMEOUT) {
1965 device_printf(sc->bge_dev,
1966 "host coalescing engine failed to idle\n");
1967 return (ENXIO);
1968 }
1969
1970 /* Set up host coalescing defaults */
1971 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1972 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1973 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1974 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1975 if (!(BGE_IS_5705_PLUS(sc))) {
1976 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1977 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1978 }
1979 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1980 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1981
1982 /* Set up address of statistics block */
1983 if (!(BGE_IS_5705_PLUS(sc))) {
1984 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1985 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1986 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1987 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1988 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1989 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1990 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1991 }
1992
1993 /* Set up address of status block */
1994 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1995 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1996 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1997 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1998
1999 /* Set up status block size. */
2000 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2001 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2002 val = BGE_STATBLKSZ_FULL;
2003 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2004 } else {
2005 val = BGE_STATBLKSZ_32BYTE;
2006 bzero(sc->bge_ldata.bge_status_block, 32);
2007 }
2008 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2009 sc->bge_cdata.bge_status_map,
2010 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2011
2012 /* Turn on host coalescing state machine */
2013 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2014
2015 /* Turn on RX BD completion state machine and enable attentions */
2016 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2017 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2018
2019 /* Turn on RX list placement state machine */
2020 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2021
2022 /* Turn on RX list selector state machine. */
2023 if (!(BGE_IS_5705_PLUS(sc)))
2024 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2025
2026 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2027 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2028 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2029 BGE_MACMODE_FRMHDR_DMA_ENB;
2030
2031 if (sc->bge_flags & BGE_FLAG_TBI)
2032 val |= BGE_PORTMODE_TBI;
2033 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2034 val |= BGE_PORTMODE_GMII;
2035 else
2036 val |= BGE_PORTMODE_MII;
2037
2038 /* Turn on DMA, clear stats */
2039 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2040
2041 /* Set misc. local control, enable interrupts on attentions */
2042 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2043
2044 #ifdef notdef
2045 /* Assert GPIO pins for PHY reset */
2046 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2047 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2048 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2049 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2050 #endif
2051
2052 /* Turn on DMA completion state machine */
2053 if (!(BGE_IS_5705_PLUS(sc)))
2054 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2055
2056 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2057
2058 /* Enable host coalescing bug fix. */
2059 if (BGE_IS_5755_PLUS(sc))
2060 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2061
2062 /* Request larger DMA burst size to get better performance. */
2063 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2064 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2065
2066 /* Turn on write DMA state machine */
2067 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2068 DELAY(40);
2069
2070 /* Turn on read DMA state machine */
2071 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2072
2073 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2074 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2075
2076 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2077 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2078 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2079 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2080 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2081 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2082 if (sc->bge_flags & BGE_FLAG_PCIE)
2083 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2084 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2085 val |= BGE_RDMAMODE_TSO4_ENABLE;
2086 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2087 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2088 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2089 val |= BGE_RDMAMODE_TSO6_ENABLE;
2090 }
2091
2092 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2093 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2094 BGE_RDMAMODE_H2BNC_VLAN_DET;
2095 /*
2096 * Allow multiple outstanding read requests from
2097 * non-LSO read DMA engine.
2098 */
2099 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2100 }
2101
2102 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2103 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2104 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2105 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2106 BGE_IS_5717_PLUS(sc)) {
2107 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
2108 /*
2109 * Adjust tx margin to prevent TX data corruption and
2110 * fix internal FIFO overflow.
2111 */
2112 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2113 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2114 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2115 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2116 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2117 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2118 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2119 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2120 }
2121 /*
2122 * Enable fix for read DMA FIFO overruns.
2123 * The fix is to limit the number of RX BDs
2124 * the hardware would fetch at a time.
2125 */
2126 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
2127 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2128 }
2129
2130 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2131 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2132 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2133 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2134 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2135 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2136 /*
2137 * Allow 4KB burst length reads for non-LSO frames.
2138 * Enable 512B burst length reads for buffer descriptors.
2139 */
2140 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2141 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2142 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2143 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2144 }
2145
2146 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2147 DELAY(40);
2148
2149 /* Turn on RX data completion state machine */
2150 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2151
2152 /* Turn on RX BD initiator state machine */
2153 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2154
2155 /* Turn on RX data and RX BD initiator state machine */
2156 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2157
2158 /* Turn on Mbuf cluster free state machine */
2159 if (!(BGE_IS_5705_PLUS(sc)))
2160 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2161
2162 /* Turn on send BD completion state machine */
2163 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2164
2165 /* Turn on send data completion state machine */
2166 val = BGE_SDCMODE_ENABLE;
2167 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2168 val |= BGE_SDCMODE_CDELAY;
2169 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2170
2171 /* Turn on send data initiator state machine */
2172 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2173 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2174 BGE_SDIMODE_HW_LSO_PRE_DMA);
2175 else
2176 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2177
2178 /* Turn on send BD initiator state machine */
2179 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2180
2181 /* Turn on send BD selector state machine */
2182 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2183
2184 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2185 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2186 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2187
2188 /* ack/clear link change events */
2189 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2190 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2191 BGE_MACSTAT_LINK_CHANGED);
2192 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2193
2194 /*
2195 * Enable attention when the link has changed state for
2196 * devices that use auto polling.
2197 */
2198 if (sc->bge_flags & BGE_FLAG_TBI) {
2199 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2200 } else {
2201 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2202 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2203 DELAY(80);
2204 }
2205 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2206 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2207 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2208 BGE_EVTENB_MI_INTERRUPT);
2209 }
2210
2211 /*
2212 * Clear any pending link state attention.
2213 * Otherwise some link state change events may be lost until attention
2214 * is cleared by bge_intr() -> bge_link_upd() sequence.
2215 * It's not necessary on newer BCM chips - perhaps enabling link
2216 * state change attentions implies clearing pending attention.
2217 */
2218 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2219 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2220 BGE_MACSTAT_LINK_CHANGED);
2221
2222 /* Enable link state change attentions. */
2223 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2224
2225 return (0);
2226 }
2227
2228 const struct bge_revision *
2229 bge_lookup_rev(uint32_t chipid)
2230 {
2231 const struct bge_revision *br;
2232
2233 for (br = bge_revisions; br->br_name != NULL; br++) {
2234 if (br->br_chipid == chipid)
2235 return (br);
2236 }
2237
2238 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2239 if (br->br_chipid == BGE_ASICREV(chipid))
2240 return (br);
2241 }
2242
2243 return (NULL);
2244 }
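/*
 * Usage sketch for the two-pass lookup above: an exact chip ID hit
 * is preferred, otherwise the major ASIC revision supplies a
 * generic name. Illustration only (hence "notdef"); bge_probe()
 * below is the real consumer:
 */
#ifdef notdef
static void
bge_print_rev(struct bge_softc *sc)
{
const struct bge_revision *br;

br = bge_lookup_rev(sc->bge_chipid);
device_printf(sc->bge_dev, "revision: %s\n",
    br != NULL ? br->br_name : "unknown");
}
#endif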
2245
2246 const struct bge_vendor *
2247 bge_lookup_vendor(uint16_t vid)
2248 {
2249 const struct bge_vendor *v;
2250
2251 for (v = bge_vendors; v->v_name != NULL; v++)
2252 if (v->v_id == vid)
2253 return (v);
2254
2255 panic("%s: unknown vendor %d", __func__, vid);
2256 return (NULL);
2257 }
2258
2259 /*
2260 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2261 * against our list and return its name if we find a match.
2262 *
2263 * Note that since the Broadcom controller contains VPD support, we
2264 * try to get the device name string from the controller itself instead
2265 * of the compiled-in string. This guarantees we'll always announce the
2266 * right product name. We fall back to the compiled-in string when
2267 * VPD is unavailable or corrupt.
2268 */
2269 static int
2270 bge_probe(device_t dev)
2271 {
2272 char buf[96];
2273 char model[64];
2274 const struct bge_revision *br;
2275 const char *pname;
2276 struct bge_softc *sc = device_get_softc(dev);
2277 const struct bge_type *t = bge_devs;
2278 const struct bge_vendor *v;
2279 uint32_t id;
2280 uint16_t did, vid;
2281
2282 sc->bge_dev = dev;
2283 vid = pci_get_vendor(dev);
2284 did = pci_get_device(dev);
2285 while(t->bge_vid != 0) {
2286 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2287 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2288 BGE_PCIMISCCTL_ASICREV_SHIFT;
2289 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2290 /*
2291 * Find the ASIC revision. Different chips
2292 * use different registers.
2293 */
2294 switch (pci_get_device(dev)) {
2295 case BCOM_DEVICEID_BCM5717:
2296 case BCOM_DEVICEID_BCM5718:
2297 case BCOM_DEVICEID_BCM5719:
2298 case BCOM_DEVICEID_BCM5720:
2299 id = pci_read_config(dev,
2300 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2301 break;
2302 case BCOM_DEVICEID_BCM57761:
2303 case BCOM_DEVICEID_BCM57765:
2304 case BCOM_DEVICEID_BCM57781:
2305 case BCOM_DEVICEID_BCM57785:
2306 case BCOM_DEVICEID_BCM57791:
2307 case BCOM_DEVICEID_BCM57795:
2308 id = pci_read_config(dev,
2309 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2310 break;
2311 default:
2312 id = pci_read_config(dev,
2313 BGE_PCI_PRODID_ASICREV, 4);
2314 }
2315 }
2316 br = bge_lookup_rev(id);
2317 v = bge_lookup_vendor(vid);
2318 if (bge_has_eaddr(sc) &&
2319 pci_get_vpd_ident(dev, &pname) == 0)
2320 snprintf(model, 64, "%s", pname);
2321 else
2322 snprintf(model, 64, "%s %s", v->v_name,
2323 br != NULL ? br->br_name :
2324 "NetXtreme Ethernet Controller");
2325 snprintf(buf, 96, "%s, %sASIC rev. %#08x", model,
2326 br != NULL ? "" : "unknown ", id);
2327 device_set_desc_copy(dev, buf);
2328 return (0);
2329 }
2330 t++;
2331 }
2332
2333 return (ENXIO);
2334 }
2335
2336 static void
2337 bge_dma_free(struct bge_softc *sc)
2338 {
2339 int i;
2340
2341 /* Destroy DMA maps for RX buffers. */
2342 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2343 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2344 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2345 sc->bge_cdata.bge_rx_std_dmamap[i]);
2346 }
2347 if (sc->bge_cdata.bge_rx_std_sparemap)
2348 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2349 sc->bge_cdata.bge_rx_std_sparemap);
2350
2351 /* Destroy DMA maps for jumbo RX buffers. */
2352 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2353 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2354 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2355 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2356 }
2357 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2358 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2359 sc->bge_cdata.bge_rx_jumbo_sparemap);
2360
2361 /* Destroy DMA maps for TX buffers. */
2362 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2363 if (sc->bge_cdata.bge_tx_dmamap[i])
2364 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2365 sc->bge_cdata.bge_tx_dmamap[i]);
2366 }
2367
2368 if (sc->bge_cdata.bge_rx_mtag)
2369 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2370 if (sc->bge_cdata.bge_mtag_jumbo)
2371 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2372 if (sc->bge_cdata.bge_tx_mtag)
2373 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2374
2375 /* Destroy standard RX ring. */
2376 if (sc->bge_cdata.bge_rx_std_ring_map)
2377 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2378 sc->bge_cdata.bge_rx_std_ring_map);
2379 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
2380 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2381 sc->bge_ldata.bge_rx_std_ring,
2382 sc->bge_cdata.bge_rx_std_ring_map);
2383
2384 if (sc->bge_cdata.bge_rx_std_ring_tag)
2385 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2386
2387 /* Destroy jumbo RX ring. */
2388 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
2389 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2390 sc->bge_cdata.bge_rx_jumbo_ring_map);
2391
2392 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
2393 sc->bge_ldata.bge_rx_jumbo_ring)
2394 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2395 sc->bge_ldata.bge_rx_jumbo_ring,
2396 sc->bge_cdata.bge_rx_jumbo_ring_map);
2397
2398 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2399 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2400
2401 /* Destroy RX return ring. */
2402 if (sc->bge_cdata.bge_rx_return_ring_map)
2403 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2404 sc->bge_cdata.bge_rx_return_ring_map);
2405
2406 if (sc->bge_cdata.bge_rx_return_ring_map &&
2407 sc->bge_ldata.bge_rx_return_ring)
2408 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2409 sc->bge_ldata.bge_rx_return_ring,
2410 sc->bge_cdata.bge_rx_return_ring_map);
2411
2412 if (sc->bge_cdata.bge_rx_return_ring_tag)
2413 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2414
2415 /* Destroy TX ring. */
2416 if (sc->bge_cdata.bge_tx_ring_map)
2417 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2418 sc->bge_cdata.bge_tx_ring_map);
2419
2420 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
2421 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2422 sc->bge_ldata.bge_tx_ring,
2423 sc->bge_cdata.bge_tx_ring_map);
2424
2425 if (sc->bge_cdata.bge_tx_ring_tag)
2426 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2427
2428 /* Destroy status block. */
2429 if (sc->bge_cdata.bge_status_map)
2430 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2431 sc->bge_cdata.bge_status_map);
2432
2433 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
2434 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2435 sc->bge_ldata.bge_status_block,
2436 sc->bge_cdata.bge_status_map);
2437
2438 if (sc->bge_cdata.bge_status_tag)
2439 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2440
2441 /* Destroy statistics block. */
2442 if (sc->bge_cdata.bge_stats_map)
2443 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2444 sc->bge_cdata.bge_stats_map);
2445
2446 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
2447 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2448 sc->bge_ldata.bge_stats,
2449 sc->bge_cdata.bge_stats_map);
2450
2451 if (sc->bge_cdata.bge_stats_tag)
2452 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2453
2454 if (sc->bge_cdata.bge_buffer_tag)
2455 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2456
2457 /* Destroy the parent tag. */
2458 if (sc->bge_cdata.bge_parent_tag)
2459 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2460 }
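/*
 * The teardown above mirrors bge_dma_alloc() in reverse: per-buffer
 * DMA maps are destroyed before their mbuf tags, each ring is
 * unloaded and freed before its tag is destroyed, and the buffer
 * and parent tags go last, as bus_dma(9) requires.
 */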
2461
2462 static int
2463 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2464 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2465 bus_addr_t *paddr, const char *msg)
2466 {
2467 struct bge_dmamap_arg ctx;
2468 bus_addr_t lowaddr;
2469 bus_size_t ring_end;
2470 int error;
2471
2472 lowaddr = BUS_SPACE_MAXADDR;
2473 again:
2474 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2475 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2476 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2477 if (error != 0) {
2478 device_printf(sc->bge_dev,
2479 "could not create %s dma tag\n", msg);
2480 return (ENOMEM);
2481 }
2482 /* Allocate DMA'able memory for ring. */
2483 error = bus_dmamem_alloc(*tag, (void **)ring,
2484 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2485 if (error != 0) {
2486 device_printf(sc->bge_dev,
2487 "could not allocate DMA'able memory for %s\n", msg);
2488 return (ENOMEM);
2489 }
2490 /* Load the address of the ring. */
2491 ctx.bge_busaddr = 0;
2492 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2493 &ctx, BUS_DMA_NOWAIT);
2494 if (error != 0) {
2495 device_printf(sc->bge_dev,
2496 "could not load DMA'able memory for %s\n", msg);
2497 return (ENOMEM);
2498 }
2499 *paddr = ctx.bge_busaddr;
2500 ring_end = *paddr + maxsize;
2501 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2502 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2503 /*
2504 * 4GB boundary crossed. Limit maximum allowable DMA
2505 * address space to 32 bits and try again.
2506 */
2507 bus_dmamap_unload(*tag, *map);
2508 bus_dmamem_free(*tag, *ring, *map);
2509 bus_dma_tag_destroy(*tag);
2510 if (bootverbose)
2511 device_printf(sc->bge_dev, "4GB boundary crossed, "
2512 "limit DMA address space to 32bit for %s\n", msg);
2513 *ring = NULL;
2514 *tag = NULL;
2515 *map = NULL;
2516 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2517 goto again;
2518 }
2519 return (0);
2520 }
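/*
 * The retry above hinges on comparing the upper 32 bits of the
 * ring's first and last byte. The address helpers used throughout
 * this file are simple shifts and masks, roughly (see if_bgereg.h
 * for the authoritative definitions):
 *
 * #define BGE_ADDR_LO(y) ((uint64_t)(y) & 0xFFFFFFFF)
 * #define BGE_ADDR_HI(y) ((uint64_t)(y) >> 32)
 *
 * For example, a ring loaded at 0xFFFFF000 with maxsize 0x2000
 * ends at 0x100001000; BGE_ADDR_HI() yields 0 vs. 1, so the tag is
 * recreated with lowaddr limited to BUS_SPACE_MAXADDR_32BIT.
 */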
2521
2522 static int
2523 bge_dma_alloc(struct bge_softc *sc)
2524 {
2525 bus_addr_t lowaddr;
2526 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2527 int i, error;
2528
2529 lowaddr = BUS_SPACE_MAXADDR;
2530 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2531 lowaddr = BGE_DMA_MAXADDR;
2532 /*
2533 * Allocate the parent bus DMA tag appropriate for PCI.
2534 */
2535 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2536 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2537 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2538 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2539 if (error != 0) {
2540 device_printf(sc->bge_dev,
2541 "could not allocate parent dma tag\n");
2542 return (ENOMEM);
2543 }
2544
2545 /* Create tag for standard RX ring. */
2546 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2547 &sc->bge_cdata.bge_rx_std_ring_tag,
2548 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2549 &sc->bge_cdata.bge_rx_std_ring_map,
2550 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2551 if (error)
2552 return (error);
2553
2554 /* Create tag for RX return ring. */
2555 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2556 &sc->bge_cdata.bge_rx_return_ring_tag,
2557 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2558 &sc->bge_cdata.bge_rx_return_ring_map,
2559 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2560 if (error)
2561 return (error);
2562
2563 /* Create tag for TX ring. */
2564 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2565 &sc->bge_cdata.bge_tx_ring_tag,
2566 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2567 &sc->bge_cdata.bge_tx_ring_map,
2568 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2569 if (error)
2570 return (error);
2571
2572 /*
2573 * Create tag for status block.
2574 * Because we use only a single TX/RX/RX return ring, use the
2575 * minimum status block size, except on BCM5700 AX/BX, which
2576 * seems to want to see the full status block size regardless
2577 * of the configured number of rings.
2578 */
2579 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2580 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2581 sbsz = BGE_STATUS_BLK_SZ;
2582 else
2583 sbsz = 32;
2584 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2585 &sc->bge_cdata.bge_status_tag,
2586 (uint8_t **)&sc->bge_ldata.bge_status_block,
2587 &sc->bge_cdata.bge_status_map,
2588 &sc->bge_ldata.bge_status_block_paddr, "status block");
2589 if (error)
2590 return (error);
2591
2592 /* Create tag for statistics block. */
2593 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2594 &sc->bge_cdata.bge_stats_tag,
2595 (uint8_t **)&sc->bge_ldata.bge_stats,
2596 &sc->bge_cdata.bge_stats_map,
2597 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2598 if (error)
2599 return (error);
2600
2601 /* Create tag for jumbo RX ring. */
2602 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2603 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
2604 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
2605 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
2606 &sc->bge_cdata.bge_rx_jumbo_ring_map,
2607 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
2608 if (error)
2609 return (error);
2610 }
2611
2612 /* Create parent tag for buffers. */
2613 boundary = 0;
2614 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
2615 boundary = BGE_DMA_BNDRY;
2616 /*
2617 * XXX
2618 * A watchdog timeout issue was observed on the BCM5704 when it
2619 * lives behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
2620 * Both limiting the DMA address space to 32 bits and flushing
2621 * mailbox writes seem to address the issue.
2622 */
2623 if (sc->bge_pcixcap != 0)
2624 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2625 }
2626 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2627 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
2628 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2629 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
2630 if (error != 0) {
2631 device_printf(sc->bge_dev,
2632 "could not allocate buffer dma tag\n");
2633 return (ENOMEM);
2634 }
2635 /* Create tag for Tx mbufs. */
2636 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2637 txsegsz = BGE_TSOSEG_SZ;
2638 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
2639 } else {
2640 txsegsz = MCLBYTES;
2641 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
2642 }
2643 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
2644 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2645 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
2646 &sc->bge_cdata.bge_tx_mtag);
2647
2648 if (error) {
2649 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
2650 return (ENOMEM);
2651 }
2652
2653 /* Create tag for Rx mbufs. */
2654 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
2655 rxmaxsegsz = MJUM9BYTES;
2656 else
2657 rxmaxsegsz = MCLBYTES;
2658 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
2659 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
2660 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
2661
2662 if (error) {
2663 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
2664 return (ENOMEM);
2665 }
2666
2667 /* Create DMA maps for RX buffers. */
2668 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2669 &sc->bge_cdata.bge_rx_std_sparemap);
2670 if (error) {
2671 device_printf(sc->bge_dev,
2672 "can't create spare DMA map for RX\n");
2673 return (ENOMEM);
2674 }
2675 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2676 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
2677 &sc->bge_cdata.bge_rx_std_dmamap[i]);
2678 if (error) {
2679 device_printf(sc->bge_dev,
2680 "can't create DMA map for RX\n");
2681 return (ENOMEM);
2682 }
2683 }
2684
2685 /* Create DMA maps for TX buffers. */
2686 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2687 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
2688 &sc->bge_cdata.bge_tx_dmamap[i]);
2689 if (error) {
2690 device_printf(sc->bge_dev,
2691 "can't create DMA map for TX\n");
2692 return (ENOMEM);
2693 }
2694 }
2695
2696 /* Create tags for jumbo RX buffers. */
2697 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2698 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
2699 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2700 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
2701 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
2702 if (error) {
2703 device_printf(sc->bge_dev,
2704 "could not allocate jumbo dma tag\n");
2705 return (ENOMEM);
2706 }
2707 /* Create DMA maps for jumbo RX buffers. */
2708 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2709 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
2710 if (error) {
2711 device_printf(sc->bge_dev,
2712 "can't create spare DMA map for jumbo RX\n");
2713 return (ENOMEM);
2714 }
2715 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2716 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2717 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2718 if (error) {
2719 device_printf(sc->bge_dev,
2720 "can't create DMA map for jumbo RX\n");
2721 return (ENOMEM);
2722 }
2723 }
2724 }
2725
2726 return (0);
2727 }
2728
2729 /*
2730 * Return true if this device has more than one port.
2731 */
2732 static int
2733 bge_has_multiple_ports(struct bge_softc *sc)
2734 {
2735 device_t dev = sc->bge_dev;
2736 u_int b, d, f, fscan, s;
2737
2738 d = pci_get_domain(dev);
2739 b = pci_get_bus(dev);
2740 s = pci_get_slot(dev);
2741 f = pci_get_function(dev);
2742 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2743 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2744 return (1);
2745 return (0);
2746 }
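/*
 * Example: a dual-port BCM5704 presents its two MACs as functions
 * 0 and 1 of the same domain/bus/slot, so when called on function
 * 0 the pci_find_dbsf() probe of function 1 succeeds and the
 * device is reported as multi-port.
 */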
2747
2748 /*
2749 * Return true if MSI can be used with this device.
2750 */
2751 static int
2752 bge_can_use_msi(struct bge_softc *sc)
2753 {
2754 int can_use_msi = 0;
2755
2756 if (sc->bge_msi == 0)
2757 return (0);
2758
2759 /* Disable MSI for polling(4). */
2760 #ifdef DEVICE_POLLING
2761 return (0);
2762 #endif
2763 switch (sc->bge_asicrev) {
2764 case BGE_ASICREV_BCM5714_A0:
2765 case BGE_ASICREV_BCM5714:
2766 /*
2767 * Apparently, MSI doesn't work when these chips are
2768 * configured in single-port mode.
2769 */
2770 if (bge_has_multiple_ports(sc))
2771 can_use_msi = 1;
2772 break;
2773 case BGE_ASICREV_BCM5750:
2774 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2775 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2776 can_use_msi = 1;
2777 break;
2778 default:
2779 if (BGE_IS_575X_PLUS(sc))
2780 can_use_msi = 1;
2781 }
2782 return (can_use_msi);
2783 }
2784
2785 static int
2786 bge_mbox_reorder(struct bge_softc *sc)
2787 {
2788 /* Lists of PCI bridges that are known to reorder mailbox writes. */
2789 static const struct mbox_reorder {
2790 const uint16_t vendor;
2791 const uint16_t device;
2792 const char *desc;
2793 } const mbox_reorder_lists[] = {
2794 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
2795 };
2796 devclass_t pci, pcib;
2797 device_t bus, dev;
2798 int count, i;
2799
2800 count = sizeof(mbox_reorder_lists) / sizeof(mbox_reorder_lists[0]);
2801 pci = devclass_find("pci");
2802 pcib = devclass_find("pcib");
2803 dev = sc->bge_dev;
2804 bus = device_get_parent(dev);
2805 for (;;) {
2806 dev = device_get_parent(bus);
2807 bus = device_get_parent(dev);
2808 device_printf(sc->bge_dev, "dev : %s%d, bus : %s%d\n",
2809 device_get_name(dev), device_get_unit(dev),
2810 device_get_name(bus), device_get_unit(bus));
2811 if (device_get_devclass(dev) != pcib)
2812 break;
2813 for (i = 0; i < count; i++) {
2814 device_printf(sc->bge_dev,
2815 "probing dev : %s%d, vendor : 0x%04x "
2816 "device : 0x%04x\n",
2817 device_get_name(dev), device_get_unit(dev),
2818 pci_get_vendor(dev), pci_get_device(dev));
2819 if (pci_get_vendor(dev) ==
2820 mbox_reorder_lists[i].vendor &&
2821 pci_get_device(dev) ==
2822 mbox_reorder_lists[i].device) {
2823 device_printf(sc->bge_dev,
2824 "enabling MBOX workaround for %s\n",
2825 mbox_reorder_lists[i].desc);
2826 return (1);
2827 }
2828 }
2829 if (device_get_devclass(bus) != pci)
2830 break;
2831 }
2832 return (0);
2833 }
2834
2835 static void
2836 bge_devinfo(struct bge_softc *sc)
2837 {
2838 uint32_t cfg, clk;
2839
2840 device_printf(sc->bge_dev,
2841 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
2842 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
2843 if (sc->bge_flags & BGE_FLAG_PCIE)
2844 printf("PCI-E\n");
2845 else if (sc->bge_flags & BGE_FLAG_PCIX) {
2846 printf("PCI-X ");
2847 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
2848 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
2849 clk = 133;
2850 else {
2851 clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
2852 switch (clk) {
2853 case 0:
2854 clk = 33;
2855 break;
2856 case 2:
2857 clk = 50;
2858 break;
2859 case 4:
2860 clk = 66;
2861 break;
2862 case 6:
2863 clk = 100;
2864 break;
2865 case 7:
2866 clk = 133;
2867 break;
2868 }
2869 }
2870 printf("%u MHz\n", clk);
2871 } else {
2872 if (sc->bge_pcixcap != 0)
2873 printf("PCI on PCI-X ");
2874 else
2875 printf("PCI ");
2876 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
2877 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
2878 clk = 66;
2879 else
2880 clk = 33;
2881 if (cfg & BGE_PCISTATE_32BIT_BUS)
2882 printf("%u MHz; 32bit\n", clk);
2883 else
2884 printf("%u MHz; 64bit\n", clk);
2885 }
2886 }
2887
2888 static int
2889 bge_attach(device_t dev)
2890 {
2891 struct ifnet *ifp;
2892 struct bge_softc *sc;
2893 uint32_t hwcfg = 0, misccfg;
2894 u_char eaddr[ETHER_ADDR_LEN];
2895 int capmask, error, f, msicount, phy_addr, reg, rid, trys;
2896
2897 sc = device_get_softc(dev);
2898 sc->bge_dev = dev;
2899
2900 TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
2901
2902 /*
2903 * Map control/status registers.
2904 */
2905 pci_enable_busmaster(dev);
2906
2907 rid = PCIR_BAR(0);
2908 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2909 RF_ACTIVE | PCI_RF_DENSE);
2910
2911 if (sc->bge_res == NULL) {
2912 device_printf (sc->bge_dev, "couldn't map memory\n");
2913 error = ENXIO;
2914 goto fail;
2915 }
2916
2917 /* Save various chip information. */
2918 sc->bge_chipid =
2919 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2920 BGE_PCIMISCCTL_ASICREV_SHIFT;
2921 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2922 /*
2923 * Find the ASIC revision. Different chips use different
2924 * registers.
2925 */
2926 switch (pci_get_device(dev)) {
2927 case BCOM_DEVICEID_BCM5717:
2928 case BCOM_DEVICEID_BCM5718:
2929 case BCOM_DEVICEID_BCM5719:
2930 case BCOM_DEVICEID_BCM5720:
2931 sc->bge_chipid = pci_read_config(dev,
2932 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2933 break;
2934 case BCOM_DEVICEID_BCM57761:
2935 case BCOM_DEVICEID_BCM57765:
2936 case BCOM_DEVICEID_BCM57781:
2937 case BCOM_DEVICEID_BCM57785:
2938 case BCOM_DEVICEID_BCM57791:
2939 case BCOM_DEVICEID_BCM57795:
2940 sc->bge_chipid = pci_read_config(dev,
2941 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2942 break;
2943 default:
2944 sc->bge_chipid = pci_read_config(dev,
2945 BGE_PCI_PRODID_ASICREV, 4);
2946 }
2947 }
2948 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2949 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2950
2951 /* Set default PHY address. */
2952 phy_addr = 1;
2953 /*
2954 * PHY address mapping for various devices.
2955 *
2956 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2957 * ---------+-------+-------+-------+-------+
2958 * BCM57XX | 1 | X | X | X |
2959 * BCM5704 | 1 | X | 1 | X |
2960 * BCM5717 | 1 | 8 | 2 | 9 |
2961 * BCM5719 | 1 | 8 | 2 | 9 |
2962 * BCM5720 | 1 | 8 | 2 | 9 |
2963 *
2964 * Other addresses may respond but they are not
2965 * IEEE compliant PHYs and should be ignored.
2966 */
2967 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2968 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2969 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2970 f = pci_get_function(dev);
2971 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2972 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
2973 BGE_SGDIGSTS_IS_SERDES)
2974 phy_addr = f + 8;
2975 else
2976 phy_addr = f + 1;
2977 } else {
2978 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2979 BGE_CPMU_PHY_STRAP_IS_SERDES)
2980 phy_addr = f + 8;
2981 else
2982 phy_addr = f + 1;
2983 }
2984 }
2985
2986 /*
2987 * Don't enable Ethernet@WireSpeed for the 5700, the 5906, or
2988 * 5705 chips other than revisions A0 and A1.
2989 */
2990 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2991 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2992 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2993 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2994 sc->bge_asicrev == BGE_ASICREV_BCM5906)
2995 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2996
2997 if (bge_has_eaddr(sc))
2998 sc->bge_flags |= BGE_FLAG_EADDR;
2999
3000 /* Save chipset family. */
3001 switch (sc->bge_asicrev) {
3002 case BGE_ASICREV_BCM5717:
3003 case BGE_ASICREV_BCM5719:
3004 case BGE_ASICREV_BCM5720:
3005 case BGE_ASICREV_BCM57765:
3006 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
3007 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
3008 BGE_FLAG_JUMBO_FRAME;
3009 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3010 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3011 /* Jumbo frame on BCM5719 A0 does not work. */
3012 sc->bge_flags &= ~BGE_FLAG_JUMBO;
3013 }
3014 break;
3015 case BGE_ASICREV_BCM5755:
3016 case BGE_ASICREV_BCM5761:
3017 case BGE_ASICREV_BCM5784:
3018 case BGE_ASICREV_BCM5785:
3019 case BGE_ASICREV_BCM5787:
3020 case BGE_ASICREV_BCM57780:
3021 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3022 BGE_FLAG_5705_PLUS;
3023 break;
3024 case BGE_ASICREV_BCM5700:
3025 case BGE_ASICREV_BCM5701:
3026 case BGE_ASICREV_BCM5703:
3027 case BGE_ASICREV_BCM5704:
3028 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3029 break;
3030 case BGE_ASICREV_BCM5714_A0:
3031 case BGE_ASICREV_BCM5780:
3032 case BGE_ASICREV_BCM5714:
3033 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3034 /* FALLTHROUGH */
3035 case BGE_ASICREV_BCM5750:
3036 case BGE_ASICREV_BCM5752:
3037 case BGE_ASICREV_BCM5906:
3038 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3039 /* FALLTHROUGH */
3040 case BGE_ASICREV_BCM5705:
3041 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3042 break;
3043 }
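/*
 * The fallthroughs above make each newer family accumulate the
 * baseline flags of the generations it extends; e.g. a BCM5714
 * ends up with BGE_FLAG_5714_FAMILY, BGE_FLAG_JUMBO_STD,
 * BGE_FLAG_575X_PLUS and BGE_FLAG_5705_PLUS all set.
 */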
3044
3045 /* Add SYSCTLs, requires the chipset family to be set. */
3046 bge_add_sysctls(sc);
3047
3048 /* Set various PHY bug flags. */
3049 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3050 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3051 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3052 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3053 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3054 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3055 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3056 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3057 if (pci_get_subvendor(dev) == DELL_VENDORID)
3058 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3059 if ((BGE_IS_5705_PLUS(sc)) &&
3060 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3061 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
3062 sc->bge_asicrev != BGE_ASICREV_BCM5719 &&
3063 sc->bge_asicrev != BGE_ASICREV_BCM5720 &&
3064 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3065 sc->bge_asicrev != BGE_ASICREV_BCM57765 &&
3066 sc->bge_asicrev != BGE_ASICREV_BCM57780) {
3067 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3068 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3069 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3070 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3071 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3072 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3073 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3074 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3075 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3076 } else
3077 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3078 }
3079
3080 /* Identify the chips that use a CPMU. */
3081 if (BGE_IS_5717_PLUS(sc) ||
3082 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3083 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3084 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3085 sc->bge_asicrev == BGE_ASICREV_BCM57780)
3086 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3087 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3088 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3089 else
3090 sc->bge_mi_mode = BGE_MIMODE_BASE;
3091 /* Enable auto polling for BCM570[0-5]. */
3092 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3093 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3094
3095 /*
3096 * All Broadcom controllers have the 4GB boundary DMA bug.
3097 * Whenever an address crosses a multiple of the 4GB boundary
3098 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
3099 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3100 * state machine will lock up and cause the device to hang.
3101 */
3102 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
3103
3104 /* BCM5755 or higher and the BCM5906 have the short DMA bug. */
3105 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3106 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3107
3108 /*
3109 * The BCM5719 cannot handle DMA requests for DMA segments
3110 * larger than 4KB. However, the maximum DMA segment size
3111 * created in the DMA tag is 4KB for TSO, so we should not
3112 * encounter the issue here.
3113 */
3114 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3115 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3116
3117 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3118 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3119 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3120 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3121 sc->bge_flags |= BGE_FLAG_5788;
3122 }
3123
3124 capmask = BMSR_DEFCAPMASK;
3125 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3126 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3127 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3128 pci_get_vendor(dev) == BCOM_VENDORID &&
3129 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3130 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3131 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3132 (pci_get_vendor(dev) == BCOM_VENDORID &&
3133 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3134 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3135 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3136 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3137 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3138 /* These chips are 10/100 only. */
3139 capmask &= ~BMSR_EXTSTAT;
3140 }
3141
3142 /*
3143 * Some controllers seem to require special firmware to use
3144 * TSO. But the firmware is not available to FreeBSD, and Linux
3145 * claims that the TSO performed by the firmware is slower than
3146 * hardware-based TSO. Moreover, the firmware-based TSO has a
3147 * known bug that cannot handle TSO if the ethernet header plus
3148 * IP/TCP header is greater than 80 bytes. A workaround for the
3149 * TSO bug exists, but it seems more expensive than not using
3150 * TSO at all. Some hardware also has the TSO bug, so limit
3151 * TSO to controllers that are not affected by TSO issues
3152 * (e.g. 5755 or higher).
3153 */
3154 if (BGE_IS_5717_PLUS(sc)) {
3155 /* BCM5717 requires different TSO configuration. */
3156 sc->bge_flags |= BGE_FLAG_TSO3;
3157 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3158 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3159 /* TSO on BCM5719 A0 does not work. */
3160 sc->bge_flags &= ~BGE_FLAG_TSO3;
3161 }
3162 } else if (BGE_IS_5755_PLUS(sc)) {
3163 /*
3164 * BCM5754 and BCM5787 share the same ASIC ID, so an
3165 * explicit device ID check is required.
3166 * For unknown reasons, TSO does not work on the BCM5755M.
3167 */
3168 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3169 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3170 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3171 sc->bge_flags |= BGE_FLAG_TSO;
3172 }
3173
3174 /*
3175 * Check if this is a PCI-X or PCI Express device.
3176 */
3177 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
3178 /*
3179 * Found a PCI Express capabilities register, so this
3180 * must be a PCI Express device.
3181 */
3182 sc->bge_flags |= BGE_FLAG_PCIE;
3183 sc->bge_expcap = reg;
3184 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3185 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3186 pci_set_max_read_req(dev, 2048);
3187 else if (pci_get_max_read_req(dev) != 4096)
3188 pci_set_max_read_req(dev, 4096);
3189 } else {
3190 /*
3191 * Check if the device is in PCI-X Mode.
3192 * (This bit is not valid on PCI Express controllers.)
3193 */
3194 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
3195 sc->bge_pcixcap = reg;
3196 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3197 BGE_PCISTATE_PCI_BUSMODE) == 0)
3198 sc->bge_flags |= BGE_FLAG_PCIX;
3199 }
3200
3201 /*
3202 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3203 * not actually a MAC controller bug but an issue with the embedded
3204 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3205 */
3206 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3207 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3208 /*
3209 * Some PCI-X bridges are known to trigger write reordering to
3210 * the mailbox registers. The typical symptom is watchdog
3211 * timeouts caused by out-of-order TX completions. Enable the
3212 * workaround for PCI-X devices that live behind these bridges.
3213 * Note: PCI-X controllers can run in PCI mode, so we can't use
3214 * the BGE_FLAG_PCIX flag to detect PCI-X controllers.
3215 */
3216 if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3217 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3218 /*
3219 * Allocate the interrupt, using MSI if possible. These devices
3220 * support 8 MSI messages, but only the first one is used in
3221 * normal operation.
3222 */
3223 rid = 0;
3224 if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3225 sc->bge_msicap = reg;
3226 if (bge_can_use_msi(sc)) {
3227 msicount = pci_msi_count(dev);
3228 if (msicount > 1)
3229 msicount = 1;
3230 } else
3231 msicount = 0;
3232 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
3233 rid = 1;
3234 sc->bge_flags |= BGE_FLAG_MSI;
3235 }
3236 }
3237
3238 /*
3239 * All controllers except the BCM5700 support tagged status,
3240 * but we use tagged status only in the MSI case on BCM5717.
3241 * Otherwise, MSI on BCM5717 does not work.
3242 */
3243 #ifndef DEVICE_POLLING
3244 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3245 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3246 #endif
3247
3248 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3249 RF_SHAREABLE | RF_ACTIVE);
3250
3251 if (sc->bge_irq == NULL) {
3252 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3253 error = ENXIO;
3254 goto fail;
3255 }
3256
3257 bge_devinfo(sc);
3258
3259 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3260
3261 /* Try to reset the chip. */
3262 if (bge_reset(sc)) {
3263 device_printf(sc->bge_dev, "chip reset failed\n");
3264 error = ENXIO;
3265 goto fail;
3266 }
3267
3268 sc->bge_asf_mode = 0;
3269 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3270 BGE_SRAM_DATA_SIG_MAGIC)) {
3271 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG)
3272 & BGE_HWCFG_ASF) {
3273 sc->bge_asf_mode |= ASF_ENABLE;
3274 sc->bge_asf_mode |= ASF_STACKUP;
3275 if (BGE_IS_575X_PLUS(sc))
3276 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3277 }
3278 }
3279
3280 /* Try to reset the chip again the nice way. */
3281 bge_stop_fw(sc);
3282 bge_sig_pre_reset(sc, BGE_RESET_STOP);
3283 if (bge_reset(sc)) {
3284 device_printf(sc->bge_dev, "chip reset failed\n");
3285 error = ENXIO;
3286 goto fail;
3287 }
3288
3289 bge_sig_legacy(sc, BGE_RESET_STOP);
3290 bge_sig_post_reset(sc, BGE_RESET_STOP);
3291
3292 if (bge_chipinit(sc)) {
3293 device_printf(sc->bge_dev, "chip initialization failed\n");
3294 error = ENXIO;
3295 goto fail;
3296 }
3297
3298 error = bge_get_eaddr(sc, eaddr);
3299 if (error) {
3300 device_printf(sc->bge_dev,
3301 "failed to read station address\n");
3302 error = ENXIO;
3303 goto fail;
3304 }
3305
3306 /* 5705 limits RX return ring to 512 entries. */
3307 if (BGE_IS_5717_PLUS(sc))
3308 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3309 else if (BGE_IS_5705_PLUS(sc))
3310 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3311 else
3312 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3313
3314 if (bge_dma_alloc(sc)) {
3315 device_printf(sc->bge_dev,
3316 "failed to allocate DMA resources\n");
3317 error = ENXIO;
3318 goto fail;
3319 }
3320
3321 /* Set default tuneable values. */
3322 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3323 sc->bge_rx_coal_ticks = 150;
3324 sc->bge_tx_coal_ticks = 150;
3325 sc->bge_rx_max_coal_bds = 10;
3326 sc->bge_tx_max_coal_bds = 10;
3327
3328 /* Initialize checksum features to use. */
3329 sc->bge_csum_features = BGE_CSUM_FEATURES;
3330 if (sc->bge_forced_udpcsum != 0)
3331 sc->bge_csum_features |= CSUM_UDP;
3332
3333 /* Set up ifnet structure */
3334 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3335 if (ifp == NULL) {
3336 device_printf(sc->bge_dev, "failed to if_alloc()\n");
3337 error = ENXIO;
3338 goto fail;
3339 }
3340 ifp->if_softc = sc;
3341 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3342 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3343 ifp->if_ioctl = bge_ioctl;
3344 ifp->if_start = bge_start;
3345 ifp->if_init = bge_init;
3346 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
3347 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
3348 IFQ_SET_READY(&ifp->if_snd);
3349 ifp->if_hwassist = sc->bge_csum_features;
3350 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3351 IFCAP_VLAN_MTU;
3352 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3353 ifp->if_hwassist |= CSUM_TSO;
3354 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
3355 }
3356 #ifdef IFCAP_VLAN_HWCSUM
3357 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
3358 #endif
3359 ifp->if_capenable = ifp->if_capabilities;
3360 #ifdef DEVICE_POLLING
3361 ifp->if_capabilities |= IFCAP_POLLING;
3362 #endif
3363
3364 /*
3365 * 5700 B0 chips do not support checksumming correctly due
3366 * to hardware bugs.
3367 */
3368 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3369 ifp->if_capabilities &= ~IFCAP_HWCSUM;
3370 ifp->if_capenable &= ~IFCAP_HWCSUM;
3371 ifp->if_hwassist = 0;
3372 }
3373
3374 /*
3375 * Figure out what sort of media we have by checking the
3376 * hardware config word in the first 32k of NIC internal memory,
3377 * or fall back to examining the EEPROM if necessary.
3378 * Note: on some BCM5700 cards, this value appears to be unset.
3379 * If that's the case, we have to rely on identifying the NIC
3380 * by its PCI subsystem ID, as we do below for the SysKonnect
3381 * SK-9D41.
3382 */
3383 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3384 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3385 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3386 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3387 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3388 sizeof(hwcfg))) {
3389 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3390 error = ENXIO;
3391 goto fail;
3392 }
3393 hwcfg = ntohl(hwcfg);
3394 }
3395
3396 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3397 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3398 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3399 if (BGE_IS_5714_FAMILY(sc))
3400 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3401 else
3402 sc->bge_flags |= BGE_FLAG_TBI;
3403 }
3404
3405 if (sc->bge_flags & BGE_FLAG_TBI) {
3406 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3407 bge_ifmedia_sts);
3408 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3409 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3410 0, NULL);
3411 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3412 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3413 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3414 } else {
3415 /*
3416 * Do transceiver setup and tell the firmware the
3417 * driver is down so that we can try to get access to
3418 * probe the PHY if ASF is running. Retry a couple of times
3419 * if we get a conflict with the ASF firmware accessing
3420 * the PHY.
3421 */
3422 trys = 0;
3423 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3424 again:
3425 bge_asf_driver_up(sc);
3426
3427 error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
3428 bge_ifmedia_sts, capmask, phy_addr, MII_OFFSET_ANY,
3429 MIIF_DOPAUSE | MIIF_FORCEPAUSE);
3430 if (error != 0) {
3431 if (trys++ < 4) {
3432 device_printf(sc->bge_dev, "Try again\n");
3433 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
3434 BMCR_RESET);
3435 goto again;
3436 }
3437 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3438 goto fail;
3439 }
3440
3441 /*
3442 * Now tell the firmware we are going up after probing the PHY
3443 */
3444 if (sc->bge_asf_mode & ASF_STACKUP)
3445 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3446 }
3447
3448 /*
3449 * When using the BCM5701 in PCI-X mode, data corruption has
3450 * been observed in the first few bytes of some received packets.
3451 * Aligning the packet buffer in memory eliminates the corruption.
3452 * Unfortunately, this misaligns the packet payloads. On platforms
3453 * which do not support unaligned accesses, we will realign the
3454 * payloads by copying the received packets.
3455 */
3456 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3457 sc->bge_flags & BGE_FLAG_PCIX)
3458 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3459
3460 /*
3461 * Call MI attach routine.
3462 */
3463 ether_ifattach(ifp, eaddr);
3464 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3465
3466 /* Tell upper layer we support long frames. */
3467 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3468
3469 /*
3470 * Hookup IRQ last.
3471 */
3472 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3473 /* Take advantage of single-shot MSI. */
3474 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3475 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3476 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3477 taskqueue_thread_enqueue, &sc->bge_tq);
3478 if (sc->bge_tq == NULL) {
3479 device_printf(dev, "could not create taskqueue.\n");
3480 ether_ifdetach(ifp);
3481 error = ENXIO;
3482 goto fail;
3483 }
3484 taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq",
3485 device_get_nameunit(sc->bge_dev));
3486 error = bus_setup_intr(dev, sc->bge_irq,
3487 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3488 &sc->bge_intrhand);
3489 if (error)
3490 ether_ifdetach(ifp);
3491 } else
3492 error = bus_setup_intr(dev, sc->bge_irq,
3493 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3494 &sc->bge_intrhand);
3495
3496 if (error) {
3497 bge_detach(dev);
3498 device_printf(sc->bge_dev, "couldn't set up irq\n");
3499 }
3500
3501 return (0);
3502
3503 fail:
3504 bge_release_resources(sc);
3505
3506 return (error);
3507 }
3508
3509 static int
3510 bge_detach(device_t dev)
3511 {
3512 struct bge_softc *sc;
3513 struct ifnet *ifp;
3514
3515 sc = device_get_softc(dev);
3516 ifp = sc->bge_ifp;
3517
3518 #ifdef DEVICE_POLLING
3519 if (ifp->if_capenable & IFCAP_POLLING)
3520 ether_poll_deregister(ifp);
3521 #endif
3522
3523 BGE_LOCK(sc);
3524 bge_stop(sc);
3525 bge_reset(sc);
3526 BGE_UNLOCK(sc);
3527
3528 callout_drain(&sc->bge_stat_ch);
3529
3530 if (sc->bge_tq)
3531 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3532 ether_ifdetach(ifp);
3533
3534 if (sc->bge_flags & BGE_FLAG_TBI) {
3535 ifmedia_removeall(&sc->bge_ifmedia);
3536 } else {
3537 bus_generic_detach(dev);
3538 device_delete_child(dev, sc->bge_miibus);
3539 }
3540
3541 bge_release_resources(sc);
3542
3543 return (0);
3544 }
3545
3546 static void
3547 bge_release_resources(struct bge_softc *sc)
3548 {
3549 device_t dev;
3550
3551 dev = sc->bge_dev;
3552
3553 if (sc->bge_tq != NULL)
3554 taskqueue_free(sc->bge_tq);
3555
3556 if (sc->bge_intrhand != NULL)
3557 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3558
3559 if (sc->bge_irq != NULL)
3560 bus_release_resource(dev, SYS_RES_IRQ,
3561 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
3562
3563 if (sc->bge_flags & BGE_FLAG_MSI)
3564 pci_release_msi(dev);
3565
3566 if (sc->bge_res != NULL)
3567 bus_release_resource(dev, SYS_RES_MEMORY,
3568 PCIR_BAR(0), sc->bge_res);
3569
3570 if (sc->bge_ifp != NULL)
3571 if_free(sc->bge_ifp);
3572
3573 bge_dma_free(sc);
3574
3575 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3576 BGE_LOCK_DESTROY(sc);
3577 }
3578
3579 static int
3580 bge_reset(struct bge_softc *sc)
3581 {
3582 device_t dev;
3583 uint32_t cachesize, command, pcistate, reset, val;
3584 void (*write_op)(struct bge_softc *, int, int);
3585 uint16_t devctl;
3586 int i;
3587
3588 dev = sc->bge_dev;
3589
3590 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3591 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3592 if (sc->bge_flags & BGE_FLAG_PCIE)
3593 write_op = bge_writemem_direct;
3594 else
3595 write_op = bge_writemem_ind;
3596 } else
3597 write_op = bge_writereg_ind;
3598
3599 /* Save some important PCI state. */
3600 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
3601 command = pci_read_config(dev, BGE_PCI_CMD, 4);
3602 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3603
3604 pci_write_config(dev, BGE_PCI_MISC_CTL,
3605 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3606 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3607
3608 /* Disable fastboot on controllers that support it. */
3609 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
3610 BGE_IS_5755_PLUS(sc)) {
3611 if (bootverbose)
3612 device_printf(dev, "Disabling fastboot\n");
3613 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
3614 }
3615
3616 /*
3617 * Write the magic number to SRAM at offset 0xB50.
3618 * When the firmware finishes its initialization, it will
3619 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
3620 */
3621 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
3622
3623 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3624
3625 /* XXX: Broadcom Linux driver. */
3626 if (sc->bge_flags & BGE_FLAG_PCIE) {
3627 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
3628 CSR_WRITE_4(sc, 0x7E2C, 0x20);
3629 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3630 /* Prevent PCIE link training during global reset */
3631 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
3632 reset |= 1 << 29;
3633 }
3634 }
3635
3636 /*
3637 * Set the GPHY Power Down Override to leave the GPHY
3638 * powered up while in the D0 uninitialized state.
3639 */
3640 if (BGE_IS_5705_PLUS(sc) &&
3641 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
3642 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
3643
3644 /* Issue global reset */
3645 write_op(sc, BGE_MISC_CFG, reset);
3646
3647 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3648 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3649 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3650 val | BGE_VCPU_STATUS_DRV_RESET);
3651 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3652 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3653 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3654 }
3655
3656 DELAY(1000);
3657
3658 /* XXX: Broadcom Linux driver. */
3659 if (sc->bge_flags & BGE_FLAG_PCIE) {
3660 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3661 DELAY(500000); /* wait for link training to complete */
3662 val = pci_read_config(dev, 0xC4, 4);
3663 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3664 }
3665 devctl = pci_read_config(dev,
3666 sc->bge_expcap + PCIER_DEVICE_CTL, 2);
3667 /* Clear the "enable no snoop" bit and disable relaxed ordering. */
3668 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
3669 PCIEM_CTL_NOSNOOP_ENABLE);
3670 /* Set PCIE max payload size to 128. */
3671 devctl &= ~PCIEM_CTL_MAX_PAYLOAD;
3672 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
3673 devctl, 2);
3674 /* Clear error status. */
3675 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
3676 PCIEM_STA_CORRECTABLE_ERROR |
3677 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
3678 PCIEM_STA_UNSUPPORTED_REQ, 2);
3679 }
3680
3681 /* Reset some of the PCI state that got zapped by reset. */
3682 pci_write_config(dev, BGE_PCI_MISC_CTL,
3683 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3684 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
3685 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
3686 pci_write_config(dev, BGE_PCI_CMD, command, 4);
3687 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
3688 /*
3689 * Disable PCI-X relaxed ordering so that the status block update
3690 * arrives before the packet buffer DMA. Otherwise the driver may
3691 * read a stale status block.
3692 */
3693 if (sc->bge_flags & BGE_FLAG_PCIX) {
3694 devctl = pci_read_config(dev,
3695 sc->bge_pcixcap + PCIXR_COMMAND, 2);
3696 devctl &= ~PCIXM_COMMAND_ERO;
3697 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
3698 devctl &= ~PCIXM_COMMAND_MAX_READ;
3699 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3700 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3701 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
3702 PCIXM_COMMAND_MAX_READ);
3703 devctl |= PCIXM_COMMAND_MAX_READ_2048;
3704 }
3705 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
3706 devctl, 2);
3707 }
3708 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
3709 if (BGE_IS_5714_FAMILY(sc)) {
3710 /* This chip disables MSI on reset. */
3711 if (sc->bge_flags & BGE_FLAG_MSI) {
3712 val = pci_read_config(dev,
3713 sc->bge_msicap + PCIR_MSI_CTRL, 2);
3714 pci_write_config(dev,
3715 sc->bge_msicap + PCIR_MSI_CTRL,
3716 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3717 val = CSR_READ_4(sc, BGE_MSI_MODE);
3718 CSR_WRITE_4(sc, BGE_MSI_MODE,
3719 val | BGE_MSIMODE_ENABLE);
3720 }
3721 val = CSR_READ_4(sc, BGE_MARB_MODE);
3722 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3723 } else
3724 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3725
3726 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3727 for (i = 0; i < BGE_TIMEOUT; i++) {
3728 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3729 if (val & BGE_VCPU_STATUS_INIT_DONE)
3730 break;
3731 DELAY(100);
3732 }
3733 if (i == BGE_TIMEOUT) {
3734 device_printf(dev, "reset timed out\n");
3735 return (1);
3736 }
3737 } else {
3738 /*
3739 * Poll until we see the 1's complement of the magic number.
3740 * This indicates that the firmware initialization is complete.
3741 * Note that this is expected to fail if no chip containing
3742 * the Ethernet address is fitted.
3743 */
3744 for (i = 0; i < BGE_TIMEOUT; i++) {
3745 DELAY(10);
3746 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
3747 if (val == ~BGE_SRAM_FW_MB_MAGIC)
3748 break;
3749 }
3750
3751 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
3752 device_printf(dev,
3753 "firmware handshake timed out, found 0x%08x\n",
3754 val);
3755 /* BCM57765 A0 needs additional time before register accesses. */
3756 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3757 DELAY(10 * 1000); /* XXX */
3758 }
3759
3760 /*
3761 * XXX Wait for the value of the PCISTATE register to
3762 * return to its original pre-reset state. This is a
3763 * fairly good indicator of reset completion. If we don't
3764 * wait for the reset to fully complete, trying to read
3765 * from the device's non-PCI registers may yield garbage
3766 * results.
3767 */
3768 for (i = 0; i < BGE_TIMEOUT; i++) {
3769 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
3770 break;
3771 DELAY(10);
3772 }
3773
3774 /* Fix up byte swapping. */
3775 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3776
3777 /* Tell the ASF firmware we are up */
3778 if (sc->bge_asf_mode & ASF_STACKUP)
3779 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3780
3781 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3782
3783 /*
3784 * The 5704 in TBI mode apparently needs some special
3785 * adjustment to ensure the SERDES drive level is set
3786 * to 1.2V.
3787 */
3788 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
3789 sc->bge_flags & BGE_FLAG_TBI) {
3790 val = CSR_READ_4(sc, BGE_SERDES_CFG);
3791 val = (val & ~0xFFF) | 0x880;
3792 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3793 }
3794
3795 /* XXX: Broadcom Linux driver. */
3796 if (sc->bge_flags & BGE_FLAG_PCIE &&
3797 !BGE_IS_5717_PLUS(sc) &&
3798 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3799 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
3800 /* Enable Data FIFO protection. */
3801 val = CSR_READ_4(sc, 0x7C00);
3802 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
3803 }
3804 DELAY(10000);
3805
3806 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
3807 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3808 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3809
3810 return (0);
3811 }
3812
3813 static __inline void
3814 bge_rxreuse_std(struct bge_softc *sc, int i)
3815 {
3816 struct bge_rx_bd *r;
3817
3818 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
3819 r->bge_flags = BGE_RXBDFLAG_END;
3820 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
3821 r->bge_idx = i;
3822 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3823 }
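/*
 * For reference (a sketch; see if_bgereg.h for the actual definition):
 * BGE_INC advances a ring index with wrap-around, roughly
 *
 *	#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
 *
 * so this reuse helper and bge_rxreuse_jumbo() below simply rewrite
 * the descriptor in place and step the producer index to the next slot.
 */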
3824
3825 static __inline void
3826 bge_rxreuse_jumbo(struct bge_softc *sc, int i)
3827 {
3828 struct bge_extrx_bd *r;
3829
3830 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
3831 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
3832 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
3833 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
3834 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
3835 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
3836 r->bge_idx = i;
3837 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3838 }
3839
3840 /*
3841 * Frame reception handling. This is called if there's a frame
3842 * on the receive return list.
3843 *
3844 * Note: we have to be able to handle two possibilities here:
3845 * 1) the frame is from the jumbo receive ring
3846 * 2) the frame is from the standard receive ring
3847 */
3848
3849 static void
3850 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
3851 {
3852 struct ifnet *ifp;
3853 int stdcnt = 0, jumbocnt = 0;
3854 uint16_t rx_cons;
3855
3856 rx_cons = sc->bge_rx_saved_considx;
3857
3858 /* Nothing to do. */
3859 if (rx_cons == rx_prod)
3860 return;
3861
3862 ifp = sc->bge_ifp;
3863
3864 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3865 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
3866 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3867 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
3868 if (BGE_IS_JUMBO_CAPABLE(sc) &&
3869 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
3870 (MCLBYTES - ETHER_ALIGN))
3871 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3872 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
3873
3874 while (rx_cons != rx_prod) {
3875 struct bge_rx_bd *cur_rx;
3876 uint32_t rxidx;
3877 struct mbuf *m = NULL;
3878 uint16_t vlan_tag = 0;
3879 int have_tag = 0;
3880
3881 #ifdef DEVICE_POLLING
3882 if (ifp->if_capenable & IFCAP_POLLING) {
3883 if (sc->rxcycles <= 0)
3884 break;
3885 sc->rxcycles--;
3886 }
3887 #endif
3888
3889 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
3890
3891 rxidx = cur_rx->bge_idx;
3892 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3893
3894 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
3895 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3896 have_tag = 1;
3897 vlan_tag = cur_rx->bge_vlan_tag;
3898 }
3899
3900 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3901 jumbocnt++;
3902 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3903 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3904 bge_rxreuse_jumbo(sc, rxidx);
3905 continue;
3906 }
3907 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
3908 bge_rxreuse_jumbo(sc, rxidx);
3909 ifp->if_iqdrops++;
3910 continue;
3911 }
3912 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3913 } else {
3914 stdcnt++;
3915 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3916 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3917 bge_rxreuse_std(sc, rxidx);
3918 continue;
3919 }
3920 if (bge_newbuf_std(sc, rxidx) != 0) {
3921 bge_rxreuse_std(sc, rxidx);
3922 ifp->if_iqdrops++;
3923 continue;
3924 }
3925 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3926 }
3927
3928 ifp->if_ipackets++;
3929 #ifndef __NO_STRICT_ALIGNMENT
3930 /*
3931 * For architectures with strict alignment we must make sure
3932 * the payload is aligned.
3933 */
3934 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
3935 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3936 cur_rx->bge_len);
3937 m->m_data += ETHER_ALIGN;
3938 }
3939 #endif
3940 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3941 m->m_pkthdr.rcvif = ifp;
3942
3943 if (ifp->if_capenable & IFCAP_RXCSUM)
3944 bge_rxcsum(sc, cur_rx, m);
3945
3946 /*
3947 * If we received a packet with a vlan tag,
3948 * attach that information to the packet.
3949 */
3950 if (have_tag) {
3951 m->m_pkthdr.ether_vtag = vlan_tag;
3952 m->m_flags |= M_VLANTAG;
3953 }
3954
3955 if (holdlck != 0) {
3956 BGE_UNLOCK(sc);
3957 (*ifp->if_input)(ifp, m);
3958 BGE_LOCK(sc);
3959 } else
3960 (*ifp->if_input)(ifp, m);
3961
3962 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
3963 return;
3964 }
3965
3966 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
3967 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
3968 if (stdcnt > 0)
3969 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
3970 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
3971
3972 if (jumbocnt > 0)
3973 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
3974 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
3975
3976 sc->bge_rx_saved_considx = rx_cons;
3977 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
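	/*
	 * Note: sc->bge_std and sc->bge_jumbo point one slot past the
	 * last refilled descriptor, so the producer mailboxes below are
	 * written with that value minus one, modulo the ring size.
	 */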
3978 if (stdcnt)
3979 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
3980 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
3981 if (jumbocnt)
3982 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
3983 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
3984 #ifdef notyet
3985 /*
3986 * This register wraps very quickly under heavy packet drops.
3987 * If you need correct statistics, you can enable this check.
3988 */
3989 if (BGE_IS_5705_PLUS(sc))
3990 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3991 #endif
3992 }
3993
3994 static void
3995 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3996 {
3997
3998 if (BGE_IS_5717_PLUS(sc)) {
3999 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4000 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4001 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4002 if ((cur_rx->bge_error_flag &
4003 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
4004 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4005 }
4006 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4007 m->m_pkthdr.csum_data =
4008 cur_rx->bge_tcp_udp_csum;
4009 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4010 CSUM_PSEUDO_HDR;
4011 }
4012 }
4013 } else {
4014 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4015 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4016 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
4017 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4018 }
4019 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4020 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
4021 m->m_pkthdr.csum_data =
4022 cur_rx->bge_tcp_udp_csum;
4023 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4024 CSUM_PSEUDO_HDR;
4025 }
4026 }
4027 }
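/*
 * A note on the legacy path above (interpretation, not driver code):
 * bge_ip_csum holds the one's complement sum the MAC computed over the
 * received IP header, so a correct header yields the all-ones value and
 * (bge_ip_csum ^ 0xFFFF) == 0.  For TCP/UDP, setting CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR tells the stack that csum_data already covers the
 * pseudo-header; the stack then accepts the packet when
 * csum_data == 0xFFFF without recomputing anything.
 */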
4028
4029 static void
4030 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4031 {
4032 struct bge_tx_bd *cur_tx;
4033 struct ifnet *ifp;
4034
4035 BGE_LOCK_ASSERT(sc);
4036
4037 /* Nothing to do. */
4038 if (sc->bge_tx_saved_considx == tx_cons)
4039 return;
4040
4041 ifp = sc->bge_ifp;
4042
4043 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4044 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4045 /*
4046 * Go through our tx ring and free mbufs for those
4047 * frames that have been sent.
4048 */
4049 while (sc->bge_tx_saved_considx != tx_cons) {
4050 uint32_t idx;
4051
4052 idx = sc->bge_tx_saved_considx;
4053 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4054 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4055 ifp->if_opackets++;
4056 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4057 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4058 sc->bge_cdata.bge_tx_dmamap[idx],
4059 BUS_DMASYNC_POSTWRITE);
4060 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4061 sc->bge_cdata.bge_tx_dmamap[idx]);
4062 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4063 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4064 }
4065 sc->bge_txcnt--;
4066 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4067 }
4068
4069 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4070 if (sc->bge_txcnt == 0)
4071 sc->bge_timer = 0;
4072 }
4073
4074 #ifdef DEVICE_POLLING
4075 static void
4076 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
4077 {
4078 struct bge_softc *sc = ifp->if_softc;
4079 uint16_t rx_prod, tx_cons;
4080 uint32_t statusword;
4081
4082 BGE_LOCK(sc);
4083 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4084 BGE_UNLOCK(sc);
4085 return;
4086 }
4087
4088 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4089 sc->bge_cdata.bge_status_map,
4090 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4091 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4092 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4093
4094 statusword = sc->bge_ldata.bge_status_block->bge_status;
4095 sc->bge_ldata.bge_status_block->bge_status = 0;
4096
4097 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4098 sc->bge_cdata.bge_status_map,
4099 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4100
4101 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4102 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4103 sc->bge_link_evt++;
4104
4105 if (cmd == POLL_AND_CHECK_STATUS)
4106 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4107 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4108 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4109 bge_link_upd(sc);
4110
4111 sc->rxcycles = count;
4112 bge_rxeof(sc, rx_prod, 1);
4113 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4114 BGE_UNLOCK(sc);
4115 return;
4116 }
4117 bge_txeof(sc, tx_cons);
4118 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4119 bge_start_locked(ifp);
4120
4121 BGE_UNLOCK(sc);
4122 }
4123 #endif /* DEVICE_POLLING */
4124
4125 static int
4126 bge_msi_intr(void *arg)
4127 {
4128 struct bge_softc *sc;
4129
4130 sc = (struct bge_softc *)arg;
4131 /*
4132 * This interrupt is not shared, and the controller has
4133 * already disabled further interrupts.
4134 */
4135 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4136 return (FILTER_HANDLED);
4137 }
4138
4139 static void
4140 bge_intr_task(void *arg, int pending)
4141 {
4142 struct bge_softc *sc;
4143 struct ifnet *ifp;
4144 uint32_t status, status_tag;
4145 uint16_t rx_prod, tx_cons;
4146
4147 sc = (struct bge_softc *)arg;
4148 ifp = sc->bge_ifp;
4149
4150 BGE_LOCK(sc);
4151 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4152 BGE_UNLOCK(sc);
4153 return;
4154 }
4155
4156 /* Get updated status block. */
4157 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4158 sc->bge_cdata.bge_status_map,
4159 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4160
4161 /* Save producer/consumer indexes. */
4162 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4163 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4164 status = sc->bge_ldata.bge_status_block->bge_status;
4165 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4166 sc->bge_ldata.bge_status_block->bge_status = 0;
4167 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4168 sc->bge_cdata.bge_status_map,
4169 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4170 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4171 status_tag = 0;
4172
4173 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4174 bge_link_upd(sc);
4175
4176 /* Ack the interrupt and let the controller work. */
4177 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4178
4179 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4180 sc->bge_rx_saved_considx != rx_prod) {
4181 /* Check RX return ring producer/consumer. */
4182 BGE_UNLOCK(sc);
4183 bge_rxeof(sc, rx_prod, 0);
4184 BGE_LOCK(sc);
4185 }
4186 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4187 /* Check TX ring producer/consumer. */
4188 bge_txeof(sc, tx_cons);
4189 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4190 bge_start_locked(ifp);
4191 }
4192 BGE_UNLOCK(sc);
4193 }
4194
4195 static void
4196 bge_intr(void *xsc)
4197 {
4198 struct bge_softc *sc;
4199 struct ifnet *ifp;
4200 uint32_t statusword;
4201 uint16_t rx_prod, tx_cons;
4202
4203 sc = xsc;
4204
4205 BGE_LOCK(sc);
4206
4207 ifp = sc->bge_ifp;
4208
4209 #ifdef DEVICE_POLLING
4210 if (ifp->if_capenable & IFCAP_POLLING) {
4211 BGE_UNLOCK(sc);
4212 return;
4213 }
4214 #endif
4215
4216 /*
4217 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4218 * disable interrupts by writing nonzero like we used to, since with
4219 * our current organization this just gives complications and
4220 * pessimizations for re-enabling interrupts. We used to have races
4221 * instead of the necessary complications. Disabling interrupts
4222 * would just reduce the chance of a status update while we are
4223 * running (by switching to the interrupt-mode coalescence
4224 * parameters), but this chance is already very low so it is more
4225 * efficient to get another interrupt than prevent it.
4226 *
4227 * We do the ack first to ensure another interrupt if there is a
4228 * status update after the ack. We don't check for the status
4229 * changing later because it is more efficient to get another
4230 * interrupt than prevent it, not quite as above (not checking is
4231 * a smaller optimization than not toggling the interrupt enable,
4232 * since checking doesn't involve PCI accesses and toggling requires
4233 * the status check). So toggling would probably be a pessimization
4234 * even with MSI. It would only be needed for using a task queue.
4235 */
4236 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4237
4238 /*
4239 * Do the mandatory PCI flush as well as get the link status.
4240 */
4241 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4242
4243 /* Make sure the descriptor ring indexes are coherent. */
4244 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4245 sc->bge_cdata.bge_status_map,
4246 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4247 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4248 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4249 sc->bge_ldata.bge_status_block->bge_status = 0;
4250 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4251 sc->bge_cdata.bge_status_map,
4252 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4253
4254 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4255 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4256 statusword || sc->bge_link_evt)
4257 bge_link_upd(sc);
4258
4259 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4260 /* Check RX return ring producer/consumer. */
4261 bge_rxeof(sc, rx_prod, 1);
4262 }
4263
4264 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4265 /* Check TX ring producer/consumer. */
4266 bge_txeof(sc, tx_cons);
4267 }
4268
4269 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
4270 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
4271 bge_start_locked(ifp);
4272
4273 BGE_UNLOCK(sc);
4274 }
4275
4276 static void
4277 bge_asf_driver_up(struct bge_softc *sc)
4278 {
4279 if (sc->bge_asf_mode & ASF_STACKUP) {
4280 /* Send ASF heartbeat approx. every 2s. */
4281 if (sc->bge_asf_count)
4282 sc->bge_asf_count--;
4283 else {
4284 sc->bge_asf_count = 2;
4285 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4286 BGE_FW_CMD_DRV_ALIVE);
4287 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4288 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4289 BGE_FW_HB_TIMEOUT_SEC);
4290 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4291 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4292 BGE_RX_CPU_DRV_EVENT);
4293 }
4294 }
4295 }
4296
4297 static void
4298 bge_tick(void *xsc)
4299 {
4300 struct bge_softc *sc = xsc;
4301 struct mii_data *mii = NULL;
4302
4303 BGE_LOCK_ASSERT(sc);
4304
4305 /* Synchronize with possible callout reset/stop. */
4306 if (callout_pending(&sc->bge_stat_ch) ||
4307 !callout_active(&sc->bge_stat_ch))
4308 return;
4309
4310 if (BGE_IS_5705_PLUS(sc))
4311 bge_stats_update_regs(sc);
4312 else
4313 bge_stats_update(sc);
4314
4315 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4316 mii = device_get_softc(sc->bge_miibus);
4317 /*
4318 * Do not touch PHY if we have link up. This could break
4319 * IPMI/ASF mode or produce extra input errors
4320 * (extra errors were reported for bcm5701 & bcm5704).
4321 */
4322 if (!sc->bge_link)
4323 mii_tick(mii);
4324 } else {
4325 /*
4326 * Since auto-polling can't be used in TBI mode, we have to poll
4327 * the link status manually. Here we register a pending link
4328 * event and trigger an interrupt.
4329 */
4330 #ifdef DEVICE_POLLING
4331 /* In polling mode we poll link state in bge_poll(). */
4332 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
4333 #endif
4334 {
4335 sc->bge_link_evt++;
4336 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4337 sc->bge_flags & BGE_FLAG_5788)
4338 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4339 else
4340 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4341 }
4342 }
4343
4344 bge_asf_driver_up(sc);
4345 bge_watchdog(sc);
4346
4347 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4348 }
4349
4350 static void
4351 bge_stats_update_regs(struct bge_softc *sc)
4352 {
4353 struct ifnet *ifp;
4354 struct bge_mac_stats *stats;
4355
4356 ifp = sc->bge_ifp;
4357 stats = &sc->bge_mac_stats;
4358
4359 stats->ifHCOutOctets +=
4360 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4361 stats->etherStatsCollisions +=
4362 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4363 stats->outXonSent +=
4364 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4365 stats->outXoffSent +=
4366 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4367 stats->dot3StatsInternalMacTransmitErrors +=
4368 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4369 stats->dot3StatsSingleCollisionFrames +=
4370 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4371 stats->dot3StatsMultipleCollisionFrames +=
4372 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4373 stats->dot3StatsDeferredTransmissions +=
4374 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4375 stats->dot3StatsExcessiveCollisions +=
4376 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4377 stats->dot3StatsLateCollisions +=
4378 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4379 stats->ifHCOutUcastPkts +=
4380 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4381 stats->ifHCOutMulticastPkts +=
4382 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4383 stats->ifHCOutBroadcastPkts +=
4384 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4385
4386 stats->ifHCInOctets +=
4387 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4388 stats->etherStatsFragments +=
4389 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4390 stats->ifHCInUcastPkts +=
4391 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4392 stats->ifHCInMulticastPkts +=
4393 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4394 stats->ifHCInBroadcastPkts +=
4395 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4396 stats->dot3StatsFCSErrors +=
4397 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4398 stats->dot3StatsAlignmentErrors +=
4399 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4400 stats->xonPauseFramesReceived +=
4401 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4402 stats->xoffPauseFramesReceived +=
4403 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4404 stats->macControlFramesReceived +=
4405 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4406 stats->xoffStateEntered +=
4407 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4408 stats->dot3StatsFramesTooLong +=
4409 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4410 stats->etherStatsJabbers +=
4411 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4412 stats->etherStatsUndersizePkts +=
4413 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4414
4415 stats->FramesDroppedDueToFilters +=
4416 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4417 stats->DmaWriteQueueFull +=
4418 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4419 stats->DmaWriteHighPriQueueFull +=
4420 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4421 stats->NoMoreRxBDs +=
4422 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4423 /*
4424 * XXX
4425 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4426 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4427 * also includes the number of unwanted multicast frames.
4428 * This comes from a silicon bug, and the known workaround to
4429 * get a rough (not exact) counter is to enable an interrupt
4430 * on MBUF low-water attention. This can be accomplished by
4431 * setting the BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4432 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4433 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4434 * However, that change would generate more interrupts and
4435 * there would still be a chance of losing multiple frames
4436 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4437 * Given that the workaround still would not yield a correct
4438 * counter, it does not seem worth implementing; instead,
4439 * simply skip reading the counter on controllers that have
4440 * the silicon bug.
4441 */
4442 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4443 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4444 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4445 stats->InputDiscards +=
4446 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4447 stats->InputErrors +=
4448 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4449 stats->RecvThresholdHit +=
4450 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4451
4452 ifp->if_collisions = (u_long)stats->etherStatsCollisions;
4453 ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
4454 stats->InputErrors);
4455 }
4456
4457 static void
4458 bge_stats_clear_regs(struct bge_softc *sc)
4459 {
4460
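	/*
	 * These MAC statistics registers are evidently clear-on-read:
	 * each one is read purely for that side effect and the value
	 * is discarded.
	 */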
4461 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4462 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4463 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4464 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4465 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4466 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4467 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4468 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4469 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4470 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4471 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4472 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4473 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4474
4475 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4476 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4477 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4478 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4479 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4480 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4481 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4482 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4483 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4484 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4485 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4486 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4487 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4488 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4489
4490 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4491 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4492 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4493 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4494 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4495 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4496 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4497 }
4498
4499 static void
4500 bge_stats_update(struct bge_softc *sc)
4501 {
4502 struct ifnet *ifp;
4503 bus_size_t stats;
4504 uint32_t cnt; /* current register value */
4505
4506 ifp = sc->bge_ifp;
4507
4508 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4509
4510 #define READ_STAT(sc, stats, stat) \
4511 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4512
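/*
 * For reference: READ_STAT fetches the low 32 bits (bge_addr_lo) of a
 * 64-bit counter in the chip-maintained statistics block through the
 * memory window.  The unsigned 32-bit subtraction against the saved
 * value in each delta below keeps the accounting correct across
 * counter wrap-around.
 */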
4513 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4514 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
4515 sc->bge_tx_collisions = cnt;
4516
4517 cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
4518 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
4519 sc->bge_rx_nobds = cnt;
4520 cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
4521 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
4522 sc->bge_rx_inerrs = cnt;
4523 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4524 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
4525 sc->bge_rx_discards = cnt;
4526
4527 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4528 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
4529 sc->bge_tx_discards = cnt;
4530
4531 #undef READ_STAT
4532 }
4533
4534 /*
4535 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4536 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4537 * but when such padded frames employ the bge IP/TCP checksum offload,
4538 * the hardware checksum assist gives incorrect results (possibly
4539 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4540 * If we pad such runts with zeros, the onboard checksum comes out correct.
4541 */
4542 static __inline int
4543 bge_cksum_pad(struct mbuf *m)
4544 {
4545 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
4546 struct mbuf *last;
4547
4548 /* If there's only the packet-header and we can pad there, use it. */
4549 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
4550 M_TRAILINGSPACE(m) >= padlen) {
4551 last = m;
4552 } else {
4553 /*
4554 * Walk packet chain to find last mbuf. We will either
4555 * pad there, or append a new mbuf and pad it.
4556 */
4557 for (last = m; last->m_next != NULL; last = last->m_next);
4558 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
4559 /* Allocate new empty mbuf, pad it. Compact later. */
4560 struct mbuf *n;
4561
4562 MGET(n, M_DONTWAIT, MT_DATA);
4563 if (n == NULL)
4564 return (ENOBUFS);
4565 n->m_len = 0;
4566 last->m_next = n;
4567 last = n;
4568 }
4569 }
4570
4571 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
4572 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4573 last->m_len += padlen;
4574 m->m_pkthdr.len += padlen;
4575
4576 return (0);
4577 }
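/*
 * Worked example (illustrative): ETHER_MIN_NOPAD is the 64-byte
 * minimum frame length less the 4-byte CRC, i.e. 60 bytes, so a
 * 42-byte datagram gets padlen = 18 and has 18 zero bytes appended
 * before the checksum offload engine sees it.
 */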
4578
4579 static struct mbuf *
4580 bge_check_short_dma(struct mbuf *m)
4581 {
4582 struct mbuf *n;
4583 int found;
4584
4585 /*
4586 * If the device receives two back-to-back send BDs with less than
4587 * or equal to 8 total bytes then the device may hang. The two
4588 * back-to-back send BDs must be in the same frame for this failure
4589 * to occur. Scan the mbuf chain and see whether two back-to-back
4590 * send BDs are present. If this is the case, allocate a new mbuf
4591 * and copy the frame into it to work around the silicon bug.
4592 */
4593 for (n = m, found = 0; n != NULL; n = n->m_next) {
4594 if (n->m_len < 8) {
4595 found++;
4596 if (found > 1)
4597 break;
4598 continue;
4599 }
4600 found = 0;
4601 }
4602
4603 if (found > 1) {
4604 n = m_defrag(m, M_DONTWAIT);
4605 if (n == NULL)
4606 m_freem(m);
4607 } else
4608 n = m;
4609 return (n);
4610 }
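/*
 * Example (illustrative): an mbuf chain split as 4 + 3 + 53 bytes
 * presents two consecutive sub-8-byte send BDs within one frame, so
 * the scan above would hand it to m_defrag() to be collapsed into a
 * single contiguous mbuf.
 */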
4611
4612 static struct mbuf *
4613 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
4614 uint16_t *flags)
4615 {
4616 struct ip *ip;
4617 struct tcphdr *tcp;
4618 struct mbuf *n;
4619 uint16_t hlen;
4620 uint32_t poff;
4621
4622 if (M_WRITABLE(m) == 0) {
4623 /* Get a writable copy. */
4624 n = m_dup(m, M_DONTWAIT);
4625 m_freem(m);
4626 if (n == NULL)
4627 return (NULL);
4628 m = n;
4629 }
4630 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
4631 if (m == NULL)
4632 return (NULL);
4633 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4634 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
4635 m = m_pullup(m, poff + sizeof(struct tcphdr));
4636 if (m == NULL)
4637 return (NULL);
4638 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4639 m = m_pullup(m, poff + (tcp->th_off << 2));
4640 if (m == NULL)
4641 return (NULL);
4642 /*
4643 * The controller does not seem to modify the IP length or TCP pseudo
4644 * checksum; those computed by the upper stack must be cleared to 0.
4645 */
4646 *mss = m->m_pkthdr.tso_segsz;
4647 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
4648 ip->ip_sum = 0;
4649 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
4650 /* Clear pseudo checksum computed by TCP stack. */
4651 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
4652 tcp->th_sum = 0;
4653 /*
4654 * Broadcom controllers use a different descriptor format for
4655 * TSO depending on the ASIC revision. Due to the licensing issue
4656 * with TSO-capable firmware and the lower performance of
4657 * firmware-based TSO, we only support hardware-based TSO.
4658 */
4659 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
4660 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
4661 if (sc->bge_flags & BGE_FLAG_TSO3) {
4662 /*
4663 * For BCM5717 and newer controllers, hardware based TSO
4664 * uses the 14 lower bits of the bge_mss field to store the
4665 * MSS and the upper 2 bits to store the lowest 2 bits of
4666 * the IP/TCP header length. The upper 6 bits of the header
4667 * length are stored in the bge_flags[14:10,4] field. Jumbo
4668 * frames are supported.
4669 */
4670 *mss |= ((hlen & 0x3) << 14);
4671 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
4672 } else {
4673 /*
4674 * For BCM5755 and newer controllers, hardware based TSO uses
4675 * the lower 11 bits to store the MSS and the upper 5 bits to
4676 * store the IP/TCP header length. Jumbo frames are not
4677 * supported.
4678 */
4679 *mss |= (hlen << 11);
4680 }
4681 return (m);
4682 }
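/*
 * Worked example (illustrative only): with 20-byte IP and 32-byte TCP
 * headers, hlen = (20 + 32) / 4 = 13 (0xD) 32-bit words; with an MSS
 * of 1448 (0x5A8):
 *
 *	TSO3:	mss    = 0x5A8 | ((0xD & 0x3) << 14)              = 0x45A8
 *		flags |= ((0xD & 0xF8) << 7) | ((0xD & 0x4) << 2) = 0x0410
 *	older:	mss    = 0x5A8 | (0xD << 11)                      = 0x6DA8
 */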
4683
4684 /*
4685 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4686 * pointers to descriptors.
4687 */
4688 static int
4689 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
4690 {
4691 bus_dma_segment_t segs[BGE_NSEG_NEW];
4692 bus_dmamap_t map;
4693 struct bge_tx_bd *d;
4694 struct mbuf *m = *m_head;
4695 uint32_t idx = *txidx;
4696 uint16_t csum_flags, mss, vlan_tag;
4697 int nsegs, i, error;
4698
4699 csum_flags = 0;
4700 mss = 0;
4701 vlan_tag = 0;
4702 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
4703 m->m_next != NULL) {
4704 *m_head = bge_check_short_dma(m);
4705 if (*m_head == NULL)
4706 return (ENOBUFS);
4707 m = *m_head;
4708 }
4709 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
4710 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
4711 if (*m_head == NULL)
4712 return (ENOBUFS);
4713 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
4714 BGE_TXBDFLAG_CPU_POST_DMA;
4715 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
4716 if (m->m_pkthdr.csum_flags & CSUM_IP)
4717 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4718 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
4719 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4720 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4721 (error = bge_cksum_pad(m)) != 0) {
4722 m_freem(m);
4723 *m_head = NULL;
4724 return (error);
4725 }
4726 }
4727 if (m->m_flags & M_LASTFRAG)
4728 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
4729 else if (m->m_flags & M_FRAG)
4730 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
4731 }
4732
4733 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
4734 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
4735 m->m_pkthdr.len > ETHER_MAX_LEN)
4736 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4737 if (sc->bge_forced_collapse > 0 &&
4738 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
4739 /*
4740 * Forcibly collapse mbuf chains to overcome a hardware
4741 * limitation which only supports a single outstanding
4742 * DMA read operation.
4743 */
4744 if (sc->bge_forced_collapse == 1)
4745 m = m_defrag(m, M_DONTWAIT);
4746 else
4747 m = m_collapse(m, M_DONTWAIT,
4748 sc->bge_forced_collapse);
4749 if (m == NULL)
4750 m = *m_head;
4751 *m_head = m;
4752 }
4753 }
4754
4755 map = sc->bge_cdata.bge_tx_dmamap[idx];
4756 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
4757 &nsegs, BUS_DMA_NOWAIT);
4758 if (error == EFBIG) {
4759 m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
4760 if (m == NULL) {
4761 m_freem(*m_head);
4762 *m_head = NULL;
4763 return (ENOBUFS);
4764 }
4765 *m_head = m;
4766 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
4767 m, segs, &nsegs, BUS_DMA_NOWAIT);
4768 if (error) {
4769 m_freem(m);
4770 *m_head = NULL;
4771 return (error);
4772 }
4773 } else if (error != 0)
4774 return (error);
4775
4776 /* Check if we have enough free send BDs. */
4777 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
4778 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
4779 return (ENOBUFS);
4780 }
4781
4782 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
4783
4784 if (m->m_flags & M_VLANTAG) {
4785 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
4786 vlan_tag = m->m_pkthdr.ether_vtag;
4787 }
4788 for (i = 0; ; i++) {
4789 d = &sc->bge_ldata.bge_tx_ring[idx];
4790 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
4791 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
4792 d->bge_len = segs[i].ds_len;
4793 d->bge_flags = csum_flags;
4794 d->bge_vlan_tag = vlan_tag;
4795 d->bge_mss = mss;
4796 if (i == nsegs - 1)
4797 break;
4798 BGE_INC(idx, BGE_TX_RING_CNT);
4799 }
4800
4801 /* Mark the last segment as end of packet... */
4802 d->bge_flags |= BGE_TXBDFLAG_END;
4803
4804 /*
4805 * Ensure that the map for this transmission
4806 * is placed at the array index of the last descriptor
4807 * in this chain.
4808 */
4809 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
4810 sc->bge_cdata.bge_tx_dmamap[idx] = map;
4811 sc->bge_cdata.bge_tx_chain[idx] = m;
4812 sc->bge_txcnt += nsegs;
4813
4814 BGE_INC(idx, BGE_TX_RING_CNT);
4815 *txidx = idx;
4816
4817 return (0);
4818 }
4819
4820 /*
4821 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4822 * to the mbuf data regions directly in the transmit descriptors.
4823 */
4824 static void
4825 bge_start_locked(struct ifnet *ifp)
4826 {
4827 struct bge_softc *sc;
4828 struct mbuf *m_head;
4829 uint32_t prodidx;
4830 int count;
4831
4832 sc = ifp->if_softc;
4833 BGE_LOCK_ASSERT(sc);
4834
4835 if (!sc->bge_link ||
4836 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4837 IFF_DRV_RUNNING)
4838 return;
4839
4840 prodidx = sc->bge_tx_prodidx;
4841
4842 for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
4843 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4844 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4845 break;
4846 }
4847 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
4848 if (m_head == NULL)
4849 break;
4850
4851 /*
4852 * XXX
4853 * The code inside the if() block is never reached since we
4854 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
4855 * requests to checksum TCP/UDP in a fragmented packet.
4856 *
4857 * XXX
4858 * safety overkill. If this is a fragmented packet chain
4859 * with delayed TCP/UDP checksums, then only encapsulate
4860 * it if we have enough descriptors to handle the entire
4861 * chain at once.
4862 * (paranoia -- may not actually be needed)
4863 */
4864 if (m_head->m_flags & M_FIRSTFRAG &&
4865 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4866 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4867 m_head->m_pkthdr.csum_data + 16) {
4868 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4869 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4870 break;
4871 }
4872 }
4873
4874 /*
4875 * Pack the data into the transmit ring. If we
4876 * don't have room, set the OACTIVE flag and wait
4877 * for the NIC to drain the ring.
4878 */
4879 if (bge_encap(sc, &m_head, &prodidx)) {
4880 if (m_head == NULL)
4881 break;
4882 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
4883 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4884 break;
4885 }
4886 ++count;
4887
4888 /*
4889 * If there's a BPF listener, bounce a copy of this frame
4890 * to him.
4891 */
4892 #ifdef ETHER_BPF_MTAP
4893 ETHER_BPF_MTAP(ifp, m_head);
4894 #else
4895 BPF_MTAP(ifp, m_head);
4896 #endif
4897 }
4898
4899 if (count > 0) {
4900 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4901 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
4902 /* Transmit. */
4903 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4904 /* 5700 b2 errata */
4905 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
4906 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4907
4908 sc->bge_tx_prodidx = prodidx;
4909
4910 /*
4911 * Set a timeout in case the chip goes out to lunch.
4912 */
4913 sc->bge_timer = 5;
4914 }
4915 }
4916
4917 /*
4918 * Main transmit routine wrapper: acquire the driver lock and
4919 * hand off to bge_start_locked().
4920 */
4921 static void
4922 bge_start(struct ifnet *ifp)
4923 {
4924 struct bge_softc *sc;
4925
4926 sc = ifp->if_softc;
4927 BGE_LOCK(sc);
4928 bge_start_locked(ifp);
4929 BGE_UNLOCK(sc);
4930 }
4931
4932 static void
4933 bge_init_locked(struct bge_softc *sc)
4934 {
4935 struct ifnet *ifp;
4936 uint16_t *m;
4937 uint32_t mode;
4938
4939 BGE_LOCK_ASSERT(sc);
4940
4941 ifp = sc->bge_ifp;
4942
4943 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4944 return;
4945
4946 /* Cancel pending I/O and flush buffers. */
4947 bge_stop(sc);
4948
4949 bge_stop_fw(sc);
4950 bge_sig_pre_reset(sc, BGE_RESET_START);
4951 bge_reset(sc);
4952 bge_sig_legacy(sc, BGE_RESET_START);
4953 bge_sig_post_reset(sc, BGE_RESET_START);
4954
4955 bge_chipinit(sc);
4956
4957 /*
4958 * Init the various state machines, ring
4959 * control blocks and firmware.
4960 */
4961 if (bge_blockinit(sc)) {
4962 device_printf(sc->bge_dev, "initialization failure\n");
4963 return;
4964 }
4965
4966 ifp = sc->bge_ifp;
4967
4968 /* Specify MTU. */
4969 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4970 ETHER_HDR_LEN + ETHER_CRC_LEN +
4971 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
4972
4973 /* Load our MAC address. */
4974 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
4975 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4976 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4977
4978 /* Program promiscuous mode. */
4979 bge_setpromisc(sc);
4980
4981 /* Program multicast filter. */
4982 bge_setmulti(sc);
4983
4984 /* Program VLAN tag stripping. */
4985 bge_setvlan(sc);
4986
4987 /* Override UDP checksum offloading. */
4988 if (sc->bge_forced_udpcsum == 0)
4989 sc->bge_csum_features &= ~CSUM_UDP;
4990 else
4991 sc->bge_csum_features |= CSUM_UDP;
4992 if (ifp->if_capabilities & IFCAP_TXCSUM &&
4993 ifp->if_capenable & IFCAP_TXCSUM) {
4994 ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
4995 ifp->if_hwassist |= sc->bge_csum_features;
4996 }
4997
4998 /* Init RX ring. */
4999 if (bge_init_rx_ring_std(sc) != 0) {
5000 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
5001 bge_stop(sc);
5002 return;
5003 }
5004
5005 /*
5006 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5007 * memory to ensure that the chip has in fact read the first
5008 * entry of the ring.
5009 */
5010 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5011 uint32_t v, i;
5012 for (i = 0; i < 10; i++) {
5013 DELAY(20);
5014 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5015 if (v == (MCLBYTES - ETHER_ALIGN))
5016 break;
5017 }
5018 if (i == 10)
5019 device_printf(sc->bge_dev,
5020 "5705 A0 chip failed to load RX ring\n");
5021 }
5022
5023 /* Init jumbo RX ring. */
5024 if (BGE_IS_JUMBO_CAPABLE(sc) &&
5025 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
5026 (MCLBYTES - ETHER_ALIGN)) {
5027 if (bge_init_rx_ring_jumbo(sc) != 0) {
5028 device_printf(sc->bge_dev,
5029 "no memory for jumbo Rx buffers.\n");
5030 bge_stop(sc);
5031 return;
5032 }
5033 }
5034
5035 /* Init our RX return ring index. */
5036 sc->bge_rx_saved_considx = 0;
5037
5038 /* Init our RX/TX stat counters. */
5039 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5040
5041 /* Init TX ring. */
5042 bge_init_tx_ring(sc);
5043
5044 /* Enable TX MAC state machine lockup fix. */
5045 mode = CSR_READ_4(sc, BGE_TX_MODE);
5046 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5047 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5048 if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
5049 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5050 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5051 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5052 }
5053 /* Turn on transmitter. */
5054 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5055
5056 /* Turn on receiver. */
5057 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5058
5059 /*
5060 * Set the number of good frames to receive after RX MBUF
5061 * Low Watermark has been reached. After the RX MAC receives
5062 * this number of frames, it will drop subsequent incoming
5063 * frames until the MBUF High Watermark is reached.
5064 */
5065 if (sc->bge_asicrev == BGE_ASICREV_BCM57765)
5066 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5067 else
5068 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5069
5070 /* Clear MAC statistics. */
5071 if (BGE_IS_5705_PLUS(sc))
5072 bge_stats_clear_regs(sc);
5073
5074 /* Tell firmware we're alive. */
5075 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5076
5077 #ifdef DEVICE_POLLING
5078 /* Disable interrupts if we are polling. */
5079 if (ifp->if_capenable & IFCAP_POLLING) {
5080 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5081 BGE_PCIMISCCTL_MASK_PCI_INTR);
5082 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5083 } else
5084 #endif
5085
5086 /* Enable host interrupts. */
5087 {
5088 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5089 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5090 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5091 }
5092
5093 bge_ifmedia_upd_locked(ifp);
5094
5095 ifp->if_drv_flags |= IFF_DRV_RUNNING;
5096 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5097
5098 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5099 }
5100
5101 static void
5102 bge_init(void *xsc)
5103 {
5104 struct bge_softc *sc = xsc;
5105
5106 BGE_LOCK(sc);
5107 bge_init_locked(sc);
5108 BGE_UNLOCK(sc);
5109 }
5110
5111 /*
5112 * Set media options.
5113 */
5114 static int
5115 bge_ifmedia_upd(struct ifnet *ifp)
5116 {
5117 struct bge_softc *sc = ifp->if_softc;
5118 int res;
5119
5120 BGE_LOCK(sc);
5121 res = bge_ifmedia_upd_locked(ifp);
5122 BGE_UNLOCK(sc);
5123
5124 return (res);
5125 }
5126
5127 static int
5128 bge_ifmedia_upd_locked(struct ifnet *ifp)
5129 {
5130 struct bge_softc *sc = ifp->if_softc;
5131 struct mii_data *mii;
5132 struct mii_softc *miisc;
5133 struct ifmedia *ifm;
5134
5135 BGE_LOCK_ASSERT(sc);
5136
5137 ifm = &sc->bge_ifmedia;
5138
5139 /* If this is a 1000baseX NIC, enable the TBI port. */
5140 if (sc->bge_flags & BGE_FLAG_TBI) {
5141 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5142 return (EINVAL);
5143 switch (IFM_SUBTYPE(ifm->ifm_media)) {
5144 case IFM_AUTO:
5145 /*
5146 * The BCM5704 ASIC appears to have a special
5147 * mechanism for programming the autoneg
5148 * advertisement registers in TBI mode.
5149 */
5150 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5151 uint32_t sgdig;
5152 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5153 if (sgdig & BGE_SGDIGSTS_DONE) {
5154 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5155 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5156 sgdig |= BGE_SGDIGCFG_AUTO |
5157 BGE_SGDIGCFG_PAUSE_CAP |
5158 BGE_SGDIGCFG_ASYM_PAUSE;
5159 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5160 sgdig | BGE_SGDIGCFG_SEND);
5161 DELAY(5);
5162 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5163 }
5164 }
5165 break;
5166 case IFM_1000_SX:
5167 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5168 BGE_CLRBIT(sc, BGE_MAC_MODE,
5169 BGE_MACMODE_HALF_DUPLEX);
5170 } else {
5171 BGE_SETBIT(sc, BGE_MAC_MODE,
5172 BGE_MACMODE_HALF_DUPLEX);
5173 }
5174 break;
5175 default:
5176 return (EINVAL);
5177 }
5178 return (0);
5179 }
5180
5181 sc->bge_link_evt++;
5182 mii = device_get_softc(sc->bge_miibus);
5183 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5184 mii_phy_reset(miisc);
5185 mii_mediachg(mii);
5186
5187 /*
5188 * Force an interrupt so that we will call bge_link_upd
5189 * if needed and clear any pending link state attention.
5190 * Without this we would receive no further interrupts for
5191 * link state changes, the link would never come UP, and we
5192 * would be unable to send in bge_start_locked; the only way
5193 * to get things working again would be to receive a packet
5194 * and take an RX interrupt.
5195 * bge_tick should help for fiber cards, and we might not
5196 * need to do this here when BGE_FLAG_TBI is set, but since
5197 * we poll for fiber anyway it does no harm.
5198 */
5199 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5200 sc->bge_flags & BGE_FLAG_5788)
5201 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5202 else
5203 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5204
5205 return (0);
5206 }
5207
5208 /*
5209 * Report current media status.
5210 */
5211 static void
5212 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
5213 {
5214 struct bge_softc *sc = ifp->if_softc;
5215 struct mii_data *mii;
5216
5217 BGE_LOCK(sc);
5218
5219 if (sc->bge_flags & BGE_FLAG_TBI) {
5220 ifmr->ifm_status = IFM_AVALID;
5221 ifmr->ifm_active = IFM_ETHER;
5222 if (CSR_READ_4(sc, BGE_MAC_STS) &
5223 BGE_MACSTAT_TBI_PCS_SYNCHED)
5224 ifmr->ifm_status |= IFM_ACTIVE;
5225 else {
5226 ifmr->ifm_active |= IFM_NONE;
5227 BGE_UNLOCK(sc);
5228 return;
5229 }
5230 ifmr->ifm_active |= IFM_1000_SX;
5231 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5232 ifmr->ifm_active |= IFM_HDX;
5233 else
5234 ifmr->ifm_active |= IFM_FDX;
5235 BGE_UNLOCK(sc);
5236 return;
5237 }
5238
5239 mii = device_get_softc(sc->bge_miibus);
5240 mii_pollstat(mii);
5241 ifmr->ifm_active = mii->mii_media_active;
5242 ifmr->ifm_status = mii->mii_media_status;
5243
5244 BGE_UNLOCK(sc);
5245 }
5246
5247 static int
5248 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5249 {
5250 struct bge_softc *sc = ifp->if_softc;
5251 struct ifreq *ifr = (struct ifreq *) data;
5252 struct mii_data *mii;
5253 int flags, mask, error = 0;
5254
5255 switch (command) {
5256 case SIOCSIFMTU:
5257 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5258 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5259 if (ifr->ifr_mtu < ETHERMIN ||
5260 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5261 error = EINVAL;
5262 break;
5263 }
5264 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5265 error = EINVAL;
5266 break;
5267 }
5268 BGE_LOCK(sc);
5269 if (ifp->if_mtu != ifr->ifr_mtu) {
5270 ifp->if_mtu = ifr->ifr_mtu;
5271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5272 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5273 bge_init_locked(sc);
5274 }
5275 }
5276 BGE_UNLOCK(sc);
5277 break;
5278 case SIOCSIFFLAGS:
5279 BGE_LOCK(sc);
5280 if (ifp->if_flags & IFF_UP) {
5281 /*
5282 * If only the state of the PROMISC flag changed,
5283 * then just use the 'set promisc mode' command
5284 * instead of reinitializing the entire NIC. Doing
5285 * a full re-init means reloading the firmware and
5286 * waiting for it to start up, which may take a
5287 * second or two. Similarly for ALLMULTI.
5288 */
5289 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5290 flags = ifp->if_flags ^ sc->bge_if_flags;
5291 if (flags & IFF_PROMISC)
5292 bge_setpromisc(sc);
5293 if (flags & IFF_ALLMULTI)
5294 bge_setmulti(sc);
5295 } else
5296 bge_init_locked(sc);
5297 } else {
5298 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5299 bge_stop(sc);
5300 }
5301 }
5302 sc->bge_if_flags = ifp->if_flags;
5303 BGE_UNLOCK(sc);
5304 error = 0;
5305 break;
5306 case SIOCADDMULTI:
5307 case SIOCDELMULTI:
5308 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5309 BGE_LOCK(sc);
5310 bge_setmulti(sc);
5311 BGE_UNLOCK(sc);
5312 error = 0;
5313 }
5314 break;
5315 case SIOCSIFMEDIA:
5316 case SIOCGIFMEDIA:
5317 if (sc->bge_flags & BGE_FLAG_TBI) {
5318 error = ifmedia_ioctl(ifp, ifr,
5319 &sc->bge_ifmedia, command);
5320 } else {
5321 mii = device_get_softc(sc->bge_miibus);
5322 error = ifmedia_ioctl(ifp, ifr,
5323 &mii->mii_media, command);
5324 }
5325 break;
5326 case SIOCSIFCAP:
5327 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5328 #ifdef DEVICE_POLLING
5329 if (mask & IFCAP_POLLING) {
5330 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5331 error = ether_poll_register(bge_poll, ifp);
5332 if (error)
5333 return (error);
5334 BGE_LOCK(sc);
5335 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5336 BGE_PCIMISCCTL_MASK_PCI_INTR);
5337 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5338 ifp->if_capenable |= IFCAP_POLLING;
5339 BGE_UNLOCK(sc);
5340 } else {
5341 error = ether_poll_deregister(ifp);
5342 /* Re-enable interrupts even in the error case. */
5343 BGE_LOCK(sc);
5344 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5345 BGE_PCIMISCCTL_MASK_PCI_INTR);
5346 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5347 ifp->if_capenable &= ~IFCAP_POLLING;
5348 BGE_UNLOCK(sc);
5349 }
5350 }
5351 #endif
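/*
 * Note: IFCAP_POLLING is normally toggled from userland via
 * "ifconfig bgeN polling" / "ifconfig bgeN -polling", which
 * reaches this handler through SIOCSIFCAP.
 */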
5352 if ((mask & IFCAP_TXCSUM) != 0 &&
5353 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
5354 ifp->if_capenable ^= IFCAP_TXCSUM;
5355 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
5356 ifp->if_hwassist |= sc->bge_csum_features;
5357 else
5358 ifp->if_hwassist &= ~sc->bge_csum_features;
5359 }
5360
5361 if ((mask & IFCAP_RXCSUM) != 0 &&
5362 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
5363 ifp->if_capenable ^= IFCAP_RXCSUM;
5364
5365 if ((mask & IFCAP_TSO4) != 0 &&
5366 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
5367 ifp->if_capenable ^= IFCAP_TSO4;
5368 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
5369 ifp->if_hwassist |= CSUM_TSO;
5370 else
5371 ifp->if_hwassist &= ~CSUM_TSO;
5372 }
5373
5374 if (mask & IFCAP_VLAN_MTU) {
5375 ifp->if_capenable ^= IFCAP_VLAN_MTU;
5376 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5377 bge_init(sc);
5378 }
5379
5380 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5381 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
5382 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5383 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5384 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
5385 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5386 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
5387 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
5388 BGE_LOCK(sc);
5389 bge_setvlan(sc);
5390 BGE_UNLOCK(sc);
5391 }
5392 #ifdef VLAN_CAPABILITIES
5393 VLAN_CAPABILITIES(ifp);
5394 #endif
5395 break;
5396 default:
5397 error = ether_ioctl(ifp, command, data);
5398 break;
5399 }
5400
5401 return (error);
5402 }
5403
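/*
 * Watchdog handler, run periodically from bge_tick(). The TX path arms
 * sc->bge_timer when it queues work; if the counter ticks down to zero
 * before completions drain it, assume the chip has wedged, log the
 * timeout and reinitialize.
 */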
5404 static void
5405 bge_watchdog(struct bge_softc *sc)
5406 {
5407 struct ifnet *ifp;
5408
5409 BGE_LOCK_ASSERT(sc);
5410
5411 if (sc->bge_timer == 0 || --sc->bge_timer)
5412 return;
5413
5414 ifp = sc->bge_ifp;
5415
5416 if_printf(ifp, "watchdog timeout -- resetting\n");
5417
5418 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5419 bge_init_locked(sc);
5420
5421 ifp->if_oerrors++;
5422 }
5423
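/*
 * Clear the enable bit of a state machine block and poll (up to
 * BGE_TIMEOUT iterations, 100us apart) for the hardware to acknowledge
 * the stop; a timeout is silently tolerated.
 */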
5424 static void
5425 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5426 {
5427 int i;
5428
5429 BGE_CLRBIT(sc, reg, bit);
5430
5431 for (i = 0; i < BGE_TIMEOUT; i++) {
5432 if ((CSR_READ_4(sc, reg) & bit) == 0)
5433 return;
5434 DELAY(100);
5435 }
5436 }
5437
5438 /*
5439 * Stop the adapter and free any mbufs allocated to the
5440 * RX and TX lists.
5441 */
5442 static void
5443 bge_stop(struct bge_softc *sc)
5444 {
5445 struct ifnet *ifp;
5446
5447 BGE_LOCK_ASSERT(sc);
5448
5449 ifp = sc->bge_ifp;
5450
5451 callout_stop(&sc->bge_stat_ch);
5452
5453 /* Disable host interrupts. */
5454 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5455 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5456
5457 /*
5458 * Tell firmware we're shutting down.
5459 */
5460 bge_stop_fw(sc);
5461 bge_sig_pre_reset(sc, BGE_RESET_STOP);
5462
5463 /*
5464 * Disable all of the receiver blocks.
5465 */
5466 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5467 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5468 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5469 if (BGE_IS_5700_FAMILY(sc))
5470 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5471 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5472 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5473 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5474
5475 /*
5476 * Disable all of the transmit blocks.
5477 */
5478 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5479 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5480 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5481 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5482 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5483 if (BGE_IS_5700_FAMILY(sc))
5484 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5485 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5486
5487 /*
5488 * Shut down all of the memory managers and related
5489 * state machines.
5490 */
5491 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
5492 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
5493 if (BGE_IS_5700_FAMILY(sc))
5494 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
5495
5496 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
5497 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
5498 if (!(BGE_IS_5705_PLUS(sc))) {
5499 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
5500 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
5501 }
5502 /* Update MAC statistics. */
5503 if (BGE_IS_5705_PLUS(sc))
5504 bge_stats_update_regs(sc);
5505
5506 bge_reset(sc);
5507 bge_sig_legacy(sc, BGE_RESET_STOP);
5508 bge_sig_post_reset(sc, BGE_RESET_STOP);
5509
5510 /*
5511 * Keep the ASF firmware running if up.
5512 */
5513 if (sc->bge_asf_mode & ASF_STACKUP)
5514 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5515 else
5516 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5517
5518 /* Free the RX lists. */
5519 bge_free_rx_ring_std(sc);
5520
5521 /* Free jumbo RX list. */
5522 if (BGE_IS_JUMBO_CAPABLE(sc))
5523 bge_free_rx_ring_jumbo(sc);
5524
5525 /* Free TX buffers. */
5526 bge_free_tx_ring(sc);
5527
5528 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
5529
5530 /* Clear MAC's link state (PHY may still have link UP). */
5531 if (bootverbose && sc->bge_link)
5532 if_printf(sc->bge_ifp, "link DOWN\n");
5533 sc->bge_link = 0;
5534
5535 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
5536 }
5537
5538 /*
5539 * Stop all chip I/O so that the kernel's probe routines don't
5540 * get confused by errant DMAs when rebooting.
5541 */
5542 static int
5543 bge_shutdown(device_t dev)
5544 {
5545 struct bge_softc *sc;
5546
5547 sc = device_get_softc(dev);
5548 BGE_LOCK(sc);
5549 bge_stop(sc);
5550 bge_reset(sc);
5551 BGE_UNLOCK(sc);
5552
5553 return (0);
5554 }
5555
5556 static int
5557 bge_suspend(device_t dev)
5558 {
5559 struct bge_softc *sc;
5560
5561 sc = device_get_softc(dev);
5562 BGE_LOCK(sc);
5563 bge_stop(sc);
5564 BGE_UNLOCK(sc);
5565
5566 return (0);
5567 }
5568
5569 static int
5570 bge_resume(device_t dev)
5571 {
5572 struct bge_softc *sc;
5573 struct ifnet *ifp;
5574
5575 sc = device_get_softc(dev);
5576 BGE_LOCK(sc);
5577 ifp = sc->bge_ifp;
5578 if (ifp->if_flags & IFF_UP) {
5579 bge_init_locked(sc);
5580 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5581 bge_start_locked(ifp);
5582 }
5583 BGE_UNLOCK(sc);
5584
5585 return (0);
5586 }
5587
5588 static void
5589 bge_link_upd(struct bge_softc *sc)
5590 {
5591 struct mii_data *mii;
5592 uint32_t link, status;
5593
5594 BGE_LOCK_ASSERT(sc);
5595
5596 /* Clear 'pending link event' flag. */
5597 sc->bge_link_evt = 0;
5598
5599 /*
5600 * Process link state changes.
5601 * Grrr. The link status word in the status block does
5602 * not work correctly on the BCM5700 rev AX and BX chips,
5603 * according to all available information. Hence, we have
5604 * to enable MII interrupts in order to properly obtain
5605 * async link changes. Unfortunately, this also means that
5606 * we have to read the MAC status register to detect link
5607 * changes, thereby adding an additional register access to
5608 * the interrupt handler.
5609 *
5610 * XXX: perhaps the link state detection procedure used for
5611 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
5612 */
5613
5614 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
5615 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
5616 status = CSR_READ_4(sc, BGE_MAC_STS);
5617 if (status & BGE_MACSTAT_MI_INTERRUPT) {
5618 mii = device_get_softc(sc->bge_miibus);
5619 mii_pollstat(mii);
5620 if (!sc->bge_link &&
5621 mii->mii_media_status & IFM_ACTIVE &&
5622 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5623 sc->bge_link++;
5624 if (bootverbose)
5625 if_printf(sc->bge_ifp, "link UP\n");
5626 } else if (sc->bge_link &&
5627 (!(mii->mii_media_status & IFM_ACTIVE) ||
5628 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5629 sc->bge_link = 0;
5630 if (bootverbose)
5631 if_printf(sc->bge_ifp, "link DOWN\n");
5632 }
5633
5634 /* Clear the interrupt. */
5635 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
5636 BGE_EVTENB_MI_INTERRUPT);
5637 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
5638 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
5639 BRGPHY_INTRS);
5640 }
5641 return;
5642 }
5643
5644 if (sc->bge_flags & BGE_FLAG_TBI) {
5645 status = CSR_READ_4(sc, BGE_MAC_STS);
5646 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
5647 if (!sc->bge_link) {
5648 sc->bge_link++;
5649 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
5650 BGE_CLRBIT(sc, BGE_MAC_MODE,
5651 BGE_MACMODE_TBI_SEND_CFGS);
5652 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
5653 if (bootverbose)
5654 if_printf(sc->bge_ifp, "link UP\n");
5655 if_link_state_change(sc->bge_ifp,
5656 LINK_STATE_UP);
5657 }
5658 } else if (sc->bge_link) {
5659 sc->bge_link = 0;
5660 if (bootverbose)
5661 if_printf(sc->bge_ifp, "link DOWN\n");
5662 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
5663 }
5664 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
5665 /*
5666 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
5667 * bit in the status word always set. Work around this bug by
5668 * reading the PHY link status directly.
5669 */
5670 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
5671
5672 if (link != sc->bge_link ||
5673 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
5674 mii = device_get_softc(sc->bge_miibus);
5675 mii_pollstat(mii);
5676 if (!sc->bge_link &&
5677 mii->mii_media_status & IFM_ACTIVE &&
5678 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5679 sc->bge_link++;
5680 if (bootverbose)
5681 if_printf(sc->bge_ifp, "link UP\n");
5682 } else if (sc->bge_link &&
5683 (!(mii->mii_media_status & IFM_ACTIVE) ||
5684 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
5685 sc->bge_link = 0;
5686 if (bootverbose)
5687 if_printf(sc->bge_ifp, "link DOWN\n");
5688 }
5689 }
5690 } else {
5691 /*
5692 * For controllers that call mii_tick, we have to poll
5693 * link status.
5694 */
5695 mii = device_get_softc(sc->bge_miibus);
5696 mii_pollstat(mii);
5697 bge_miibus_statchg(sc->bge_dev);
5698 }
5699
5700 /* Clear the attention. */
5701 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
5702 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
5703 BGE_MACSTAT_LINK_CHANGED);
5704 }
5705
5706 static void
5707 bge_add_sysctls(struct bge_softc *sc)
5708 {
5709 struct sysctl_ctx_list *ctx;
5710 struct sysctl_oid_list *children;
5711 char tn[32];
5712 int unit;
5713
5714 ctx = device_get_sysctl_ctx(sc->bge_dev);
5715 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
5716
5717 #ifdef BGE_REGISTER_DEBUG
5718 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
5719 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
5720 "Debug Information");
5721
5722 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
5723 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
5724 "Register Read");
5725
5726 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
5727 CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
5728 "Memory Read");
5729
5730 #endif
5731
5732 unit = device_get_unit(sc->bge_dev);
5733 /*
5734 * A common design characteristic for many Broadcom client controllers
5735 * is that they only support a single outstanding DMA read operation
5736 * on the PCIe bus. This means that it will take twice as long to fetch
5737 * a TX frame that is split into header and payload buffers as it does
5738 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
5739 * these controllers, coalescing buffers to reduce the number of memory
5740 * reads is an effective way to reach maximum performance (about
5741 * 940Mbps); without collapsing TX buffers, maximum TCP bulk
5742 * transfer performance is about 850Mbps. However, forcibly
5743 * coalescing mbufs consumes a lot of CPU cycles, so it is left off by default.
5744 */
5745 sc->bge_forced_collapse = 0;
5746 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit);
5747 TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse);
5748 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
5749 CTLFLAG_RW, &sc->bge_forced_collapse, 0,
5750 "Number of fragmented TX buffers of a frame allowed before "
5751 "forced collapsing");
5752
5753 sc->bge_msi = 1;
5754 snprintf(tn, sizeof(tn), "dev.bge.%d.msi", unit);
5755 TUNABLE_INT_FETCH(tn, &sc->bge_msi);
5756 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
5757 CTLFLAG_RD, &sc->bge_msi, 0, "Enable MSI");
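/*
 * The MSI knob is read-only at runtime (CTLFLAG_RD); it only takes
 * effect when set as a loader tunable before the driver attaches,
 * e.g. dev.bge.0.msi="0" in /boot/loader.conf to fall back to INTx.
 */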
5758
5759 /*
5760 * It seems all Broadcom controllers have a bug that can generate UDP
5761 * datagrams with checksum value 0 when TX UDP checksum offloading is
5762 * enabled. Generating a UDP checksum value of 0 violates RFC 768.
5763 * Even though the probability of generating such UDP datagrams is
5764 * low, I don't want FreeBSD boxes to inject such datagrams into the
5765 * network, so disable UDP checksum offloading by default. Users can
5766 * still override this behavior by setting a sysctl variable,
5767 * dev.bge.0.forced_udpcsum.
5768 */
5769 sc->bge_forced_udpcsum = 0;
5770 snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit);
5771 TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum);
5772 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
5773 CTLFLAG_RW, &sc->bge_forced_udpcsum, 0,
5774 "Enable UDP checksum offloading even if controller can "
5775 "generate UDP checksum value 0");
5776
5777 if (BGE_IS_5705_PLUS(sc))
5778 bge_add_sysctl_stats_regs(sc, ctx, children);
5779 else
5780 bge_add_sysctl_stats(sc, ctx, children);
5781 }
5782
5783 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
5784 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
5785 sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
5786 desc)
5787
5788 static void
5789 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5790 struct sysctl_oid_list *parent)
5791 {
5792 struct sysctl_oid *tree;
5793 struct sysctl_oid_list *children, *schildren;
5794
5795 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5796 NULL, "BGE Statistics");
5797 schildren = children = SYSCTL_CHILDREN(tree);
5798 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
5799 children, COSFramesDroppedDueToFilters,
5800 "FramesDroppedDueToFilters");
5801 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
5802 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
5803 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
5804 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
5805 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
5806 children, nicNoMoreRxBDs, "NoMoreRxBDs");
5807 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
5808 children, ifInDiscards, "InputDiscards");
5809 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
5810 children, ifInErrors, "InputErrors");
5811 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
5812 children, nicRecvThresholdHit, "RecvThresholdHit");
5813 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
5814 children, nicDmaReadQueueFull, "DmaReadQueueFull");
5815 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
5816 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
5817 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
5818 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
5819 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
5820 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
5821 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
5822 children, nicRingStatusUpdate, "RingStatusUpdate");
5823 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
5824 children, nicInterrupts, "Interrupts");
5825 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
5826 children, nicAvoidedInterrupts, "AvoidedInterrupts");
5827 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
5828 children, nicSendThresholdHit, "SendThresholdHit");
5829
5830 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
5831 NULL, "BGE RX Statistics");
5832 children = SYSCTL_CHILDREN(tree);
5833 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
5834 children, rxstats.ifHCInOctets, "ifHCInOctets");
5835 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
5836 children, rxstats.etherStatsFragments, "Fragments");
5837 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
5838 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
5839 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
5840 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
5841 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
5842 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
5843 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
5844 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
5845 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
5846 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
5847 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
5848 children, rxstats.xoffPauseFramesReceived,
5849 "xoffPauseFramesReceived");
5850 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
5851 children, rxstats.macControlFramesReceived,
5852 "ControlFramesReceived");
5853 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
5854 children, rxstats.xoffStateEntered, "xoffStateEntered");
5855 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
5856 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
5857 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
5858 children, rxstats.etherStatsJabbers, "Jabbers");
5859 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
5860 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
5861 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
5862 children, rxstats.inRangeLengthError, "inRangeLengthError");
5863 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
5864 children, rxstats.outRangeLengthError, "outRangeLengthError");
5865
5866 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
5867 NULL, "BGE TX Statistics");
5868 children = SYSCTL_CHILDREN(tree);
5869 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
5870 children, txstats.ifHCOutOctets, "ifHCOutOctets");
5871 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
5872 children, txstats.etherStatsCollisions, "Collisions");
5873 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
5874 children, txstats.outXonSent, "XonSent");
5875 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
5876 children, txstats.outXoffSent, "XoffSent");
5877 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
5878 children, txstats.flowControlDone, "flowControlDone");
5879 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
5880 children, txstats.dot3StatsInternalMacTransmitErrors,
5881 "InternalMacTransmitErrors");
5882 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
5883 children, txstats.dot3StatsSingleCollisionFrames,
5884 "SingleCollisionFrames");
5885 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
5886 children, txstats.dot3StatsMultipleCollisionFrames,
5887 "MultipleCollisionFrames");
5888 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
5889 children, txstats.dot3StatsDeferredTransmissions,
5890 "DeferredTransmissions");
5891 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
5892 children, txstats.dot3StatsExcessiveCollisions,
5893 "ExcessiveCollisions");
5894 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
5895 children, txstats.dot3StatsLateCollisions,
5896 "LateCollisions");
5897 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
5898 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
5899 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
5900 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
5901 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
5902 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
5903 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
5904 children, txstats.dot3StatsCarrierSenseErrors,
5905 "CarrierSenseErrors");
5906 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
5907 children, txstats.ifOutDiscards, "Discards");
5908 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
5909 children, txstats.ifOutErrors, "Errors");
5910 }
5911
5912 #undef BGE_SYSCTL_STAT
5913
5914 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
5915 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
5916
5917 static void
5918 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
5919 struct sysctl_oid_list *parent)
5920 {
5921 struct sysctl_oid *tree;
5922 struct sysctl_oid_list *child, *schild;
5923 struct bge_mac_stats *stats;
5924
5925 stats = &sc->bge_mac_stats;
5926 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD,
5927 NULL, "BGE Statistics");
5928 schild = child = SYSCTL_CHILDREN(tree);
5929 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
5930 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
5931 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
5932 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
5933 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
5934 &stats->DmaWriteHighPriQueueFull,
5935 "NIC DMA Write High Priority Queue Full");
5936 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
5937 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
5938 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
5939 &stats->InputDiscards, "Discarded Input Frames");
5940 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
5941 &stats->InputErrors, "Input Errors");
5942 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
5943 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
5944
5945 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
5946 NULL, "BGE RX Statistics");
5947 child = SYSCTL_CHILDREN(tree);
5948 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
5949 &stats->ifHCInOctets, "Inbound Octets");
5950 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
5951 &stats->etherStatsFragments, "Fragments");
5952 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
5953 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
5954 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
5955 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
5956 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
5957 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
5958 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
5959 &stats->dot3StatsFCSErrors, "FCS Errors");
5960 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
5961 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
5962 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
5963 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
5964 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
5965 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
5966 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
5967 &stats->macControlFramesReceived, "MAC Control Frames Received");
5968 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
5969 &stats->xoffStateEntered, "XOFF State Entered");
5970 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
5971 &stats->dot3StatsFramesTooLong, "Frames Too Long");
5972 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
5973 &stats->etherStatsJabbers, "Jabbers");
5974 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
5975 &stats->etherStatsUndersizePkts, "Undersized Packets");
5976
5977 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
5978 NULL, "BGE TX Statistics");
5979 child = SYSCTL_CHILDREN(tree);
5980 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
5981 &stats->ifHCOutOctets, "Outbound Octets");
5982 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
5983 &stats->etherStatsCollisions, "TX Collisions");
5984 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
5985 &stats->outXonSent, "XON Sent");
5986 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
5987 &stats->outXoffSent, "XOFF Sent");
5988 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
5989 &stats->dot3StatsInternalMacTransmitErrors,
5990 "Internal MAC TX Errors");
5991 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
5992 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
5993 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
5994 &stats->dot3StatsMultipleCollisionFrames,
5995 "Multiple Collision Frames");
5996 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
5997 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
5998 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
5999 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
6000 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
6001 &stats->dot3StatsLateCollisions, "Late Collisions");
6002 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
6003 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
6004 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
6005 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
6006 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
6007 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
6008 }
6009
6010 #undef BGE_SYSCTL_STAT_ADD64
6011
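/*
 * Sysctl handler for the pre-5705 statistics nodes: arg2 carries the
 * offset of the counter within struct bge_stats, which follows the
 * layout of the chip's statistics block, so the low 32 bits of the
 * 64-bit counter can be read out of NIC memory through the register
 * window.
 */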
6012 static int
6013 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
6014 {
6015 struct bge_softc *sc;
6016 uint32_t result;
6017 int offset;
6018
6019 sc = (struct bge_softc *)arg1;
6020 offset = arg2;
6021 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
6022 offsetof(bge_hostaddr, bge_addr_lo));
6023 return (sysctl_handle_int(oidp, &result, 0, req));
6024 }
6025
6026 #ifdef BGE_REGISTER_DEBUG
6027 static int
6028 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6029 {
6030 struct bge_softc *sc;
6031 uint16_t *sbdata;
6032 int error, result, sbsz;
6033 int i, j;
6034
6035 result = -1;
6036 error = sysctl_handle_int(oidp, &result, 0, req);
6037 if (error || (req->newptr == NULL))
6038 return (error);
6039
6040 if (result == 1) {
6041 sc = (struct bge_softc *)arg1;
6042
6043 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6044 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
6045 sbsz = BGE_STATUS_BLK_SZ;
6046 else
6047 sbsz = 32;
6048 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
6049 printf("Status Block:\n");
6050 BGE_LOCK(sc);
6051 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6052 sc->bge_cdata.bge_status_map,
6053 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6054 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
6055 printf("%06x:", i);
6056 for (j = 0; j < 8; j++)
6057 printf(" %04x", sbdata[i++]);
6058 printf("\n");
6059 }
6060
6061 printf("Registers:\n");
6062 for (i = 0x800; i < 0xA00; ) {
6063 printf("%06x:", i);
6064 for (j = 0; j < 8; j++) {
6065 printf(" %08x", CSR_READ_4(sc, i));
6066 i += 4;
6067 }
6068 printf("\n");
6069 }
6070 BGE_UNLOCK(sc);
6071
6072 printf("Hardware Flags:\n");
6073 if (BGE_IS_5717_PLUS(sc))
6074 printf(" - 5717 Plus\n");
6075 if (BGE_IS_5755_PLUS(sc))
6076 printf(" - 5755 Plus\n");
6077 if (BGE_IS_575X_PLUS(sc))
6078 printf(" - 575X Plus\n");
6079 if (BGE_IS_5705_PLUS(sc))
6080 printf(" - 5705 Plus\n");
6081 if (BGE_IS_5714_FAMILY(sc))
6082 printf(" - 5714 Family\n");
6083 if (BGE_IS_5700_FAMILY(sc))
6084 printf(" - 5700 Family\n");
6085 if (sc->bge_flags & BGE_FLAG_JUMBO)
6086 printf(" - Supports Jumbo Frames\n");
6087 if (sc->bge_flags & BGE_FLAG_PCIX)
6088 printf(" - PCI-X Bus\n");
6089 if (sc->bge_flags & BGE_FLAG_PCIE)
6090 printf(" - PCI Express Bus\n");
6091 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
6092 printf(" - No 3 LEDs\n");
6093 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
6094 printf(" - RX Alignment Bug\n");
6095 }
6096
6097 return (error);
6098 }
6099
6100 static int
6101 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6102 {
6103 struct bge_softc *sc;
6104 int error;
6105 int result;	/* sysctl_handle_int() expects a full int */
6106 uint32_t val;
6107
6108 result = -1;
6109 error = sysctl_handle_int(oidp, &result, 0, req);
6110 if (error || (req->newptr == NULL))
6111 return (error);
6112
6113 if (result >= 0 && result < 0x8000) {
6114 sc = (struct bge_softc *)arg1;
6115 val = CSR_READ_4(sc, result);
6116 printf("reg 0x%06X = 0x%08X\n", result, val);
6117 }
6118
6119 return (error);
6120 }
6121
6122 static int
6123 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6124 {
6125 struct bge_softc *sc;
6126 int error;
6127 int result;	/* sysctl_handle_int() expects a full int */
6128 uint32_t val;
6129
6130 result = -1;
6131 error = sysctl_handle_int(oidp, &result, 0, req);
6132 if (error || (req->newptr == NULL))
6133 return (error);
6134
6135 if (result >= 0 && result < 0x8000) {
6136 sc = (struct bge_softc *)arg1;
6137 val = bge_readmem_ind(sc, result);
6138 printf("mem 0x%06X = 0x%08X\n", result, val);
6139 }
6140
6141 return (error);
6142 }
6143 #endif
6144
6145 static int
6146 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6147 {
6148
6149 if (sc->bge_flags & BGE_FLAG_EADDR)
6150 return (1);
6151
6152 #ifdef __sparc64__
6153 OF_getetheraddr(sc->bge_dev, ether_addr);
6154 return (0);
6155 #endif
6156 return (1);
6157 }
6158
6159 static int
6160 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6161 {
6162 uint32_t mac_addr;
6163
6164 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
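	/* A valid address is tagged with the ASCII signature "HK" (0x484b). */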
6165 if ((mac_addr >> 16) == 0x484b) {
6166 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6167 ether_addr[1] = (uint8_t)mac_addr;
6168 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6169 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6170 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6171 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6172 ether_addr[5] = (uint8_t)mac_addr;
6173 return (0);
6174 }
6175 return (1);
6176 }
6177
6178 static int
6179 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6180 {
6181 int mac_offset = BGE_EE_MAC_OFFSET;
6182
6183 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6184 mac_offset = BGE_EE_MAC_OFFSET_5906;
6185
6186 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6187 ETHER_ADDR_LEN));
6188 }
6189
6190 static int
6191 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6192 {
6193
6194 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6195 return (1);
6196
6197 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6198 ETHER_ADDR_LEN));
6199 }
6200
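/*
 * Walk the table of MAC address sources (firmware/Open Firmware, chip
 * SRAM mailbox, NVRAM, EEPROM) in priority order and use the first one
 * that yields an address; fail with ENXIO if none does.
 */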
6201 static int
6202 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6203 {
6204 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6205 /* NOTE: Order is critical */
6206 bge_get_eaddr_fw,
6207 bge_get_eaddr_mem,
6208 bge_get_eaddr_nvram,
6209 bge_get_eaddr_eeprom,
6210 NULL
6211 };
6212 const bge_eaddr_fcn_t *func;
6213
6214 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6215 if ((*func)(sc, eaddr) == 0)
6216 break;
6217 }
6218 return (*func == NULL ? ENXIO : 0);
6219 }