FreeBSD/Linux Kernel Cross Reference
sys/dev/bge/if_bge.c
1 /*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/6.2/sys/dev/bge/if_bge.c 165451 2006-12-21 21:53:54Z scottl $");
36
37 /*
38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can only have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88
89 #include <net/bpf.h>
90
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97
98 #include <machine/bus.h>
99 #include <machine/resource.h>
100 #include <sys/bus.h>
101 #include <sys/rman.h>
102
103 #include <dev/mii/mii.h>
104 #include <dev/mii/miivar.h>
105 #include "miidevs.h"
106 #include <dev/mii/brgphyreg.h>
107
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcivar.h>
110
111 #include <dev/bge/if_bgereg.h>
112
113 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
114 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
115
116 MODULE_DEPEND(bge, pci, 1, 1, 1);
117 MODULE_DEPEND(bge, ether, 1, 1, 1);
118 MODULE_DEPEND(bge, miibus, 1, 1, 1);
119
120 /* "device miibus" required. See GENERIC if you get errors here. */
121 #include "miibus_if.h"
122
123 /*
124 * Various supported device vendors/types and their names. Note: the
125 * spec seems to indicate that the hardware still has Alteon's vendor
126 * ID burned into it, though it will always be overriden by the vendor
127 * ID in the EEPROM. Just to be safe, we cover all possibilities.
128 */
129 static struct bge_type {
130 uint16_t bge_vid;
131 uint16_t bge_did;
132 } bge_devs[] = {
133 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
134 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
135
136 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
137 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
138 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
139
140 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
141
142 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
183
184 { SK_VENDORID, SK_DEVICEID_ALTIMA },
185
186 { TC_VENDORID, TC_DEVICEID_3C996 },
187
188 { 0, 0 }
189 };
190
191 static const struct bge_vendor {
192 uint16_t v_id;
193 const char *v_name;
194 } bge_vendors[] = {
195 { ALTEON_VENDORID, "Alteon" },
196 { ALTIMA_VENDORID, "Altima" },
197 { APPLE_VENDORID, "Apple" },
198 { BCOM_VENDORID, "Broadcom" },
199 { SK_VENDORID, "SysKonnect" },
200 { TC_VENDORID, "3Com" },
201
202 { 0, NULL }
203 };
204
205 static const struct bge_revision {
206 uint32_t br_chipid;
207 const char *br_name;
208 } bge_revisions[] = {
209 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
210 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
211 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
212 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
213 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
214 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
215 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
216 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
217 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
218 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
219 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
220 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
221 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
222 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
223 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
224 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
225 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
226 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
227 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
228 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
229 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
230 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
231 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
232 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
233 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
234 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
235 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
236 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
237 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
238 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
239 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
240 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
241 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
242 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
243 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
244 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
245 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
246 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
247 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
248 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
249 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
250 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
251
252 { 0, NULL }
253 };
254
255 /*
256 * Some defaults for major revisions, so that newer steppings
257 * that we don't know about have a shot at working.
258 */
259 static const struct bge_revision bge_majorrevs[] = {
260 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
261 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
262 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
263 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
264 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
265 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
266 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
267 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
268 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
269 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
270
271 { 0, NULL }
272 };
273
274 #define BGE_IS_5705_OR_BEYOND(sc) \
275 ((sc)->bge_asicrev == BGE_ASICREV_BCM5705 || \
276 (sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
277 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
278 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
279 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
280 (sc)->bge_asicrev == BGE_ASICREV_BCM5752)
281
282 #define BGE_IS_575X_PLUS(sc) \
283 ((sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
284 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
285 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
286 (sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
287 (sc)->bge_asicrev == BGE_ASICREV_BCM5752)
288
289 #define BGE_IS_5714_FAMILY(sc) \
290 ((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
291 (sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
292 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)
293
294 #define BGE_IS_JUMBO_CAPABLE(sc) \
295 ((sc)->bge_asicrev == BGE_ASICREV_BCM5700 || \
296 (sc)->bge_asicrev == BGE_ASICREV_BCM5701 || \
297 (sc)->bge_asicrev == BGE_ASICREV_BCM5703 || \
298 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)
299
300 const struct bge_revision * bge_lookup_rev(uint32_t);
301 const struct bge_vendor * bge_lookup_vendor(uint16_t);
302 static int bge_probe(device_t);
303 static int bge_attach(device_t);
304 static int bge_detach(device_t);
305 static int bge_suspend(device_t);
306 static int bge_resume(device_t);
307 static void bge_release_resources(struct bge_softc *);
308 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
309 static int bge_dma_alloc(device_t);
310 static void bge_dma_free(struct bge_softc *);
311
312 static void bge_txeof(struct bge_softc *);
313 static void bge_rxeof(struct bge_softc *);
314
315 static void bge_tick_locked(struct bge_softc *);
316 static void bge_tick(void *);
317 static void bge_stats_update(struct bge_softc *);
318 static void bge_stats_update_regs(struct bge_softc *);
319 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
320
321 static void bge_intr(void *);
322 static void bge_start_locked(struct ifnet *);
323 static void bge_start(struct ifnet *);
324 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
325 static void bge_init_locked(struct bge_softc *);
326 static void bge_init(void *);
327 static void bge_stop(struct bge_softc *);
328 static void bge_watchdog(struct ifnet *);
329 static void bge_shutdown(device_t);
330 static int bge_ifmedia_upd_locked(struct ifnet *);
331 static int bge_ifmedia_upd(struct ifnet *);
332 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
333
334 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
335 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
336
337 static void bge_setmulti(struct bge_softc *);
338
339 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
340 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
341 static int bge_init_rx_ring_std(struct bge_softc *);
342 static void bge_free_rx_ring_std(struct bge_softc *);
343 static int bge_init_rx_ring_jumbo(struct bge_softc *);
344 static void bge_free_rx_ring_jumbo(struct bge_softc *);
345 static void bge_free_tx_ring(struct bge_softc *);
346 static int bge_init_tx_ring(struct bge_softc *);
347
348 static int bge_chipinit(struct bge_softc *);
349 static int bge_blockinit(struct bge_softc *);
350
351 static uint32_t bge_readmem_ind(struct bge_softc *, int);
352 static void bge_writemem_ind(struct bge_softc *, int, int);
353 #ifdef notdef
354 static uint32_t bge_readreg_ind(struct bge_softc *, int);
355 #endif
356 static void bge_writereg_ind(struct bge_softc *, int, int);
357
358 static int bge_miibus_readreg(device_t, int, int);
359 static int bge_miibus_writereg(device_t, int, int, int);
360 static void bge_miibus_statchg(device_t);
361 #ifdef DEVICE_POLLING
362 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
363 #endif
364
365 static void bge_reset(struct bge_softc *);
366 static void bge_link_upd(struct bge_softc *);
367
368 static device_method_t bge_methods[] = {
369 /* Device interface */
370 DEVMETHOD(device_probe, bge_probe),
371 DEVMETHOD(device_attach, bge_attach),
372 DEVMETHOD(device_detach, bge_detach),
373 DEVMETHOD(device_shutdown, bge_shutdown),
374 DEVMETHOD(device_suspend, bge_suspend),
375 DEVMETHOD(device_resume, bge_resume),
376
377 /* bus interface */
378 DEVMETHOD(bus_print_child, bus_generic_print_child),
379 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
380
381 /* MII interface */
382 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
383 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
384 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
385
386 { 0, 0 }
387 };
388
389 static driver_t bge_driver = {
390 "bge",
391 bge_methods,
392 sizeof(struct bge_softc)
393 };
394
395 static devclass_t bge_devclass;
396
397 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
398 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
399
400 static int bge_fake_autoneg = 0;
401 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
402
403 static uint32_t
404 bge_readmem_ind(struct bge_softc *sc, int off)
405 {
406 device_t dev;
407
408 dev = sc->bge_dev;
409
410 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
411 return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
412 }
413
414 static void
415 bge_writemem_ind(struct bge_softc *sc, int off, int val)
416 {
417 device_t dev;
418
419 dev = sc->bge_dev;
420
421 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
422 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
423 }
424
425 #ifdef notdef
426 static uint32_t
427 bge_readreg_ind(struct bge_softc *sc, int off)
428 {
429 device_t dev;
430
431 dev = sc->bge_dev;
432
433 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
434 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
435 }
436 #endif
437
438 static void
439 bge_writereg_ind(struct bge_softc *sc, int off, int val)
440 {
441 device_t dev;
442
443 dev = sc->bge_dev;
444
445 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
446 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
447 }
448
449 /*
450 * Map a single buffer address.
451 */
452
453 static void
454 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
455 {
456 struct bge_dmamap_arg *ctx;
457
458 if (error)
459 return;
460
461 ctx = arg;
462
463 if (nseg > ctx->bge_maxsegs) {
464 ctx->bge_maxsegs = 0;
465 return;
466 }
467
468 ctx->bge_busaddr = segs->ds_addr;
469 }
470
471 /*
472 * Read a byte of data stored in the EEPROM at address 'addr.' The
473 * BCM570x supports both the traditional bitbang interface and an
474 * auto access interface for reading the EEPROM. We use the auto
475 * access method.
476 */
477 static uint8_t
478 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
479 {
480 int i;
481 uint32_t byte = 0;
482
483 /*
484 * Enable use of auto EEPROM access so we can avoid
485 * having to use the bitbang method.
486 */
487 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
488
489 /* Reset the EEPROM, load the clock period. */
490 CSR_WRITE_4(sc, BGE_EE_ADDR,
491 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
492 DELAY(20);
493
494 /* Issue the read EEPROM command. */
495 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
496
497 /* Wait for completion */
498 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
499 DELAY(10);
500 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
501 break;
502 }
503
504 if (i == BGE_TIMEOUT) {
505 device_printf(sc->bge_dev, "EEPROM read timed out\n");
506 return (1);
507 }
508
509 /* Get result. */
510 byte = CSR_READ_4(sc, BGE_EE_DATA);
511
512 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
513
514 return (0);
515 }
516
517 /*
518 * Read a sequence of bytes from the EEPROM.
519 */
520 static int
521 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
522 {
523 int i, error = 0;
524 uint8_t byte = 0;
525
526 for (i = 0; i < cnt; i++) {
527 error = bge_eeprom_getbyte(sc, off + i, &byte);
528 if (error)
529 break;
530 *(dest + i) = byte;
531 }
532
533 return (error ? 1 : 0);
534 }
535
536 static int
537 bge_miibus_readreg(device_t dev, int phy, int reg)
538 {
539 struct bge_softc *sc;
540 uint32_t val, autopoll;
541 int i;
542
543 sc = device_get_softc(dev);
544
545 /*
546 * Broadcom's own driver always assumes the internal
547 * PHY is at GMII address 1. On some chips, the PHY responds
548 * to accesses at all addresses, which could cause us to
549 * bogusly attach the PHY 32 times at probe type. Always
550 * restricting the lookup to address 1 is simpler than
551 * trying to figure out which chips revisions should be
552 * special-cased.
553 */
554 if (phy != 1)
555 return (0);
556
557 /* Reading with autopolling on may trigger PCI errors */
558 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
559 if (autopoll & BGE_MIMODE_AUTOPOLL) {
560 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
561 DELAY(40);
562 }
563
564 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
565 BGE_MIPHY(phy)|BGE_MIREG(reg));
566
567 for (i = 0; i < BGE_TIMEOUT; i++) {
568 val = CSR_READ_4(sc, BGE_MI_COMM);
569 if (!(val & BGE_MICOMM_BUSY))
570 break;
571 }
572
573 if (i == BGE_TIMEOUT) {
574 if_printf(sc->bge_ifp, "PHY read timed out\n");
575 val = 0;
576 goto done;
577 }
578
579 val = CSR_READ_4(sc, BGE_MI_COMM);
580
581 done:
582 if (autopoll & BGE_MIMODE_AUTOPOLL) {
583 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
584 DELAY(40);
585 }
586
587 if (val & BGE_MICOMM_READFAIL)
588 return (0);
589
590 return (val & 0xFFFF);
591 }
592
593 static int
594 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
595 {
596 struct bge_softc *sc;
597 uint32_t autopoll;
598 int i;
599
600 sc = device_get_softc(dev);
601
602 /* Reading with autopolling on may trigger PCI errors */
603 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
604 if (autopoll & BGE_MIMODE_AUTOPOLL) {
605 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
606 DELAY(40);
607 }
608
609 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
610 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
611
612 for (i = 0; i < BGE_TIMEOUT; i++) {
613 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
614 break;
615 }
616
617 if (autopoll & BGE_MIMODE_AUTOPOLL) {
618 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
619 DELAY(40);
620 }
621
622 if (i == BGE_TIMEOUT) {
623 if_printf(sc->bge_ifp, "PHY read timed out\n");
624 return (0);
625 }
626
627 return (0);
628 }
629
630 static void
631 bge_miibus_statchg(device_t dev)
632 {
633 struct bge_softc *sc;
634 struct mii_data *mii;
635
636 sc = device_get_softc(dev);
637 mii = device_get_softc(sc->bge_miibus);
638
639 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
640 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
641 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
642 else
643 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
644
645 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
646 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
647 else
648 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
649 }
650
651 /*
652 * Intialize a standard receive ring descriptor.
653 */
654 static int
655 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
656 {
657 struct mbuf *m_new = NULL;
658 struct bge_rx_bd *r;
659 struct bge_dmamap_arg ctx;
660 int error;
661
662 if (m == NULL) {
663 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
664 if (m_new == NULL)
665 return (ENOBUFS);
666 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
667 } else {
668 m_new = m;
669 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
670 m_new->m_data = m_new->m_ext.ext_buf;
671 }
672
673 if (!sc->bge_rx_alignment_bug)
674 m_adj(m_new, ETHER_ALIGN);
675 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
676 r = &sc->bge_ldata.bge_rx_std_ring[i];
677 ctx.bge_maxsegs = 1;
678 ctx.sc = sc;
679 error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
680 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
681 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
682 if (error || ctx.bge_maxsegs == 0) {
683 if (m == NULL) {
684 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
685 m_freem(m_new);
686 }
687 return (ENOMEM);
688 }
689 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
690 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
691 r->bge_flags = BGE_RXBDFLAG_END;
692 r->bge_len = m_new->m_len;
693 r->bge_idx = i;
694
695 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
696 sc->bge_cdata.bge_rx_std_dmamap[i],
697 BUS_DMASYNC_PREREAD);
698
699 return (0);
700 }
701
702 /*
703 * Initialize a jumbo receive ring descriptor. This allocates
704 * a jumbo buffer from the pool managed internally by the driver.
705 */
706 static int
707 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
708 {
709 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
710 struct bge_extrx_bd *r;
711 struct mbuf *m_new = NULL;
712 int nsegs;
713 int error;
714
715 if (m == NULL) {
716 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
717 if (m_new == NULL)
718 return (ENOBUFS);
719
720 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
721 if (!(m_new->m_flags & M_EXT)) {
722 m_freem(m_new);
723 return (ENOBUFS);
724 }
725 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
726 } else {
727 m_new = m;
728 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
729 m_new->m_data = m_new->m_ext.ext_buf;
730 }
731
732 if (!sc->bge_rx_alignment_bug)
733 m_adj(m_new, ETHER_ALIGN);
734
735 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
736 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
737 m_new, segs, &nsegs, BUS_DMA_NOWAIT);
738 if (error) {
739 if (m == NULL)
740 m_freem(m_new);
741 return (error);
742 }
743 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
744
745 /*
746 * Fill in the extended RX buffer descriptor.
747 */
748 r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
749 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
750 r->bge_idx = i;
751 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
752 switch (nsegs) {
753 case 4:
754 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
755 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
756 r->bge_len3 = segs[3].ds_len;
757 case 3:
758 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
759 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
760 r->bge_len2 = segs[2].ds_len;
761 case 2:
762 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
763 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
764 r->bge_len1 = segs[1].ds_len;
765 case 1:
766 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
767 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
768 r->bge_len0 = segs[0].ds_len;
769 break;
770 default:
771 panic("%s: %d segments\n", __func__, nsegs);
772 }
773
774 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
775 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
776 BUS_DMASYNC_PREREAD);
777
778 return (0);
779 }
780
781 /*
782 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
783 * that's 1MB or memory, which is a lot. For now, we fill only the first
784 * 256 ring entries and hope that our CPU is fast enough to keep up with
785 * the NIC.
786 */
787 static int
788 bge_init_rx_ring_std(struct bge_softc *sc)
789 {
790 int i;
791
792 for (i = 0; i < BGE_SSLOTS; i++) {
793 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
794 return (ENOBUFS);
795 };
796
797 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
798 sc->bge_cdata.bge_rx_std_ring_map,
799 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
800
801 sc->bge_std = i - 1;
802 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
803
804 return (0);
805 }
806
807 static void
808 bge_free_rx_ring_std(struct bge_softc *sc)
809 {
810 int i;
811
812 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
813 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
814 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
815 sc->bge_cdata.bge_rx_std_dmamap[i],
816 BUS_DMASYNC_POSTREAD);
817 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
818 sc->bge_cdata.bge_rx_std_dmamap[i]);
819 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
820 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
821 }
822 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
823 sizeof(struct bge_rx_bd));
824 }
825 }
826
827 static int
828 bge_init_rx_ring_jumbo(struct bge_softc *sc)
829 {
830 struct bge_rcb *rcb;
831 int i;
832
833 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
834 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
835 return (ENOBUFS);
836 };
837
838 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
839 sc->bge_cdata.bge_rx_jumbo_ring_map,
840 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
841
842 sc->bge_jumbo = i - 1;
843
844 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
845 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
846 BGE_RCB_FLAG_USE_EXT_RX_BD);
847 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
848
849 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
850
851 return (0);
852 }
853
854 static void
855 bge_free_rx_ring_jumbo(struct bge_softc *sc)
856 {
857 int i;
858
859 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
860 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
861 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
862 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
863 BUS_DMASYNC_POSTREAD);
864 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
865 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
866 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
867 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
868 }
869 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
870 sizeof(struct bge_extrx_bd));
871 }
872 }
873
874 static void
875 bge_free_tx_ring(struct bge_softc *sc)
876 {
877 int i;
878
879 if (sc->bge_ldata.bge_tx_ring == NULL)
880 return;
881
882 for (i = 0; i < BGE_TX_RING_CNT; i++) {
883 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
884 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
885 sc->bge_cdata.bge_tx_dmamap[i],
886 BUS_DMASYNC_POSTWRITE);
887 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
888 sc->bge_cdata.bge_tx_dmamap[i]);
889 m_freem(sc->bge_cdata.bge_tx_chain[i]);
890 sc->bge_cdata.bge_tx_chain[i] = NULL;
891 }
892 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
893 sizeof(struct bge_tx_bd));
894 }
895 }
896
897 static int
898 bge_init_tx_ring(struct bge_softc *sc)
899 {
900 sc->bge_txcnt = 0;
901 sc->bge_tx_saved_considx = 0;
902
903 /* Initialize transmit producer index for host-memory send ring. */
904 sc->bge_tx_prodidx = 0;
905 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
906
907 /* 5700 b2 errata */
908 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
909 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
910
911 /* NIC-memory send ring not used; initialize to zero. */
912 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
913 /* 5700 b2 errata */
914 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
915 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
916
917 return (0);
918 }
919
920 static void
921 bge_setmulti(struct bge_softc *sc)
922 {
923 struct ifnet *ifp;
924 struct ifmultiaddr *ifma;
925 uint32_t hashes[4] = { 0, 0, 0, 0 };
926 int h, i;
927
928 BGE_LOCK_ASSERT(sc);
929
930 ifp = sc->bge_ifp;
931
932 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
933 for (i = 0; i < 4; i++)
934 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
935 return;
936 }
937
938 /* First, zot all the existing filters. */
939 for (i = 0; i < 4; i++)
940 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
941
942 /* Now program new ones. */
943 IF_ADDR_LOCK(ifp);
944 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
945 if (ifma->ifma_addr->sa_family != AF_LINK)
946 continue;
947 h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
948 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
949 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
950 }
951 IF_ADDR_UNLOCK(ifp);
952
953 for (i = 0; i < 4; i++)
954 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
955 }
956
957 /*
958 * Do endian, PCI and DMA initialization. Also check the on-board ROM
959 * self-test results.
960 */
961 static int
962 bge_chipinit(struct bge_softc *sc)
963 {
964 uint32_t dma_rw_ctl;
965 int i;
966
967 /* Set endian type before we access any non-PCI registers. */
968 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
969
970 /*
971 * Check the 'ROM failed' bit on the RX CPU to see if
972 * self-tests passed.
973 */
974 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
975 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
976 return (ENODEV);
977 }
978
979 /* Clear the MAC control register */
980 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
981
982 /*
983 * Clear the MAC statistics block in the NIC's
984 * internal memory.
985 */
986 for (i = BGE_STATS_BLOCK;
987 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
988 BGE_MEMWIN_WRITE(sc, i, 0);
989
990 for (i = BGE_STATUS_BLOCK;
991 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
992 BGE_MEMWIN_WRITE(sc, i, 0);
993
994 /* Set up the PCI DMA control register. */
995 if (sc->bge_pcie) {
996 /* PCI Express bus */
997 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
998 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
999 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1000 } else if (sc->bge_pcix) {
1001 /* PCI-X bus */
1002 if (BGE_IS_5714_FAMILY(sc)) {
1003 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1004 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1005 /* XXX magic values, Broadcom-supplied Linux driver */
1006 if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1007 dma_rw_ctl |= (1 << 20) | (1 << 18) |
1008 BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1009 else
1010 dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1011
1012 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1013 /*
1014 * The 5704 uses a different encoding of read/write
1015 * watermarks.
1016 */
1017 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1018 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1019 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1020 else
1021 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1022 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1023 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1024 (0x0F);
1025
1026 /*
1027 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1028 * for hardware bugs.
1029 */
1030 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1031 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1032 uint32_t tmp;
1033
1034 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1035 if (tmp == 0x6 || tmp == 0x7)
1036 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1037 }
1038 } else
1039 /* Conventional PCI bus */
1040 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1041 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1042 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1043 (0x0F);
1044
1045 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1046 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1047 sc->bge_asicrev == BGE_ASICREV_BCM5705)
1048 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1049 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1050
1051 /*
1052 * Set up general mode register.
1053 */
1054 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1055 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1056 BGE_MODECTL_TX_NO_PHDR_CSUM);
1057
1058 /*
1059 * Disable memory write invalidate. Apparently it is not supported
1060 * properly by these devices.
1061 */
1062 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1063
1064 #ifdef __brokenalpha__
1065 /*
1066 * Must insure that we do not cross an 8K (bytes) boundary
1067 * for DMA reads. Our highest limit is 1K bytes. This is a
1068 * restriction on some ALPHA platforms with early revision
1069 * 21174 PCI chipsets, such as the AlphaPC 164lx
1070 */
1071 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1072 BGE_PCI_READ_BNDRY_1024BYTES, 4);
1073 #endif
1074
1075 /* Set the timer prescaler (always 66Mhz) */
1076 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1077
1078 return (0);
1079 }
1080
1081 static int
1082 bge_blockinit(struct bge_softc *sc)
1083 {
1084 struct bge_rcb *rcb;
1085 bus_size_t vrcb;
1086 bge_hostaddr taddr;
1087 int i;
1088
1089 /*
1090 * Initialize the memory window pointer register so that
1091 * we can access the first 32K of internal NIC RAM. This will
1092 * allow us to set up the TX send ring RCBs and the RX return
1093 * ring RCBs, plus other things which live in NIC memory.
1094 */
1095 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1096
1097 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1098
1099 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1100 /* Configure mbuf memory pool */
1101 if (sc->bge_extram) {
1102 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1103 BGE_EXT_SSRAM);
1104 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1105 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1106 else
1107 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1108 } else {
1109 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1110 BGE_BUFFPOOL_1);
1111 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1112 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1113 else
1114 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1115 }
1116
1117 /* Configure DMA resource pool */
1118 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1119 BGE_DMA_DESCRIPTORS);
1120 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1121 }
1122
1123 /* Configure mbuf pool watermarks */
1124 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1125 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1126 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1127 } else {
1128 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1129 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1130 }
1131 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1132
1133 /* Configure DMA resource watermarks */
1134 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1135 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1136
1137 /* Enable buffer manager */
1138 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1139 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1140 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1141
1142 /* Poll for buffer manager start indication */
1143 for (i = 0; i < BGE_TIMEOUT; i++) {
1144 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1145 break;
1146 DELAY(10);
1147 }
1148
1149 if (i == BGE_TIMEOUT) {
1150 device_printf(sc->bge_dev,
1151 "buffer manager failed to start\n");
1152 return (ENXIO);
1153 }
1154 }
1155
1156 /* Enable flow-through queues */
1157 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1158 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1159
1160 /* Wait until queue initialization is complete */
1161 for (i = 0; i < BGE_TIMEOUT; i++) {
1162 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1163 break;
1164 DELAY(10);
1165 }
1166
1167 if (i == BGE_TIMEOUT) {
1168 device_printf(sc->bge_dev, "flow-through queue init failed\n");
1169 return (ENXIO);
1170 }
1171
1172 /* Initialize the standard RX ring control block */
1173 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1174 rcb->bge_hostaddr.bge_addr_lo =
1175 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1176 rcb->bge_hostaddr.bge_addr_hi =
1177 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1178 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1179 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1180 if (BGE_IS_5705_OR_BEYOND(sc))
1181 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1182 else
1183 rcb->bge_maxlen_flags =
1184 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1185 if (sc->bge_extram)
1186 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1187 else
1188 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1189 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1190 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1191
1192 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1193 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1194
1195 /*
1196 * Initialize the jumbo RX ring control block
1197 * We set the 'ring disabled' bit in the flags
1198 * field until we're actually ready to start
1199 * using this ring (i.e. once we set the MTU
1200 * high enough to require it).
1201 */
1202 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1203 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1204
1205 rcb->bge_hostaddr.bge_addr_lo =
1206 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1207 rcb->bge_hostaddr.bge_addr_hi =
1208 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1209 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1210 sc->bge_cdata.bge_rx_jumbo_ring_map,
1211 BUS_DMASYNC_PREREAD);
1212 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1213 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1214 if (sc->bge_extram)
1215 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1216 else
1217 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1218 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1219 rcb->bge_hostaddr.bge_addr_hi);
1220 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1221 rcb->bge_hostaddr.bge_addr_lo);
1222
1223 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1224 rcb->bge_maxlen_flags);
1225 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1226
1227 /* Set up dummy disabled mini ring RCB */
1228 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1229 rcb->bge_maxlen_flags =
1230 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1231 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1232 rcb->bge_maxlen_flags);
1233 }
1234
1235 /*
1236 * Set the BD ring replentish thresholds. The recommended
1237 * values are 1/8th the number of descriptors allocated to
1238 * each ring.
1239 */
1240 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1241 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1242
1243 /*
1244 * Disable all unused send rings by setting the 'ring disabled'
1245 * bit in the flags field of all the TX send ring control blocks.
1246 * These are located in NIC memory.
1247 */
1248 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1249 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1250 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1251 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1252 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1253 vrcb += sizeof(struct bge_rcb);
1254 }
1255
1256 /* Configure TX RCB 0 (we use only the first ring) */
1257 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1258 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1259 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1260 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1261 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1262 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1263 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1264 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1265 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1266
1267 /* Disable all unused RX return rings */
1268 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1269 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1270 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1271 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1272 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1273 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1274 BGE_RCB_FLAG_RING_DISABLED));
1275 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1276 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1277 (i * (sizeof(uint64_t))), 0);
1278 vrcb += sizeof(struct bge_rcb);
1279 }
1280
1281 /* Initialize RX ring indexes */
1282 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1283 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1284 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1285
1286 /*
1287 * Set up RX return ring 0
1288 * Note that the NIC address for RX return rings is 0x00000000.
1289 * The return rings live entirely within the host, so the
1290 * nicaddr field in the RCB isn't used.
1291 */
1292 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1293 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1294 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1295 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1296 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1297 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1298 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1299
1300 /* Set random backoff seed for TX */
1301 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1302 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1303 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1304 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1305 BGE_TX_BACKOFF_SEED_MASK);
1306
1307 /* Set inter-packet gap */
1308 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1309
1310 /*
1311 * Specify which ring to use for packets that don't match
1312 * any RX rules.
1313 */
1314 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1315
1316 /*
1317 * Configure number of RX lists. One interrupt distribution
1318 * list, sixteen active lists, one bad frames class.
1319 */
1320 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1321
1322 /* Inialize RX list placement stats mask. */
1323 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1324 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1325
1326 /* Disable host coalescing until we get it set up */
1327 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1328
1329 /* Poll to make sure it's shut down. */
1330 for (i = 0; i < BGE_TIMEOUT; i++) {
1331 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1332 break;
1333 DELAY(10);
1334 }
1335
1336 if (i == BGE_TIMEOUT) {
1337 device_printf(sc->bge_dev,
1338 "host coalescing engine failed to idle\n");
1339 return (ENXIO);
1340 }
1341
1342 /* Set up host coalescing defaults */
1343 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1344 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1345 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1346 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1347 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1348 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1349 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1350 }
1351 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1352 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1353
1354 /* Set up address of statistics block */
1355 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1356 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1357 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1358 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1359 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1360 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1361 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1362 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1363 }
1364
1365 /* Set up address of status block */
1366 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1367 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1368 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1369 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1370 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1371 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1372
1373 /* Turn on host coalescing state machine */
1374 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1375
1376 /* Turn on RX BD completion state machine and enable attentions */
1377 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1378 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1379
1380 /* Turn on RX list placement state machine */
1381 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1382
1383 /* Turn on RX list selector state machine. */
1384 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1385 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1386
1387 /* Turn on DMA, clear stats */
1388 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1389 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1390 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1391 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1392 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1393
1394 /* Set misc. local control, enable interrupts on attentions */
1395 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1396
1397 #ifdef notdef
1398 /* Assert GPIO pins for PHY reset */
1399 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1400 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1401 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1402 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1403 #endif
1404
1405 /* Turn on DMA completion state machine */
1406 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1407 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1408
1409 /* Turn on write DMA state machine */
1410 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1411 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1412
1413 /* Turn on read DMA state machine */
1414 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1415 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1416
1417 /* Turn on RX data completion state machine */
1418 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1419
1420 /* Turn on RX BD initiator state machine */
1421 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1422
1423 /* Turn on RX data and RX BD initiator state machine */
1424 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1425
1426 /* Turn on Mbuf cluster free state machine */
1427 if (!(BGE_IS_5705_OR_BEYOND(sc)))
1428 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1429
1430 /* Turn on send BD completion state machine */
1431 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1432
1433 /* Turn on send data completion state machine */
1434 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1435
1436 /* Turn on send data initiator state machine */
1437 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1438
1439 /* Turn on send BD initiator state machine */
1440 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1441
1442 /* Turn on send BD selector state machine */
1443 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1444
1445 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1446 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1447 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1448
1449 /* ack/clear link change events */
1450 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1451 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1452 BGE_MACSTAT_LINK_CHANGED);
1453 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1454
1455 /* Enable PHY auto polling (for MII/GMII only) */
1456 if (sc->bge_tbi) {
1457 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1458 } else {
1459 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1460 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1461 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1462 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1463 BGE_EVTENB_MI_INTERRUPT);
1464 }
1465
1466 /*
1467 * Clear any pending link state attention.
1468 * Otherwise some link state change events may be lost until attention
1469 * is cleared by bge_intr() -> bge_link_upd() sequence.
1470 * It's not necessary on newer BCM chips - perhaps enabling link
1471 * state change attentions implies clearing pending attention.
1472 */
1473 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1474 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1475 BGE_MACSTAT_LINK_CHANGED);
1476
1477 /* Enable link state change attentions. */
1478 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1479
1480 return (0);
1481 }
1482
1483 const struct bge_revision *
1484 bge_lookup_rev(uint32_t chipid)
1485 {
1486 const struct bge_revision *br;
1487
1488 for (br = bge_revisions; br->br_name != NULL; br++) {
1489 if (br->br_chipid == chipid)
1490 return (br);
1491 }
1492
1493 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1494 if (br->br_chipid == BGE_ASICREV(chipid))
1495 return (br);
1496 }
1497
1498 return (NULL);
1499 }
1500
1501 const struct bge_vendor *
1502 bge_lookup_vendor(uint16_t vid)
1503 {
1504 const struct bge_vendor *v;
1505
1506 for (v = bge_vendors; v->v_name != NULL; v++)
1507 if (v->v_id == vid)
1508 return (v);
1509
1510 panic("%s: unknown vendor %d", __func__, vid);
1511 return (NULL);
1512 }
1513
1514 /*
1515 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1516 * against our list and return its name if we find a match.
1517 *
1518 * Note that since the Broadcom controller contains VPD support, we
1519 * can get the device name string from the controller itself instead
1520 * of the compiled-in string. This is a little slow, but it guarantees
1521 * we'll always announce the right product name. Unfortunately, this
1522 * is possible only later in bge_attach(), when we have established
1523 * access to EEPROM.
1524 */
1525 static int
1526 bge_probe(device_t dev)
1527 {
1528 struct bge_type *t = bge_devs;
1529 struct bge_softc *sc = device_get_softc(dev);
1530
1531 bzero(sc, sizeof(struct bge_softc));
1532 sc->bge_dev = dev;
1533
1534 while(t->bge_vid != 0) {
1535 if ((pci_get_vendor(dev) == t->bge_vid) &&
1536 (pci_get_device(dev) == t->bge_did)) {
1537 char buf[64];
1538 const struct bge_revision *br;
1539 const struct bge_vendor *v;
1540 uint32_t id;
1541
1542 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1543 BGE_PCIMISCCTL_ASICREV;
1544 br = bge_lookup_rev(id);
1545 id >>= 16;
1546 v = bge_lookup_vendor(t->bge_vid);
1547 if (br == NULL)
1548 snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1549 v->v_name, id);
1550 else
1551 snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1552 v->v_name, br->br_name, id);
1553 device_set_desc_copy(dev, buf);
1554 if (pci_get_subvendor(dev) == DELL_VENDORID)
1555 sc->bge_no_3_led = 1;
1556 return (0);
1557 }
1558 t++;
1559 }
1560
1561 return (ENXIO);
1562 }
1563
1564 static void
1565 bge_dma_free(struct bge_softc *sc)
1566 {
1567 int i;
1568
1569 /* Destroy DMA maps for RX buffers. */
1570 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1571 if (sc->bge_cdata.bge_rx_std_dmamap[i])
1572 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1573 sc->bge_cdata.bge_rx_std_dmamap[i]);
1574 }
1575
1576 /* Destroy DMA maps for jumbo RX buffers. */
1577 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1578 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1579 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1580 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1581 }
1582
1583 /* Destroy DMA maps for TX buffers. */
1584 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1585 if (sc->bge_cdata.bge_tx_dmamap[i])
1586 bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1587 sc->bge_cdata.bge_tx_dmamap[i]);
1588 }
1589
1590 if (sc->bge_cdata.bge_mtag)
1591 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1592
1593
1594 /* Destroy standard RX ring. */
1595 if (sc->bge_cdata.bge_rx_std_ring_map)
1596 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1597 sc->bge_cdata.bge_rx_std_ring_map);
1598 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1599 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1600 sc->bge_ldata.bge_rx_std_ring,
1601 sc->bge_cdata.bge_rx_std_ring_map);
1602
1603 if (sc->bge_cdata.bge_rx_std_ring_tag)
1604 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1605
1606 /* Destroy jumbo RX ring. */
1607 if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1608 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1609 sc->bge_cdata.bge_rx_jumbo_ring_map);
1610
1611 if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1612 sc->bge_ldata.bge_rx_jumbo_ring)
1613 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1614 sc->bge_ldata.bge_rx_jumbo_ring,
1615 sc->bge_cdata.bge_rx_jumbo_ring_map);
1616
1617 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1618 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1619
1620 /* Destroy RX return ring. */
1621 if (sc->bge_cdata.bge_rx_return_ring_map)
1622 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1623 sc->bge_cdata.bge_rx_return_ring_map);
1624
1625 if (sc->bge_cdata.bge_rx_return_ring_map &&
1626 sc->bge_ldata.bge_rx_return_ring)
1627 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1628 sc->bge_ldata.bge_rx_return_ring,
1629 sc->bge_cdata.bge_rx_return_ring_map);
1630
1631 if (sc->bge_cdata.bge_rx_return_ring_tag)
1632 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1633
1634 /* Destroy TX ring. */
1635 if (sc->bge_cdata.bge_tx_ring_map)
1636 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1637 sc->bge_cdata.bge_tx_ring_map);
1638
1639 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1640 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1641 sc->bge_ldata.bge_tx_ring,
1642 sc->bge_cdata.bge_tx_ring_map);
1643
1644 if (sc->bge_cdata.bge_tx_ring_tag)
1645 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1646
1647 /* Destroy status block. */
1648 if (sc->bge_cdata.bge_status_map)
1649 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1650 sc->bge_cdata.bge_status_map);
1651
1652 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1653 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1654 sc->bge_ldata.bge_status_block,
1655 sc->bge_cdata.bge_status_map);
1656
1657 if (sc->bge_cdata.bge_status_tag)
1658 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1659
1660 /* Destroy statistics block. */
1661 if (sc->bge_cdata.bge_stats_map)
1662 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1663 sc->bge_cdata.bge_stats_map);
1664
1665 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1666 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1667 sc->bge_ldata.bge_stats,
1668 sc->bge_cdata.bge_stats_map);
1669
1670 if (sc->bge_cdata.bge_stats_tag)
1671 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1672
1673 /* Destroy the parent tag. */
1674 if (sc->bge_cdata.bge_parent_tag)
1675 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1676 }
1677
1678 static int
1679 bge_dma_alloc(device_t dev)
1680 {
1681 struct bge_dmamap_arg ctx;
1682 struct bge_softc *sc;
1683 int i, error;
1684
1685 sc = device_get_softc(dev);
1686
1687 /*
1688 * Allocate the parent bus DMA tag appropriate for PCI.
1689 */
1690 error = bus_dma_tag_create(NULL, /* parent */
1691 PAGE_SIZE, 0, /* alignment, boundary */
1692 BUS_SPACE_MAXADDR, /* lowaddr */
1693 BUS_SPACE_MAXADDR, /* highaddr */
1694 NULL, NULL, /* filter, filterarg */
1695 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1696 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1697 0, /* flags */
1698 NULL, NULL, /* lockfunc, lockarg */
1699 &sc->bge_cdata.bge_parent_tag);
1700
1701 if (error != 0) {
1702 device_printf(sc->bge_dev,
1703 "could not allocate parent dma tag\n");
1704 return (ENOMEM);
1705 }
1706
1707 /*
1708 * Create tag for RX mbufs.
1709 */
1710 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1711 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1712 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1713 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1714
1715 if (error) {
1716 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1717 return (ENOMEM);
1718 }
1719
1720 /* Create DMA maps for RX buffers. */
1721 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1722 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1723 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1724 if (error) {
1725 device_printf(sc->bge_dev,
1726 "can't create DMA map for RX\n");
1727 return (ENOMEM);
1728 }
1729 }
1730
1731 /* Create DMA maps for TX buffers. */
1732 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1733 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1734 &sc->bge_cdata.bge_tx_dmamap[i]);
1735 if (error) {
1736 device_printf(sc->bge_dev,
1737 "can't create DMA map for RX\n");
1738 return (ENOMEM);
1739 }
1740 }
1741
1742 /* Create tag for standard RX ring. */
1743 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1744 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1745 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1746 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1747
1748 if (error) {
1749 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1750 return (ENOMEM);
1751 }
1752
1753 /* Allocate DMA'able memory for standard RX ring. */
1754 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1755 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1756 &sc->bge_cdata.bge_rx_std_ring_map);
1757 if (error)
1758 return (ENOMEM);
1759
1760 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1761
1762 /* Load the address of the standard RX ring. */
1763 ctx.bge_maxsegs = 1;
1764 ctx.sc = sc;
1765
1766 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1767 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1768 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1769
1770 if (error)
1771 return (ENOMEM);
1772
1773 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1774
1775 /* Create tags for jumbo mbufs. */
1776 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1777 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1778 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1779 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1780 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1781 if (error) {
1782 device_printf(sc->bge_dev,
1783 "could not allocate jumbo dma tag\n");
1784 return (ENOMEM);
1785 }
1786
1787 /* Create tag for jumbo RX ring. */
1788 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1789 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1790 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1791 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1792
1793 if (error) {
1794 device_printf(sc->bge_dev,
1795 "could not allocate jumbo ring dma tag\n");
1796 return (ENOMEM);
1797 }
1798
1799 /* Allocate DMA'able memory for jumbo RX ring. */
1800 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1801 (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1802 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1803 &sc->bge_cdata.bge_rx_jumbo_ring_map);
1804 if (error)
1805 return (ENOMEM);
1806
1807 /* Load the address of the jumbo RX ring. */
1808 ctx.bge_maxsegs = 1;
1809 ctx.sc = sc;
1810
1811 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1812 sc->bge_cdata.bge_rx_jumbo_ring_map,
1813 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1814 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1815
1816 if (error)
1817 return (ENOMEM);
1818
1819 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1820
1821 /* Create DMA maps for jumbo RX buffers. */
1822 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1823 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1824 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1825 if (error) {
1826 device_printf(sc->bge_dev,
1827 "can't create DMA map for jumbo RX\n");
1828 return (ENOMEM);
1829 }
1830 }
1831
1832 }
1833
1834 /* Create tag for RX return ring. */
1835 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1836 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1837 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1838 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1839
1840 if (error) {
1841 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1842 return (ENOMEM);
1843 }
1844
1845 /* Allocate DMA'able memory for RX return ring. */
1846 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1847 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1848 &sc->bge_cdata.bge_rx_return_ring_map);
1849 if (error)
1850 return (ENOMEM);
1851
1852 bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1853 BGE_RX_RTN_RING_SZ(sc));
1854
1855 /* Load the address of the RX return ring. */
1856 ctx.bge_maxsegs = 1;
1857 ctx.sc = sc;
1858
1859 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1860 sc->bge_cdata.bge_rx_return_ring_map,
1861 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1862 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1863
1864 if (error)
1865 return (ENOMEM);
1866
1867 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1868
1869 /* Create tag for TX ring. */
1870 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1871 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1872 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1873 &sc->bge_cdata.bge_tx_ring_tag);
1874
1875 if (error) {
1876 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1877 return (ENOMEM);
1878 }
1879
1880 /* Allocate DMA'able memory for TX ring. */
1881 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1882 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1883 &sc->bge_cdata.bge_tx_ring_map);
1884 if (error)
1885 return (ENOMEM);
1886
1887 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1888
1889 /* Load the address of the TX ring. */
1890 ctx.bge_maxsegs = 1;
1891 ctx.sc = sc;
1892
1893 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1894 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1895 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1896
1897 if (error)
1898 return (ENOMEM);
1899
1900 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1901
1902 /* Create tag for status block. */
1903 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1904 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1905 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1906 NULL, NULL, &sc->bge_cdata.bge_status_tag);
1907
1908 if (error) {
1909 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1910 return (ENOMEM);
1911 }
1912
1913 /* Allocate DMA'able memory for status block. */
1914 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1915 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1916 &sc->bge_cdata.bge_status_map);
1917 if (error)
1918 return (ENOMEM);
1919
1920 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1921
1922 /* Load the address of the status block. */
1923 ctx.sc = sc;
1924 ctx.bge_maxsegs = 1;
1925
1926 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
1927 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
1928 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1929
1930 if (error)
1931 return (ENOMEM);
1932
1933 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
1934
1935 /* Create tag for statistics block. */
1936 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1937 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1938 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
1939 &sc->bge_cdata.bge_stats_tag);
1940
1941 if (error) {
1942 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1943 return (ENOMEM);
1944 }
1945
1946 /* Allocate DMA'able memory for statistics block. */
1947 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
1948 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
1949 &sc->bge_cdata.bge_stats_map);
1950 if (error)
1951 return (ENOMEM);
1952
1953 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
1954
1955 /* Load the address of the statstics block. */
1956 ctx.sc = sc;
1957 ctx.bge_maxsegs = 1;
1958
1959 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
1960 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
1961 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1962
1963 if (error)
1964 return (ENOMEM);
1965
1966 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
1967
1968 return (0);
1969 }
1970
1971 static int
1972 bge_attach(device_t dev)
1973 {
1974 struct ifnet *ifp;
1975 struct bge_softc *sc;
1976 uint32_t hwcfg = 0;
1977 uint32_t mac_tmp = 0;
1978 u_char eaddr[6];
1979 int error = 0, rid;
1980
1981 sc = device_get_softc(dev);
1982 sc->bge_dev = dev;
1983
1984 /*
1985 * Map control/status registers.
1986 */
1987 pci_enable_busmaster(dev);
1988
1989 rid = BGE_PCI_BAR0;
1990 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1991 RF_ACTIVE|PCI_RF_DENSE);
1992
1993 if (sc->bge_res == NULL) {
1994 device_printf (sc->bge_dev, "couldn't map memory\n");
1995 error = ENXIO;
1996 goto fail;
1997 }
1998
1999 sc->bge_btag = rman_get_bustag(sc->bge_res);
2000 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2001
2002 /* Allocate interrupt. */
2003 rid = 0;
2004
2005 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2006 RF_SHAREABLE | RF_ACTIVE);
2007
2008 if (sc->bge_irq == NULL) {
2009 device_printf(sc->bge_dev, "couldn't map interrupt\n");
2010 error = ENXIO;
2011 goto fail;
2012 }
2013
2014 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2015
2016 /* Save ASIC rev. */
2017
2018 sc->bge_chipid =
2019 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2020 BGE_PCIMISCCTL_ASICREV;
2021 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2022 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2023
2024 /*
2025 * XXX: Broadcom Linux driver. Not in specs or eratta.
2026 * PCI-Express?
2027 */
2028 if (BGE_IS_5705_OR_BEYOND(sc)) {
2029 uint32_t v;
2030
2031 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2032 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2033 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2034 if ((v & 0xff) == BGE_PCIE_CAPID)
2035 sc->bge_pcie = 1;
2036 }
2037 }
2038
2039 /*
2040 * PCI-X ?
2041 */
2042 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2043 BGE_PCISTATE_PCI_BUSMODE) == 0)
2044 sc->bge_pcix = 1;
2045
2046 /* Try to reset the chip. */
2047 bge_reset(sc);
2048
2049 if (bge_chipinit(sc)) {
2050 device_printf(sc->bge_dev, "chip initialization failed\n");
2051 bge_release_resources(sc);
2052 error = ENXIO;
2053 goto fail;
2054 }
2055
2056 /*
2057 * Get station address from the EEPROM.
2058 */
2059 mac_tmp = bge_readmem_ind(sc, 0x0c14);
2060 if ((mac_tmp >> 16) == 0x484b) {
2061 eaddr[0] = (u_char)(mac_tmp >> 8);
2062 eaddr[1] = (u_char)mac_tmp;
2063 mac_tmp = bge_readmem_ind(sc, 0x0c18);
2064 eaddr[2] = (u_char)(mac_tmp >> 24);
2065 eaddr[3] = (u_char)(mac_tmp >> 16);
2066 eaddr[4] = (u_char)(mac_tmp >> 8);
2067 eaddr[5] = (u_char)mac_tmp;
2068 } else if (bge_read_eeprom(sc, eaddr,
2069 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2070 device_printf(sc->bge_dev, "failed to read station address\n");
2071 bge_release_resources(sc);
2072 error = ENXIO;
2073 goto fail;
2074 }
2075
2076 /* 5705 limits RX return ring to 512 entries. */
2077 if (BGE_IS_5705_OR_BEYOND(sc))
2078 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2079 else
2080 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2081
2082 if (bge_dma_alloc(dev)) {
2083 device_printf(sc->bge_dev,
2084 "failed to allocate DMA resources\n");
2085 bge_release_resources(sc);
2086 error = ENXIO;
2087 goto fail;
2088 }
2089
2090 /* Set default tuneable values. */
2091 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2092 sc->bge_rx_coal_ticks = 150;
2093 sc->bge_tx_coal_ticks = 150;
2094 sc->bge_rx_max_coal_bds = 64;
2095 sc->bge_tx_max_coal_bds = 128;
2096
2097 /* Set up ifnet structure */
2098 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2099 if (ifp == NULL) {
2100 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2101 bge_release_resources(sc);
2102 error = ENXIO;
2103 goto fail;
2104 }
2105 ifp->if_softc = sc;
2106 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2107 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2108 ifp->if_ioctl = bge_ioctl;
2109 ifp->if_start = bge_start;
2110 ifp->if_watchdog = bge_watchdog;
2111 ifp->if_init = bge_init;
2112 ifp->if_mtu = ETHERMTU;
2113 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2114 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2115 IFQ_SET_READY(&ifp->if_snd);
2116 ifp->if_hwassist = BGE_CSUM_FEATURES;
2117 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2118 IFCAP_VLAN_MTU;
2119 ifp->if_capenable = ifp->if_capabilities;
2120 #ifdef DEVICE_POLLING
2121 ifp->if_capabilities |= IFCAP_POLLING;
2122 #endif
2123
2124 /*
2125 * 5700 B0 chips do not support checksumming correctly due
2126 * to hardware bugs.
2127 */
2128 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2129 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2130 ifp->if_capenable &= IFCAP_HWCSUM;
2131 ifp->if_hwassist = 0;
2132 }
2133
2134 /*
2135 * Figure out what sort of media we have by checking the
2136 * hardware config word in the first 32k of NIC internal memory,
2137 * or fall back to examining the EEPROM if necessary.
2138 * Note: on some BCM5700 cards, this value appears to be unset.
2139 * If that's the case, we have to rely on identifying the NIC
2140 * by its PCI subsystem ID, as we do below for the SysKonnect
2141 * SK-9D41.
2142 */
2143 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2144 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2145 else {
2146 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2147 sizeof(hwcfg))) {
2148 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2149 bge_release_resources(sc);
2150 error = ENXIO;
2151 goto fail;
2152 }
2153 hwcfg = ntohl(hwcfg);
2154 }
2155
2156 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2157 sc->bge_tbi = 1;
2158
2159 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2160 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2161 sc->bge_tbi = 1;
2162
2163 if (sc->bge_tbi) {
2164 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2165 bge_ifmedia_upd, bge_ifmedia_sts);
2166 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2167 ifmedia_add(&sc->bge_ifmedia,
2168 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2169 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2170 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2171 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2172 } else {
2173 /*
2174 * Do transceiver setup.
2175 */
2176 if (mii_phy_probe(dev, &sc->bge_miibus,
2177 bge_ifmedia_upd, bge_ifmedia_sts)) {
2178 device_printf(sc->bge_dev, "MII without any PHY!\n");
2179 bge_release_resources(sc);
2180 error = ENXIO;
2181 goto fail;
2182 }
2183 }
2184
2185 /*
2186 * When using the BCM5701 in PCI-X mode, data corruption has
2187 * been observed in the first few bytes of some received packets.
2188 * Aligning the packet buffer in memory eliminates the corruption.
2189 * Unfortunately, this misaligns the packet payloads. On platforms
2190 * which do not support unaligned accesses, we will realign the
2191 * payloads by copying the received packets.
2192 */
2193 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_pcix)
2194 sc->bge_rx_alignment_bug = 1;
2195
2196 /*
2197 * Call MI attach routine.
2198 */
2199 ether_ifattach(ifp, eaddr);
2200 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2201
2202 /*
2203 * Hookup IRQ last.
2204 */
2205 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2206 bge_intr, sc, &sc->bge_intrhand);
2207
2208 if (error) {
2209 bge_detach(dev);
2210 device_printf(sc->bge_dev, "couldn't set up irq\n");
2211 }
2212
2213 fail:
2214 return (error);
2215 }
2216
2217 static int
2218 bge_detach(device_t dev)
2219 {
2220 struct bge_softc *sc;
2221 struct ifnet *ifp;
2222
2223 sc = device_get_softc(dev);
2224 ifp = sc->bge_ifp;
2225
2226 #ifdef DEVICE_POLLING
2227 if (ifp->if_capenable & IFCAP_POLLING)
2228 ether_poll_deregister(ifp);
2229 #endif
2230
2231 BGE_LOCK(sc);
2232 bge_stop(sc);
2233 bge_reset(sc);
2234 BGE_UNLOCK(sc);
2235
2236 ether_ifdetach(ifp);
2237
2238 if (sc->bge_tbi) {
2239 ifmedia_removeall(&sc->bge_ifmedia);
2240 } else {
2241 bus_generic_detach(dev);
2242 device_delete_child(dev, sc->bge_miibus);
2243 }
2244
2245 bge_release_resources(sc);
2246
2247 return (0);
2248 }
2249
2250 static void
2251 bge_release_resources(struct bge_softc *sc)
2252 {
2253 device_t dev;
2254
2255 dev = sc->bge_dev;
2256
2257 if (sc->bge_vpd_prodname != NULL)
2258 free(sc->bge_vpd_prodname, M_DEVBUF);
2259
2260 if (sc->bge_vpd_readonly != NULL)
2261 free(sc->bge_vpd_readonly, M_DEVBUF);
2262
2263 if (sc->bge_intrhand != NULL)
2264 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2265
2266 if (sc->bge_irq != NULL)
2267 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2268
2269 if (sc->bge_res != NULL)
2270 bus_release_resource(dev, SYS_RES_MEMORY,
2271 BGE_PCI_BAR0, sc->bge_res);
2272
2273 if (sc->bge_ifp != NULL)
2274 if_free(sc->bge_ifp);
2275
2276 bge_dma_free(sc);
2277
2278 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2279 BGE_LOCK_DESTROY(sc);
2280 }
2281
2282 static void
2283 bge_reset(struct bge_softc *sc)
2284 {
2285 device_t dev;
2286 uint32_t cachesize, command, pcistate, reset;
2287 int i, val = 0;
2288
2289 dev = sc->bge_dev;
2290
2291 /* Save some important PCI state. */
2292 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2293 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2294 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2295
2296 pci_write_config(dev, BGE_PCI_MISC_CTL,
2297 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2298 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2299
2300 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2301
2302 /* XXX: Broadcom Linux driver. */
2303 if (sc->bge_pcie) {
2304 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */
2305 CSR_WRITE_4(sc, 0x7e2c, 0x20);
2306 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2307 /* Prevent PCIE link training during global reset */
2308 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2309 reset |= (1<<29);
2310 }
2311 }
2312
2313 /* Issue global reset */
2314 bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2315
2316 DELAY(1000);
2317
2318 /* XXX: Broadcom Linux driver. */
2319 if (sc->bge_pcie) {
2320 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2321 uint32_t v;
2322
2323 DELAY(500000); /* wait for link training to complete */
2324 v = pci_read_config(dev, 0xc4, 4);
2325 pci_write_config(dev, 0xc4, v | (1<<15), 4);
2326 }
2327 /* Set PCIE max payload size and clear error status. */
2328 pci_write_config(dev, 0xd8, 0xf5000, 4);
2329 }
2330
2331 /* Reset some of the PCI state that got zapped by reset. */
2332 pci_write_config(dev, BGE_PCI_MISC_CTL,
2333 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2334 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2335 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2336 pci_write_config(dev, BGE_PCI_CMD, command, 4);
2337 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2338
2339 /* Enable memory arbiter. */
2340 if (BGE_IS_5714_FAMILY(sc)) {
2341 uint32_t val;
2342
2343 val = CSR_READ_4(sc, BGE_MARB_MODE);
2344 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2345 } else
2346 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2347
2348 /*
2349 * Prevent PXE restart: write a magic number to the
2350 * general communications memory at 0xB50.
2351 */
2352 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2353 /*
2354 * Poll the value location we just wrote until
2355 * we see the 1's complement of the magic number.
2356 * This indicates that the firmware initialization
2357 * is complete.
2358 */
2359 for (i = 0; i < BGE_TIMEOUT; i++) {
2360 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2361 if (val == ~BGE_MAGIC_NUMBER)
2362 break;
2363 DELAY(10);
2364 }
2365
2366 if (i == BGE_TIMEOUT) {
2367 device_printf(sc->bge_dev, "firmware handshake timed out\n");
2368 return;
2369 }
2370
2371 /*
2372 * XXX Wait for the value of the PCISTATE register to
2373 * return to its original pre-reset state. This is a
2374 * fairly good indicator of reset completion. If we don't
2375 * wait for the reset to fully complete, trying to read
2376 * from the device's non-PCI registers may yield garbage
2377 * results.
2378 */
2379 for (i = 0; i < BGE_TIMEOUT; i++) {
2380 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2381 break;
2382 DELAY(10);
2383 }
2384
2385 /* Fix up byte swapping. */
2386 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2387 BGE_MODECTL_BYTESWAP_DATA);
2388
2389 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2390
2391 /*
2392 * The 5704 in TBI mode apparently needs some special
2393 * adjustment to insure the SERDES drive level is set
2394 * to 1.2V.
2395 */
2396 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2397 uint32_t serdescfg;
2398 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2399 serdescfg = (serdescfg & ~0xFFF) | 0x880;
2400 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2401 }
2402
2403 /* XXX: Broadcom Linux driver. */
2404 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2405 uint32_t v;
2406
2407 v = CSR_READ_4(sc, 0x7c00);
2408 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2409 }
2410 DELAY(10000);
2411 }
2412
2413 /*
2414 * Frame reception handling. This is called if there's a frame
2415 * on the receive return list.
2416 *
2417 * Note: we have to be able to handle two possibilities here:
2418 * 1) the frame is from the jumbo receive ring
2419 * 2) the frame is from the standard receive ring
2420 */
2421
2422 static void
2423 bge_rxeof(struct bge_softc *sc)
2424 {
2425 struct ifnet *ifp;
2426 int stdcnt = 0, jumbocnt = 0;
2427
2428 BGE_LOCK_ASSERT(sc);
2429
2430 /* Nothing to do. */
2431 if (sc->bge_rx_saved_considx ==
2432 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2433 return;
2434
2435 ifp = sc->bge_ifp;
2436
2437 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2438 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2439 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2440 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2441 if (BGE_IS_JUMBO_CAPABLE(sc))
2442 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2443 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2444
2445 while(sc->bge_rx_saved_considx !=
2446 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2447 struct bge_rx_bd *cur_rx;
2448 uint32_t rxidx;
2449 struct mbuf *m = NULL;
2450 uint16_t vlan_tag = 0;
2451 int have_tag = 0;
2452
2453 #ifdef DEVICE_POLLING
2454 if (ifp->if_capenable & IFCAP_POLLING) {
2455 if (sc->rxcycles <= 0)
2456 break;
2457 sc->rxcycles--;
2458 }
2459 #endif
2460
2461 cur_rx =
2462 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2463
2464 rxidx = cur_rx->bge_idx;
2465 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2466
2467 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2468 have_tag = 1;
2469 vlan_tag = cur_rx->bge_vlan_tag;
2470 }
2471
2472 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2473 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2474 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2475 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2476 BUS_DMASYNC_POSTREAD);
2477 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2478 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2479 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2480 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2481 jumbocnt++;
2482 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2483 ifp->if_ierrors++;
2484 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2485 continue;
2486 }
2487 if (bge_newbuf_jumbo(sc,
2488 sc->bge_jumbo, NULL) == ENOBUFS) {
2489 ifp->if_ierrors++;
2490 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2491 continue;
2492 }
2493 } else {
2494 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2495 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2496 sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2497 BUS_DMASYNC_POSTREAD);
2498 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2499 sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2500 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2501 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2502 stdcnt++;
2503 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2504 ifp->if_ierrors++;
2505 bge_newbuf_std(sc, sc->bge_std, m);
2506 continue;
2507 }
2508 if (bge_newbuf_std(sc, sc->bge_std,
2509 NULL) == ENOBUFS) {
2510 ifp->if_ierrors++;
2511 bge_newbuf_std(sc, sc->bge_std, m);
2512 continue;
2513 }
2514 }
2515
2516 ifp->if_ipackets++;
2517 #ifndef __NO_STRICT_ALIGNMENT
2518 /*
2519 * For architectures with strict alignment we must make sure
2520 * the payload is aligned.
2521 */
2522 if (sc->bge_rx_alignment_bug) {
2523 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2524 cur_rx->bge_len);
2525 m->m_data += ETHER_ALIGN;
2526 }
2527 #endif
2528 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2529 m->m_pkthdr.rcvif = ifp;
2530
2531 if (ifp->if_capenable & IFCAP_RXCSUM) {
2532 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2533 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2534 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2535 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2536 }
2537 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2538 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2539 m->m_pkthdr.csum_data =
2540 cur_rx->bge_tcp_udp_csum;
2541 m->m_pkthdr.csum_flags |=
2542 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2543 }
2544 }
2545
2546 /*
2547 * If we received a packet with a vlan tag,
2548 * attach that information to the packet.
2549 */
2550 if (have_tag) {
2551 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
2552 if (m == NULL)
2553 continue;
2554 }
2555
2556 BGE_UNLOCK(sc);
2557 (*ifp->if_input)(ifp, m);
2558 BGE_LOCK(sc);
2559 }
2560
2561 if (stdcnt > 0)
2562 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2563 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2564
2565 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2566 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2567 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2568
2569 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2570 if (stdcnt)
2571 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2572 if (jumbocnt)
2573 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2574 }
2575
2576 static void
2577 bge_txeof(struct bge_softc *sc)
2578 {
2579 struct bge_tx_bd *cur_tx = NULL;
2580 struct ifnet *ifp;
2581
2582 BGE_LOCK_ASSERT(sc);
2583
2584 /* Nothing to do. */
2585 if (sc->bge_tx_saved_considx ==
2586 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2587 return;
2588
2589 ifp = sc->bge_ifp;
2590
2591 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2592 sc->bge_cdata.bge_tx_ring_map,
2593 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2594 /*
2595 * Go through our tx ring and free mbufs for those
2596 * frames that have been sent.
2597 */
2598 while (sc->bge_tx_saved_considx !=
2599 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2600 uint32_t idx = 0;
2601
2602 idx = sc->bge_tx_saved_considx;
2603 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2604 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2605 ifp->if_opackets++;
2606 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2607 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2608 sc->bge_cdata.bge_tx_dmamap[idx],
2609 BUS_DMASYNC_POSTWRITE);
2610 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2611 sc->bge_cdata.bge_tx_dmamap[idx]);
2612 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2613 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2614 }
2615 sc->bge_txcnt--;
2616 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2617 ifp->if_timer = 0;
2618 }
2619
2620 if (cur_tx != NULL)
2621 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2622 }
2623
2624 #ifdef DEVICE_POLLING
2625 static void
2626 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2627 {
2628 struct bge_softc *sc = ifp->if_softc;
2629 uint32_t statusword;
2630
2631 BGE_LOCK(sc);
2632 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2633 BGE_UNLOCK(sc);
2634 return;
2635 }
2636
2637 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2638 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2639
2640 statusword = atomic_readandclear_32(
2641 &sc->bge_ldata.bge_status_block->bge_status);
2642
2643 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2644 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2645
2646 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2647 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2648 sc->bge_link_evt++;
2649
2650 if (cmd == POLL_AND_CHECK_STATUS)
2651 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2652 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2653 sc->bge_link_evt || sc->bge_tbi)
2654 bge_link_upd(sc);
2655
2656 sc->rxcycles = count;
2657 bge_rxeof(sc);
2658 bge_txeof(sc);
2659 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2660 bge_start_locked(ifp);
2661
2662 BGE_UNLOCK(sc);
2663 }
2664 #endif /* DEVICE_POLLING */
2665
2666 static void
2667 bge_intr(void *xsc)
2668 {
2669 struct bge_softc *sc;
2670 struct ifnet *ifp;
2671 uint32_t statusword;
2672
2673 sc = xsc;
2674
2675 BGE_LOCK(sc);
2676
2677 ifp = sc->bge_ifp;
2678
2679 #ifdef DEVICE_POLLING
2680 if (ifp->if_capenable & IFCAP_POLLING) {
2681 BGE_UNLOCK(sc);
2682 return;
2683 }
2684 #endif
2685
2686 /*
2687 * Do the mandatory PCI flush as well as get the link status.
2688 */
2689 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2690
2691 /* Ack interrupt and stop others from occuring. */
2692 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2693
2694 /* Make sure the descriptor ring indexes are coherent. */
2695 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2696 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2697 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2698 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2699
2700 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2701 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2702 statusword || sc->bge_link_evt)
2703 bge_link_upd(sc);
2704
2705 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2706 /* Check RX return ring producer/consumer. */
2707 bge_rxeof(sc);
2708
2709 /* Check TX ring producer/consumer. */
2710 bge_txeof(sc);
2711 }
2712
2713 /* Re-enable interrupts. */
2714 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2715
2716 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2717 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2718 bge_start_locked(ifp);
2719
2720 BGE_UNLOCK(sc);
2721 }
2722
2723 static void
2724 bge_tick_locked(struct bge_softc *sc)
2725 {
2726 struct mii_data *mii = NULL;
2727
2728 BGE_LOCK_ASSERT(sc);
2729
2730 if (BGE_IS_5705_OR_BEYOND(sc))
2731 bge_stats_update_regs(sc);
2732 else
2733 bge_stats_update(sc);
2734
2735 if (!sc->bge_tbi) {
2736 mii = device_get_softc(sc->bge_miibus);
2737 mii_tick(mii);
2738 } else {
2739 /*
2740 * Since in TBI mode auto-polling can't be used we should poll
2741 * link status manually. Here we register pending link event
2742 * and trigger interrupt.
2743 */
2744 #ifdef DEVICE_POLLING
2745 /* In polling mode we poll link state in bge_poll(). */
2746 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2747 #endif
2748 {
2749 sc->bge_link_evt++;
2750 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2751 }
2752 }
2753
2754 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2755 }
2756
2757 static void
2758 bge_tick(void *xsc)
2759 {
2760 struct bge_softc *sc;
2761
2762 sc = xsc;
2763
2764 BGE_LOCK(sc);
2765 bge_tick_locked(sc);
2766 BGE_UNLOCK(sc);
2767 }
2768
2769 static void
2770 bge_stats_update_regs(struct bge_softc *sc)
2771 {
2772 struct bge_mac_stats_regs stats;
2773 struct ifnet *ifp;
2774 uint32_t *s;
2775 u_long cnt; /* current register value */
2776 int i;
2777
2778 ifp = sc->bge_ifp;
2779
2780 s = (uint32_t *)&stats;
2781 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2782 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2783 s++;
2784 }
2785
2786 cnt = stats.dot3StatsSingleCollisionFrames +
2787 stats.dot3StatsMultipleCollisionFrames +
2788 stats.dot3StatsExcessiveCollisions +
2789 stats.dot3StatsLateCollisions;
2790 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2791 cnt - sc->bge_tx_collisions : cnt;
2792 sc->bge_tx_collisions = cnt;
2793 }
2794
2795 static void
2796 bge_stats_update(struct bge_softc *sc)
2797 {
2798 struct ifnet *ifp;
2799 bus_size_t stats;
2800 u_long cnt; /* current register value */
2801
2802 ifp = sc->bge_ifp;
2803
2804 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2805
2806 #define READ_STAT(sc, stats, stat) \
2807 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2808
2809 cnt = READ_STAT(sc, stats,
2810 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2811 cnt += READ_STAT(sc, stats,
2812 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2813 cnt += READ_STAT(sc, stats,
2814 txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2815 cnt += READ_STAT(sc, stats,
2816 txstats.dot3StatsLateCollisions.bge_addr_lo);
2817 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2818 cnt - sc->bge_tx_collisions : cnt;
2819 sc->bge_tx_collisions = cnt;
2820
2821 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2822 ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2823 cnt - sc->bge_rx_discards : cnt;
2824 sc->bge_rx_discards = cnt;
2825
2826 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2827 ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2828 cnt - sc->bge_tx_discards : cnt;
2829 sc->bge_tx_discards = cnt;
2830
2831 #undef READ_STAT
2832 }
2833
2834 /*
2835 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2836 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2837 * but when such padded frames employ the bge IP/TCP checksum offload,
2838 * the hardware checksum assist gives incorrect results (possibly
2839 * from incorporating its own padding into the UDP/TCP checksum; who knows).
2840 * If we pad such runts with zeros, the onboard checksum comes out correct.
2841 */
2842 static __inline int
2843 bge_cksum_pad(struct mbuf *m)
2844 {
2845 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2846 struct mbuf *last;
2847
2848 /* If there's only the packet-header and we can pad there, use it. */
2849 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2850 M_TRAILINGSPACE(m) >= padlen) {
2851 last = m;
2852 } else {
2853 /*
2854 * Walk packet chain to find last mbuf. We will either
2855 * pad there, or append a new mbuf and pad it.
2856 */
2857 for (last = m; last->m_next != NULL; last = last->m_next);
2858 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2859 /* Allocate new empty mbuf, pad it. Compact later. */
2860 struct mbuf *n;
2861
2862 MGET(n, M_DONTWAIT, MT_DATA);
2863 if (n == NULL)
2864 return (ENOBUFS);
2865 n->m_len = 0;
2866 last->m_next = n;
2867 last = n;
2868 }
2869 }
2870
2871 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
2872 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2873 last->m_len += padlen;
2874 m->m_pkthdr.len += padlen;
2875
2876 return (0);
2877 }
2878
2879 /*
2880 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2881 * pointers to descriptors.
2882 */
2883 static int
2884 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2885 {
2886 bus_dma_segment_t segs[BGE_NSEG_NEW];
2887 bus_dmamap_t map;
2888 struct bge_tx_bd *d = NULL;
2889 struct m_tag *mtag;
2890 uint32_t idx = *txidx;
2891 uint16_t csum_flags = 0;
2892 int nsegs, i, error;
2893
2894 if (m_head->m_pkthdr.csum_flags) {
2895 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2896 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2897 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2898 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2899 if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2900 bge_cksum_pad(m_head) != 0)
2901 return (ENOBUFS);
2902 }
2903 if (m_head->m_flags & M_LASTFRAG)
2904 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2905 else if (m_head->m_flags & M_FRAG)
2906 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2907 }
2908
2909 mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
2910
2911 map = sc->bge_cdata.bge_tx_dmamap[idx];
2912 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
2913 m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2914 if (error) {
2915 if (error == EFBIG) {
2916 struct mbuf *m0;
2917
2918 m0 = m_defrag(m_head, M_DONTWAIT);
2919 if (m0 == NULL)
2920 return (ENOBUFS);
2921 m_head = m0;
2922 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
2923 map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2924 }
2925 if (error)
2926 return (error);
2927 }
2928
2929 /*
2930 * Sanity check: avoid coming within 16 descriptors
2931 * of the end of the ring.
2932 */
2933 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
2934 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
2935 return (ENOBUFS);
2936 }
2937
2938 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2939
2940 for (i = 0; ; i++) {
2941 d = &sc->bge_ldata.bge_tx_ring[idx];
2942 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2943 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2944 d->bge_len = segs[i].ds_len;
2945 d->bge_flags = csum_flags;
2946 if (i == nsegs - 1)
2947 break;
2948 BGE_INC(idx, BGE_TX_RING_CNT);
2949 }
2950
2951 /* Mark the last segment as end of packet... */
2952 d->bge_flags |= BGE_TXBDFLAG_END;
2953 /* ... and put VLAN tag into first segment. */
2954 d = &sc->bge_ldata.bge_tx_ring[*txidx];
2955 if (mtag != NULL) {
2956 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2957 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
2958 } else
2959 d->bge_vlan_tag = 0;
2960
2961 /*
2962 * Insure that the map for this transmission
2963 * is placed at the array index of the last descriptor
2964 * in this chain.
2965 */
2966 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2967 sc->bge_cdata.bge_tx_dmamap[idx] = map;
2968 sc->bge_cdata.bge_tx_chain[idx] = m_head;
2969 sc->bge_txcnt += nsegs;
2970
2971 BGE_INC(idx, BGE_TX_RING_CNT);
2972 *txidx = idx;
2973
2974 return (0);
2975 }
2976
2977 /*
2978 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2979 * to the mbuf data regions directly in the transmit descriptors.
2980 */
2981 static void
2982 bge_start_locked(struct ifnet *ifp)
2983 {
2984 struct bge_softc *sc;
2985 struct mbuf *m_head = NULL;
2986 uint32_t prodidx;
2987 int count = 0;
2988
2989 sc = ifp->if_softc;
2990
2991 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2992 return;
2993
2994 prodidx = sc->bge_tx_prodidx;
2995
2996 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2997 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2998 if (m_head == NULL)
2999 break;
3000
3001 /*
3002 * XXX
3003 * The code inside the if() block is never reached since we
3004 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3005 * requests to checksum TCP/UDP in a fragmented packet.
3006 *
3007 * XXX
3008 * safety overkill. If this is a fragmented packet chain
3009 * with delayed TCP/UDP checksums, then only encapsulate
3010 * it if we have enough descriptors to handle the entire
3011 * chain at once.
3012 * (paranoia -- may not actually be needed)
3013 */
3014 if (m_head->m_flags & M_FIRSTFRAG &&
3015 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3016 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3017 m_head->m_pkthdr.csum_data + 16) {
3018 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3019 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3020 break;
3021 }
3022 }
3023
3024 /*
3025 * Pack the data into the transmit ring. If we
3026 * don't have room, set the OACTIVE flag and wait
3027 * for the NIC to drain the ring.
3028 */
3029 if (bge_encap(sc, m_head, &prodidx)) {
3030 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3031 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3032 break;
3033 }
3034 ++count;
3035
3036 /*
3037 * If there's a BPF listener, bounce a copy of this frame
3038 * to him.
3039 */
3040 BPF_MTAP(ifp, m_head);
3041 }
3042
3043 if (count == 0)
3044 /* No packets were dequeued. */
3045 return;
3046
3047 /* Transmit. */
3048 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3049 /* 5700 b2 errata */
3050 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3051 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3052
3053 sc->bge_tx_prodidx = prodidx;
3054
3055 /*
3056 * Set a timeout in case the chip goes out to lunch.
3057 */
3058 ifp->if_timer = 5;
3059 }
3060
3061 /*
3062 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3063 * to the mbuf data regions directly in the transmit descriptors.
3064 */
3065 static void
3066 bge_start(struct ifnet *ifp)
3067 {
3068 struct bge_softc *sc;
3069
3070 sc = ifp->if_softc;
3071 BGE_LOCK(sc);
3072 bge_start_locked(ifp);
3073 BGE_UNLOCK(sc);
3074 }
3075
3076 static void
3077 bge_init_locked(struct bge_softc *sc)
3078 {
3079 struct ifnet *ifp;
3080 uint16_t *m;
3081
3082 BGE_LOCK_ASSERT(sc);
3083
3084 ifp = sc->bge_ifp;
3085
3086 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3087 return;
3088
3089 /* Cancel pending I/O and flush buffers. */
3090 bge_stop(sc);
3091 bge_reset(sc);
3092 bge_chipinit(sc);
3093
3094 /*
3095 * Init the various state machines, ring
3096 * control blocks and firmware.
3097 */
3098 if (bge_blockinit(sc)) {
3099 device_printf(sc->bge_dev, "initialization failure\n");
3100 return;
3101 }
3102
3103 ifp = sc->bge_ifp;
3104
3105 /* Specify MTU. */
3106 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3107 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3108
3109 /* Load our MAC address. */
3110 m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3111 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3112 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3113
3114 /* Enable or disable promiscuous mode as needed. */
3115 if (ifp->if_flags & IFF_PROMISC) {
3116 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3117 } else {
3118 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3119 }
3120
3121 /* Program multicast filter. */
3122 bge_setmulti(sc);
3123
3124 /* Init RX ring. */
3125 bge_init_rx_ring_std(sc);
3126
3127 /*
3128 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3129 * memory to insure that the chip has in fact read the first
3130 * entry of the ring.
3131 */
3132 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3133 uint32_t v, i;
3134 for (i = 0; i < 10; i++) {
3135 DELAY(20);
3136 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3137 if (v == (MCLBYTES - ETHER_ALIGN))
3138 break;
3139 }
3140 if (i == 10)
3141 device_printf (sc->bge_dev,
3142 "5705 A0 chip failed to load RX ring\n");
3143 }
3144
3145 /* Init jumbo RX ring. */
3146 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3147 bge_init_rx_ring_jumbo(sc);
3148
3149 /* Init our RX return ring index. */
3150 sc->bge_rx_saved_considx = 0;
3151
3152 /* Init TX ring. */
3153 bge_init_tx_ring(sc);
3154
3155 /* Turn on transmitter. */
3156 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3157
3158 /* Turn on receiver. */
3159 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3160
3161 /* Tell firmware we're alive. */
3162 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3163
3164 #ifdef DEVICE_POLLING
3165 /* Disable interrupts if we are polling. */
3166 if (ifp->if_capenable & IFCAP_POLLING) {
3167 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3168 BGE_PCIMISCCTL_MASK_PCI_INTR);
3169 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3170 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3171 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3172 } else
3173 #endif
3174
3175 /* Enable host interrupts. */
3176 {
3177 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3178 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3179 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3180 }
3181
3182 bge_ifmedia_upd_locked(ifp);
3183
3184 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3185 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3186
3187 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3188 }
3189
3190 static void
3191 bge_init(void *xsc)
3192 {
3193 struct bge_softc *sc = xsc;
3194
3195 BGE_LOCK(sc);
3196 bge_init_locked(sc);
3197 BGE_UNLOCK(sc);
3198 }
3199
3200 /*
3201 * Set media options.
3202 */
3203 static int
3204 bge_ifmedia_upd(struct ifnet *ifp)
3205 {
3206 struct bge_softc *sc = ifp->if_softc;
3207 int res;
3208
3209 BGE_LOCK(sc);
3210 res = bge_ifmedia_upd_locked(ifp);
3211 BGE_UNLOCK(sc);
3212
3213 return (res);
3214 }
3215
3216 static int
3217 bge_ifmedia_upd_locked(struct ifnet *ifp)
3218 {
3219 struct bge_softc *sc = ifp->if_softc;
3220 struct mii_data *mii;
3221 struct ifmedia *ifm;
3222
3223 BGE_LOCK_ASSERT(sc);
3224
3225 ifm = &sc->bge_ifmedia;
3226
3227 /* If this is a 1000baseX NIC, enable the TBI port. */
3228 if (sc->bge_tbi) {
3229 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3230 return (EINVAL);
3231 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3232 case IFM_AUTO:
3233 /*
3234 * The BCM5704 ASIC appears to have a special
3235 * mechanism for programming the autoneg
3236 * advertisement registers in TBI mode.
3237 */
3238 if (bge_fake_autoneg == 0 &&
3239 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3240 uint32_t sgdig;
3241 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3242 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3243 sgdig |= BGE_SGDIGCFG_AUTO|
3244 BGE_SGDIGCFG_PAUSE_CAP|
3245 BGE_SGDIGCFG_ASYM_PAUSE;
3246 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3247 sgdig|BGE_SGDIGCFG_SEND);
3248 DELAY(5);
3249 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3250 }
3251 break;
3252 case IFM_1000_SX:
3253 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3254 BGE_CLRBIT(sc, BGE_MAC_MODE,
3255 BGE_MACMODE_HALF_DUPLEX);
3256 } else {
3257 BGE_SETBIT(sc, BGE_MAC_MODE,
3258 BGE_MACMODE_HALF_DUPLEX);
3259 }
3260 break;
3261 default:
3262 return (EINVAL);
3263 }
3264 return (0);
3265 }
3266
3267 sc->bge_link_evt++;
3268 mii = device_get_softc(sc->bge_miibus);
3269 if (mii->mii_instance) {
3270 struct mii_softc *miisc;
3271 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3272 miisc = LIST_NEXT(miisc, mii_list))
3273 mii_phy_reset(miisc);
3274 }
3275 mii_mediachg(mii);
3276
3277 return (0);
3278 }
3279
3280 /*
3281 * Report current media status.
3282 */
3283 static void
3284 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3285 {
3286 struct bge_softc *sc = ifp->if_softc;
3287 struct mii_data *mii;
3288
3289 BGE_LOCK(sc);
3290
3291 if (sc->bge_tbi) {
3292 ifmr->ifm_status = IFM_AVALID;
3293 ifmr->ifm_active = IFM_ETHER;
3294 if (CSR_READ_4(sc, BGE_MAC_STS) &
3295 BGE_MACSTAT_TBI_PCS_SYNCHED)
3296 ifmr->ifm_status |= IFM_ACTIVE;
3297 else {
3298 ifmr->ifm_active |= IFM_NONE;
3299 BGE_UNLOCK(sc);
3300 return;
3301 }
3302 ifmr->ifm_active |= IFM_1000_SX;
3303 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3304 ifmr->ifm_active |= IFM_HDX;
3305 else
3306 ifmr->ifm_active |= IFM_FDX;
3307 BGE_UNLOCK(sc);
3308 return;
3309 }
3310
3311 mii = device_get_softc(sc->bge_miibus);
3312 mii_pollstat(mii);
3313 ifmr->ifm_active = mii->mii_media_active;
3314 ifmr->ifm_status = mii->mii_media_status;
3315
3316 BGE_UNLOCK(sc);
3317 }
3318
3319 static int
3320 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3321 {
3322 struct bge_softc *sc = ifp->if_softc;
3323 struct ifreq *ifr = (struct ifreq *) data;
3324 struct mii_data *mii;
3325 int mask, error = 0;
3326
3327 switch (command) {
3328 case SIOCSIFMTU:
3329 if (ifr->ifr_mtu < ETHERMIN ||
3330 ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3331 ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3332 ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3333 ifr->ifr_mtu > ETHERMTU))
3334 error = EINVAL;
3335 else if (ifp->if_mtu != ifr->ifr_mtu) {
3336 ifp->if_mtu = ifr->ifr_mtu;
3337 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3338 bge_init(sc);
3339 }
3340 break;
3341 case SIOCSIFFLAGS:
3342 BGE_LOCK(sc);
3343 if (ifp->if_flags & IFF_UP) {
3344 /*
3345 * If only the state of the PROMISC flag changed,
3346 * then just use the 'set promisc mode' command
3347 * instead of reinitializing the entire NIC. Doing
3348 * a full re-init means reloading the firmware and
3349 * waiting for it to start up, which may take a
3350 * second or two. Similarly for ALLMULTI.
3351 */
3352 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3353 ifp->if_flags & IFF_PROMISC &&
3354 !(sc->bge_if_flags & IFF_PROMISC)) {
3355 BGE_SETBIT(sc, BGE_RX_MODE,
3356 BGE_RXMODE_RX_PROMISC);
3357 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3358 !(ifp->if_flags & IFF_PROMISC) &&
3359 sc->bge_if_flags & IFF_PROMISC) {
3360 BGE_CLRBIT(sc, BGE_RX_MODE,
3361 BGE_RXMODE_RX_PROMISC);
3362 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3363 (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3364 bge_setmulti(sc);
3365 } else
3366 bge_init_locked(sc);
3367 } else {
3368 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3369 bge_stop(sc);
3370 }
3371 }
3372 sc->bge_if_flags = ifp->if_flags;
3373 BGE_UNLOCK(sc);
3374 error = 0;
3375 break;
3376 case SIOCADDMULTI:
3377 case SIOCDELMULTI:
3378 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3379 BGE_LOCK(sc);
3380 bge_setmulti(sc);
3381 BGE_UNLOCK(sc);
3382 error = 0;
3383 }
3384 break;
3385 case SIOCSIFMEDIA:
3386 case SIOCGIFMEDIA:
3387 if (sc->bge_tbi) {
3388 error = ifmedia_ioctl(ifp, ifr,
3389 &sc->bge_ifmedia, command);
3390 } else {
3391 mii = device_get_softc(sc->bge_miibus);
3392 error = ifmedia_ioctl(ifp, ifr,
3393 &mii->mii_media, command);
3394 }
3395 break;
3396 case SIOCSIFCAP:
3397 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3398 #ifdef DEVICE_POLLING
3399 if (mask & IFCAP_POLLING) {
3400 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3401 error = ether_poll_register(bge_poll, ifp);
3402 if (error)
3403 return (error);
3404 BGE_LOCK(sc);
3405 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3406 BGE_PCIMISCCTL_MASK_PCI_INTR);
3407 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3408 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3409 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3410 ifp->if_capenable |= IFCAP_POLLING;
3411 BGE_UNLOCK(sc);
3412 } else {
3413 error = ether_poll_deregister(ifp);
3414 /* Enable interrupt even in error case */
3415 BGE_LOCK(sc);
3416 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3417 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3418 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3419 BGE_PCIMISCCTL_MASK_PCI_INTR);
3420 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3421 ifp->if_capenable &= ~IFCAP_POLLING;
3422 BGE_UNLOCK(sc);
3423 }
3424 }
3425 #endif
3426 if (mask & IFCAP_HWCSUM) {
3427 ifp->if_capenable ^= IFCAP_HWCSUM;
3428 if (IFCAP_HWCSUM & ifp->if_capenable &&
3429 IFCAP_HWCSUM & ifp->if_capabilities)
3430 ifp->if_hwassist = BGE_CSUM_FEATURES;
3431 else
3432 ifp->if_hwassist = 0;
3433 }
3434 break;
3435 default:
3436 error = ether_ioctl(ifp, command, data);
3437 break;
3438 }
3439
3440 return (error);
3441 }
3442
3443 static void
3444 bge_watchdog(struct ifnet *ifp)
3445 {
3446 struct bge_softc *sc;
3447
3448 sc = ifp->if_softc;
3449
3450 if_printf(ifp, "watchdog timeout -- resetting\n");
3451
3452 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3453 bge_init(sc);
3454
3455 ifp->if_oerrors++;
3456 }
3457
3458 /*
3459 * Stop the adapter and free any mbufs allocated to the
3460 * RX and TX lists.
3461 */
3462 static void
3463 bge_stop(struct bge_softc *sc)
3464 {
3465 struct ifnet *ifp;
3466 struct ifmedia_entry *ifm;
3467 struct mii_data *mii = NULL;
3468 int mtmp, itmp;
3469
3470 BGE_LOCK_ASSERT(sc);
3471
3472 ifp = sc->bge_ifp;
3473
3474 if (!sc->bge_tbi)
3475 mii = device_get_softc(sc->bge_miibus);
3476
3477 callout_stop(&sc->bge_stat_ch);
3478
3479 /*
3480 * Disable all of the receiver blocks.
3481 */
3482 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3483 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3484 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3485 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3486 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3487 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3488 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3489 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3490
3491 /*
3492 * Disable all of the transmit blocks.
3493 */
3494 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3495 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3496 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3497 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3498 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3499 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3500 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3501 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3502
3503 /*
3504 * Shut down all of the memory managers and related
3505 * state machines.
3506 */
3507 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3508 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3509 if (!(BGE_IS_5705_OR_BEYOND(sc)))
3510 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3511 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3512 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3513 if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3514 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3515 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3516 }
3517
3518 /* Disable host interrupts. */
3519 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3520 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3521
3522 /*
3523 * Tell firmware we're shutting down.
3524 */
3525 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3526
3527 /* Free the RX lists. */
3528 bge_free_rx_ring_std(sc);
3529
3530 /* Free jumbo RX list. */
3531 if (BGE_IS_JUMBO_CAPABLE(sc))
3532 bge_free_rx_ring_jumbo(sc);
3533
3534 /* Free TX buffers. */
3535 bge_free_tx_ring(sc);
3536
3537 /*
3538 * Isolate/power down the PHY, but leave the media selection
3539 * unchanged so that things will be put back to normal when
3540 * we bring the interface back up.
3541 */
3542 if (!sc->bge_tbi) {
3543 itmp = ifp->if_flags;
3544 ifp->if_flags |= IFF_UP;
3545 /*
3546 * If we are called from bge_detach(), mii is already NULL.
3547 */
3548 if (mii != NULL) {
3549 ifm = mii->mii_media.ifm_cur;
3550 mtmp = ifm->ifm_media;
3551 ifm->ifm_media = IFM_ETHER|IFM_NONE;
3552 mii_mediachg(mii);
3553 ifm->ifm_media = mtmp;
3554 }
3555 ifp->if_flags = itmp;
3556 }
3557
3558 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3559
3560 /*
3561 * We can't just call bge_link_upd() cause chip is almost stopped so
3562 * bge_link_upd -> bge_tick_locked -> bge_stats_update sequence may
3563 * lead to hardware deadlock. So we just clearing MAC's link state
3564 * (PHY may still have link UP).
3565 */
3566 if (bootverbose && sc->bge_link)
3567 if_printf(sc->bge_ifp, "link DOWN\n");
3568 sc->bge_link = 0;
3569
3570 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3571 }
3572
3573 /*
3574 * Stop all chip I/O so that the kernel's probe routines don't
3575 * get confused by errant DMAs when rebooting.
3576 */
3577 static void
3578 bge_shutdown(device_t dev)
3579 {
3580 struct bge_softc *sc;
3581
3582 sc = device_get_softc(dev);
3583
3584 BGE_LOCK(sc);
3585 bge_stop(sc);
3586 bge_reset(sc);
3587 BGE_UNLOCK(sc);
3588 }
3589
3590 static int
3591 bge_suspend(device_t dev)
3592 {
3593 struct bge_softc *sc;
3594
3595 sc = device_get_softc(dev);
3596 BGE_LOCK(sc);
3597 bge_stop(sc);
3598 BGE_UNLOCK(sc);
3599
3600 return (0);
3601 }
3602
3603 static int
3604 bge_resume(device_t dev)
3605 {
3606 struct bge_softc *sc;
3607 struct ifnet *ifp;
3608
3609 sc = device_get_softc(dev);
3610 BGE_LOCK(sc);
3611 ifp = sc->bge_ifp;
3612 if (ifp->if_flags & IFF_UP) {
3613 bge_init_locked(sc);
3614 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3615 bge_start_locked(ifp);
3616 }
3617 BGE_UNLOCK(sc);
3618
3619 return (0);
3620 }
3621
3622 static void
3623 bge_link_upd(struct bge_softc *sc)
3624 {
3625 struct mii_data *mii;
3626 uint32_t link, status;
3627
3628 BGE_LOCK_ASSERT(sc);
3629
3630 /* Clear 'pending link event' flag. */
3631 sc->bge_link_evt = 0;
3632
3633 /*
3634 * Process link state changes.
3635 * Grrr. The link status word in the status block does
3636 * not work correctly on the BCM5700 rev AX and BX chips,
3637 * according to all available information. Hence, we have
3638 * to enable MII interrupts in order to properly obtain
3639 * async link changes. Unfortunately, this also means that
3640 * we have to read the MAC status register to detect link
3641 * changes, thereby adding an additional register access to
3642 * the interrupt handler.
3643 *
3644 * XXX: perhaps link state detection procedure used for
3645 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
3646 */
3647
3648 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3649 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3650 status = CSR_READ_4(sc, BGE_MAC_STS);
3651 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3652 callout_stop(&sc->bge_stat_ch);
3653 bge_tick_locked(sc);
3654
3655 mii = device_get_softc(sc->bge_miibus);
3656 if (!sc->bge_link &&
3657 mii->mii_media_status & IFM_ACTIVE &&
3658 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3659 sc->bge_link++;
3660 if (bootverbose)
3661 if_printf(sc->bge_ifp, "link UP\n");
3662 } else if (sc->bge_link &&
3663 (!(mii->mii_media_status & IFM_ACTIVE) ||
3664 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3665 sc->bge_link = 0;
3666 if (bootverbose)
3667 if_printf(sc->bge_ifp, "link DOWN\n");
3668 }
3669
3670 /* Clear the interrupt. */
3671 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3672 BGE_EVTENB_MI_INTERRUPT);
3673 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3674 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3675 BRGPHY_INTRS);
3676 }
3677 return;
3678 }
3679
3680 if (sc->bge_tbi) {
3681 status = CSR_READ_4(sc, BGE_MAC_STS);
3682 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3683 if (!sc->bge_link) {
3684 sc->bge_link++;
3685 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3686 BGE_CLRBIT(sc, BGE_MAC_MODE,
3687 BGE_MACMODE_TBI_SEND_CFGS);
3688 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3689 if (bootverbose)
3690 if_printf(sc->bge_ifp, "link UP\n");
3691 if_link_state_change(sc->bge_ifp,
3692 LINK_STATE_UP);
3693 }
3694 } else if (sc->bge_link) {
3695 sc->bge_link = 0;
3696 if (bootverbose)
3697 if_printf(sc->bge_ifp, "link DOWN\n");
3698 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3699 }
3700 /* Discard link events for MII/GMII cards if MI auto-polling disabled */
3701 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3702 /*
3703 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
3704 * in status word always set. Workaround this bug by reading
3705 * PHY link status directly.
3706 */
3707 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3708
3709 if (link != sc->bge_link ||
3710 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3711 callout_stop(&sc->bge_stat_ch);
3712 bge_tick_locked(sc);
3713
3714 mii = device_get_softc(sc->bge_miibus);
3715 if (!sc->bge_link &&
3716 mii->mii_media_status & IFM_ACTIVE &&
3717 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3718 sc->bge_link++;
3719 if (bootverbose)
3720 if_printf(sc->bge_ifp, "link UP\n");
3721 } else if (sc->bge_link &&
3722 (!(mii->mii_media_status & IFM_ACTIVE) ||
3723 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3724 sc->bge_link = 0;
3725 if (bootverbose)
3726 if_printf(sc->bge_ifp, "link DOWN\n");
3727 }
3728 }
3729 }
3730
3731 /* Clear the attention. */
3732 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3733 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3734 BGE_MACSTAT_LINK_CHANGED);
3735 }
Cache object: a532b126cbae73358e92a2a12ca083f3
|