FreeBSD/Linux Kernel Cross Reference
sys/pci/if_dc.c
1 /*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
38 * series chips and several workalikes including the following:
39 *
40 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
41 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
42 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
43 * ASIX Electronics AX88140A (www.asix.com.tw)
44 * ASIX Electronics AX88141 (www.asix.com.tw)
45 * ADMtek AL981 (www.admtek.com.tw)
46 * ADMtek AN985 (www.admtek.com.tw)
47 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985
48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49 * Accton EN1217 (www.accton.com)
50 * Xircom X3201 (www.xircom.com)
51 * Abocom FE2500
52 * Conexant LANfinity (www.conexant.com)
53 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com)
54 *
55 * Datasheets for the 21143 are available at developer.intel.com.
56 * Datasheets for the clone parts can be found at their respective sites.
57 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
58 * The PNIC II is essentially a Macronix 98715A chip; the only difference
59 * worth noting is that its multicast hash table is only 128 bits wide
60 * instead of 512.
61 *
62 * Written by Bill Paul <wpaul@ee.columbia.edu>
63 * Electrical Engineering Department
64 * Columbia University, New York City
65 */
66 /*
67 * The Intel 21143 is the successor to the DEC 21140. It is basically
68 * the same as the 21140 but with a few new features. The 21143 supports
69 * three kinds of media attachments:
70 *
71 * o MII port, for 10Mbps and 100Mbps support and NWAY
72 * autonegotiation provided by an external PHY.
73 * o SYM port, for symbol mode 100Mbps support.
74 * o 10baseT port.
75 * o AUI/BNC port.
76 *
77 * The 100Mbps SYM port and 10baseT port can be used together in
78 * combination with the internal NWAY support to create a 10/100
79 * autosensing configuration.
80 *
81 * Note that not all tulip workalikes are handled in this driver: we only
82 * deal with those which are relatively well behaved. The Winbond is
83 * handled separately due to its different register offsets and the
84 * special handling needed for its various bugs. The PNIC is handled
85 * here, but I'm not thrilled about it.
86 *
87 * All of the workalike chips use some form of MII transceiver support
88 * with the exception of the Macronix chips, which also have a SYM port.
89 * The ASIX AX88140A is also documented to have a SYM port, but all
90 * the cards I've seen use an MII transceiver, probably because the
91 * AX88140A doesn't support internal NWAY.
92 */
93
94 #ifdef HAVE_KERNEL_OPTION_HEADERS
95 #include "opt_device_polling.h"
96 #endif
97
98 #include <sys/param.h>
99 #include <sys/endian.h>
100 #include <sys/systm.h>
101 #include <sys/sockio.h>
102 #include <sys/mbuf.h>
103 #include <sys/malloc.h>
104 #include <sys/kernel.h>
105 #include <sys/module.h>
106 #include <sys/socket.h>
107 #include <sys/sysctl.h>
108
109 #include <net/if.h>
110 #include <net/if_arp.h>
111 #include <net/ethernet.h>
112 #include <net/if_dl.h>
113 #include <net/if_media.h>
114 #include <net/if_types.h>
115 #include <net/if_vlan_var.h>
116
117 #include <net/bpf.h>
118
119 #include <machine/bus.h>
120 #include <machine/resource.h>
121 #include <sys/bus.h>
122 #include <sys/rman.h>
123
124 #include <dev/mii/mii.h>
125 #include <dev/mii/miivar.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129
130 #define DC_USEIOSPACE
131 #ifdef __alpha__
132 #define SRM_MEDIA
133 #endif
134
135 #include <pci/if_dcreg.h>
136
137 #ifdef __sparc64__
138 #include <dev/ofw/openfirm.h>
139 #include <machine/ofw_machdep.h>
140 #endif
141
142 MODULE_DEPEND(dc, pci, 1, 1, 1);
143 MODULE_DEPEND(dc, ether, 1, 1, 1);
144 MODULE_DEPEND(dc, miibus, 1, 1, 1);
145
146 /*
147 * "device miibus" is required in kernel config. See GENERIC if you get
148 * errors here.
149 */
150 #include "miibus_if.h"
151
/*
 * Various supported device vendors/types and their names.
 *
 * NOTE(review): ordering appears to be significant — entries that carry a
 * nonzero minimum-revision field (e.g. DM9102A, AX88141, 98713A) precede
 * the generic revision-0 entry for the same vendor/device ID, so a linear
 * scan finds the most specific match first.  Confirm against dc_devtype().
 * The table is terminated by the all-zero sentinel entry.
 */
static const struct dc_type dc_devs[] = {
	{ DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0,
	    "Intel 21143 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0,
	    "Davicom DM9009 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0,
	    "Davicom DM9100 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A,
	    "Davicom DM9102A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0,
	    "Davicom DM9102 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0,
	    "ADMtek AL981 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0,
	    "ADMtek AN985 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0,
	    "ADMtek ADM9511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0,
	    "ADMtek ADM9513 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511), 0,
	    "Netgear FA511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141,
	    "ASIX AX88141 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0,
	    "ASIX AX88140A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A,
	    "Macronix 98713A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0,
	    "Macronix 98713 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A,
	    "Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0,
	    "Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725,
	    "Macronix 98725 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C,
	    "Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0,
	    "Macronix 98715/98715A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0,
	    "Macronix 98727/98732 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0,
	    "LC82C115 PNIC II 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169,
	    "82c169 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0,
	    "82c168 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0,
	    "Accton EN1217 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0,
	    "Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0,
	    "Xircom X3201 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0,
	    "Neteasy DRP-32TXD Cardbus 10/100" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0,
	    "Abocom FE2500 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0,
	    "Abocom FE2500MX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0,
	    "Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0,
	    "Hawking CB102 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0,
	    "PlaneX FNW-3602-T CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0,
	    "3Com OfficeConnect 10/100B" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0,
	    "Microsoft MN-120 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0,
	    "Microsoft MN-130 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0,
	    "Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0,
	    "Linksys PCMPC200 CardBus 10/100" },
	{ 0, 0, NULL }
};
232
233 static int dc_probe(device_t);
234 static int dc_attach(device_t);
235 static int dc_detach(device_t);
236 static int dc_suspend(device_t);
237 static int dc_resume(device_t);
238 static const struct dc_type *dc_devtype(device_t);
239 static int dc_newbuf(struct dc_softc *, int, int);
240 static int dc_encap(struct dc_softc *, struct mbuf **);
241 static void dc_pnic_rx_bug_war(struct dc_softc *, int);
242 static int dc_rx_resync(struct dc_softc *);
243 static void dc_rxeof(struct dc_softc *);
244 static void dc_txeof(struct dc_softc *);
245 static void dc_tick(void *);
246 static void dc_tx_underrun(struct dc_softc *);
247 static void dc_intr(void *);
248 static void dc_start(struct ifnet *);
249 static void dc_start_locked(struct ifnet *);
250 static int dc_ioctl(struct ifnet *, u_long, caddr_t);
251 static void dc_init(void *);
252 static void dc_init_locked(struct dc_softc *);
253 static void dc_stop(struct dc_softc *);
254 static void dc_watchdog(void *);
255 static int dc_shutdown(device_t);
256 static int dc_ifmedia_upd(struct ifnet *);
257 static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
258
259 static void dc_delay(struct dc_softc *);
260 static void dc_eeprom_idle(struct dc_softc *);
261 static void dc_eeprom_putbyte(struct dc_softc *, int);
262 static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
263 static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
264 static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
265 static void dc_eeprom_width(struct dc_softc *);
266 static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
267
268 static void dc_mii_writebit(struct dc_softc *, int);
269 static int dc_mii_readbit(struct dc_softc *);
270 static void dc_mii_sync(struct dc_softc *);
271 static void dc_mii_send(struct dc_softc *, u_int32_t, int);
272 static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
273 static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
274 static int dc_miibus_readreg(device_t, int, int);
275 static int dc_miibus_writereg(device_t, int, int, int);
276 static void dc_miibus_statchg(device_t);
277 static void dc_miibus_mediainit(device_t);
278
279 static void dc_setcfg(struct dc_softc *, int);
280 static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *);
281 static uint32_t dc_mchash_be(const uint8_t *);
282 static void dc_setfilt_21143(struct dc_softc *);
283 static void dc_setfilt_asix(struct dc_softc *);
284 static void dc_setfilt_admtek(struct dc_softc *);
285 static void dc_setfilt_xircom(struct dc_softc *);
286
287 static void dc_setfilt(struct dc_softc *);
288
289 static void dc_reset(struct dc_softc *);
290 static int dc_list_rx_init(struct dc_softc *);
291 static int dc_list_tx_init(struct dc_softc *);
292
293 static void dc_read_srom(struct dc_softc *, int);
294 static void dc_parse_21143_srom(struct dc_softc *);
295 static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
296 static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
297 static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
298 static void dc_apply_fixup(struct dc_softc *, int);
299
300 #ifdef DC_USEIOSPACE
301 #define DC_RES SYS_RES_IOPORT
302 #define DC_RID DC_PCI_CFBIO
303 #else
304 #define DC_RES SYS_RES_MEMORY
305 #define DC_RID DC_PCI_CFBMA
306 #endif
307
/*
 * Method table binding the newbus device, bus, and MII interfaces to
 * this driver's implementations; terminated by the { 0, 0 } sentinel.
 */
static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* bus interface (generic pass-throughs for the miibus child) */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (called by the miibus child to reach the PHY) */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	{ 0, 0 }
};
329
/* Driver declaration: name "dc", its method table, and per-instance softc size. */
static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};
335
336 static devclass_t dc_devclass;
337 #ifdef __NO_STRICT_ALIGNMENT
338 static int dc_quick = 1;
339 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0,
340 "do not m_devget() in dc driver");
341 #endif
342
343 DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0);
344 DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0);
345 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0);
346
347 #define DC_SETBIT(sc, reg, x) \
348 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
349
350 #define DC_CLRBIT(sc, reg, x) \
351 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
352
353 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x))
354 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x))
355
356 static void
357 dc_delay(struct dc_softc *sc)
358 {
359 int idx;
360
361 for (idx = (300 / 33) + 1; idx > 0; idx--)
362 CSR_READ_4(sc, DC_BUSCTL);
363 }
364
/*
 * Probe the address width (number of address bits) of the serial EEPROM
 * and store it in sc->dc_romwidth.  A read opcode is clocked out, then
 * zero address bits are clocked one at a time; the loop counter at the
 * point the EEPROM pulls DATAOUT low is taken as the ROM address width.
 * NOTE(review): this matches the Microwire convention where the part
 * drives DATAOUT low once it has latched a complete address — confirm
 * against the 93Cxx datasheet.
 */
static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (6 == 110b), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock the address phase one bit at a time (DATAIN left as the
	 * opcode loop set it); stop as soon as DATAOUT reads low.  On
	 * exit 'i' holds the detected width (or 13 if the loop ran out).
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Fall back to a 6-bit address width if the probe result is bogus. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
428
/*
 * Drive the EEPROM interface back to its idle state: select the ROM,
 * assert chip select, toggle the clock 25 times, then deassert CS and
 * clear the SIO register entirely.
 */
static void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Strobe the clock 25 times to flush any in-progress command. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
456
/*
 * Clock the READ command and a word address out to the EEPROM.
 * Despite the name, this sends the 3-bit read opcode followed by
 * sc->dc_romwidth address bits, all MSB first.  No ACK is actually
 * checked here, contrary to the historical comment.
 */
static void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* 3-bit READ opcode, MSB first. */
	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each address bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
494
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM: write the read opcode plus address to the SIO control
 * register, then poll until the busy bit clears.
 *
 * NOTE(review): if the chip never clears BUSY within DC_TIMEOUT polls,
 * *dest is left untouched — callers rely on having pre-initialized it
 * (dc_read_eeprom() zeroes 'word' before the first call).
 */
static void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			/* Low 16 bits of the SIO register hold the data. */
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}
517
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read the EEPROM,
 * too: the ROM is byte-addressed through the DC_ROM register, so a
 * 16-bit word is assembled from two byte reads, low byte first.
 * NOTE(review): the 0x160 OR'ed into the address presumably selects
 * the read command/bank — no symbolic constant exists for it.
 */
static void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{

	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
538
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * This is the standard bit-banged Microwire sequence: idle the part,
 * enter access mode, clock out the read command and address, then
 * shift in 16 data bits MSB first.
 */
static void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
584
585 /*
586 * Read a sequence of words from the EEPROM.
587 */
588 static void
589 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
590 {
591 int i;
592 u_int16_t word = 0, *ptr;
593
594 for (i = 0; i < cnt; i++) {
595 if (DC_IS_PNIC(sc))
596 dc_eeprom_getword_pnic(sc, off + i, &word);
597 else if (DC_IS_XIRCOM(sc))
598 dc_eeprom_getword_xircom(sc, off + i, &word);
599 else
600 dc_eeprom_getword(sc, off + i, &word);
601 ptr = (u_int16_t *)(dest + (i * 2));
602 if (be)
603 *ptr = be16toh(word);
604 else
605 *ptr = le16toh(word);
606 }
607 }
608
609 /*
610 * The following two routines are taken from the Macronix 98713
611 * Application Notes pp.19-21.
612 */
/*
 * Write a bit to the MII bus: place the data bit on MDIO, then raise
 * and lower MDC with barriers and 1us delays around each edge so the
 * writes are not reordered or coalesced by the bus.
 */
static void
dc_mii_writebit(struct dc_softc *sc, int bit)
{
	uint32_t reg;

	reg = DC_SIO_ROMCTL_WRITE | (bit != 0 ? DC_SIO_MII_DATAOUT : 0);
	CSR_WRITE_4(sc, DC_SIO, reg);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);

	/* Rising clock edge: the PHY samples MDIO here. */
	CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);
	CSR_WRITE_4(sc, DC_SIO, reg);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);
}
636
/*
 * Read a bit from the MII bus: put the interface in read mode, strobe
 * MDC once, then sample MDIO.  Returns 0 or 1.
 */
static int
dc_mii_readbit(struct dc_softc *sc)
{
	uint32_t reg;

	reg = DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR;
	CSR_WRITE_4(sc, DC_SIO, reg);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);
	/* Dummy read to flush the write before clocking. */
	(void)CSR_READ_4(sc, DC_SIO);
	CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);
	CSR_WRITE_4(sc, DC_SIO, reg);
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(1);
	/* Sample the data line after the falling edge. */
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return (1);

	return (0);
}
664
665 /*
666 * Sync the PHYs by setting data bit and strobing the clock 32 times.
667 */
668 static void
669 dc_mii_sync(struct dc_softc *sc)
670 {
671 int i;
672
673 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
674 CSR_BARRIER_4(sc, DC_SIO,
675 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
676 DELAY(1);
677
678 for (i = 0; i < 32; i++)
679 dc_mii_writebit(sc, 1);
680 }
681
682 /*
683 * Clock a series of bits through the MII.
684 */
685 static void
686 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
687 {
688 int i;
689
690 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
691 dc_mii_writebit(sc, bits & i);
692 }
693
/*
 * Read a PHY register through the bit-banged MII.  Fills in
 * frame->mii_data and returns 0 on success, or 1 if the PHY failed to
 * drive the turnaround bit low (no device responded).
 */
static int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Now try reading data bits. If the turnaround failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	frame->mii_turnaround = dc_mii_readbit(sc);
	if (frame->mii_turnaround != 0) {
		for (i = 0; i < 16; i++)
			dc_mii_readbit(sc);
		goto fail;
	}
	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		if (dc_mii_readbit(sc))
			frame->mii_data |= i;
	}

fail:

	/* Clock the idle bits. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	if (frame->mii_turnaround != 0)
		return (1);
	return (0);
}
746
/*
 * Write to a PHY register through the bit-banged MII.  The full write
 * frame (start delimiter, opcode, PHY/register address, turnaround and
 * 16 data bits) is clocked out; always returns 0.
 */
static int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Clock the idle bits. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	return (0);
}
779
/*
 * miibus read-register method.  Dispatches on chip flavor:
 *   - ADMtek/Conexant: only one PHY address is valid; others return 0.
 *   - non-MII pmode: fake a PHY at address MII_NPHY-1 so the probe code
 *     attaches something for chips using the internal SIA/SYM media.
 *   - PNIC: dedicated MII access register, polled for completion.
 *   - Comet (AL981): PHY registers are mapped directly onto CSRs.
 *   - everything else: bit-banged MII frame via dc_mii_readreg().
 */
static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN985 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN985 uses a serial MII interface.
	 * The AN985's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch (reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (DC_VENDORID_LO);
				return (DC_VENDORID_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (DC_DEVICEID_82C168);
				return (DC_DEVICEID_21143);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	if (DC_IS_PNIC(sc)) {
		/* Kick off the read, then poll for completion. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones means no PHY answered. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		/* Translate the MII register number to the CSR offset. */
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_read: bad phy register %x\n",
			    reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: temporarily deselect the port for MII access. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
901
/*
 * miibus write-register method.  Mirrors dc_miibus_readreg(): filters
 * bogus PHY addresses on ADMtek/Conexant, uses the PNIC's dedicated
 * MII register, maps MII registers onto CSRs for Comet (AL981), and
 * bit-bangs a write frame for everything else.  Always returns 0.
 */
static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct dc_softc *sc;
	struct dc_mii_frame frame;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	/* Only the real internal PHY address is valid on these chips. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (DC_IS_PNIC(sc)) {
		/* Kick off the write, then poll until BUSY clears. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		/* Translate the MII register number to the CSR offset. */
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_write: bad phy register %x\n",
			    reg);
			return (0);
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return (0);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: temporarily deselect the port for MII access. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (0);
}
976
977 static void
978 dc_miibus_statchg(device_t dev)
979 {
980 struct dc_softc *sc;
981 struct mii_data *mii;
982 struct ifmedia *ifm;
983
984 sc = device_get_softc(dev);
985 if (DC_IS_ADMTEK(sc))
986 return;
987
988 mii = device_get_softc(sc->dc_miibus);
989 ifm = &mii->mii_media;
990 if (DC_IS_DAVICOM(sc) &&
991 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
992 dc_setcfg(sc, ifm->ifm_media);
993 sc->dc_if_media = ifm->ifm_media;
994 } else {
995 dc_setcfg(sc, mii->mii_media_active);
996 sc->dc_if_media = mii->mii_media_active;
997 }
998 }
999
1000 /*
1001 * Special support for DM9102A cards with HomePNA PHYs. Note:
1002 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
1003 * to be impossible to talk to the management interface of the DM9801
1004 * PHY (its MDIO pin is not connected to anything). Consequently,
1005 * the driver has to just 'know' about the additional mode and deal
1006 * with it itself. *sigh*
1007 */
1008 static void
1009 dc_miibus_mediainit(device_t dev)
1010 {
1011 struct dc_softc *sc;
1012 struct mii_data *mii;
1013 struct ifmedia *ifm;
1014 int rev;
1015
1016 rev = pci_get_revid(dev);
1017
1018 sc = device_get_softc(dev);
1019 mii = device_get_softc(sc->dc_miibus);
1020 ifm = &mii->mii_media;
1021
1022 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
1023 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL);
1024 }
1025
1026 #define DC_BITS_512 9
1027 #define DC_BITS_128 7
1028 #define DC_BITS_64 6
1029
/*
 * Compute the multicast filter bit index for 'addr' from the
 * little-endian CRC32 of the address, masked down to the width of
 * this chip's hash table (512, 128, or 64 bits), with a special
 * remapping for the Xircom X3201.
 */
static uint32_t
dc_mchash_le(struct dc_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		/*
		 * NOTE(review): this magic mapping presumably matches the
		 * X3201's filter memory layout — confirm against Xircom
		 * documentation before touching it.
		 */
		if ((crc & 0x180) == 0x180)
			return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
		else
			return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 +
			    (12 << 4));
	}

	/* Default: full 512-bit hash table. */
	return (crc & ((1 << DC_BITS_512) - 1));
}
1061
1062 /*
1063 * Calculate CRC of a multicast group address, return the lower 6 bits.
1064 */
1065 static uint32_t
1066 dc_mchash_be(const uint8_t *addr)
1067 {
1068 uint32_t crc;
1069
1070 /* Compute CRC for the address value. */
1071 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1072
1073 /* Return the filter bit position. */
1074 return ((crc >> 26) & 0x0000003F);
1075 }
1076
1077 /*
1078 * 21143-style RX filter setup routine. Filter programming is done by
1079 * downloading a special setup frame into the TX engine. 21143, Macronix,
1080 * PNIC, PNIC II and Davicom chips are programmed this way.
1081 *
1082 * We always program the chip using 'hash perfect' mode, i.e. one perfect
1083 * address (our node address) and a 512-bit hash filter for multicast
1084 * frames. We also sneak the broadcast address into the hash filter since
1085 * we need that too.
1086 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;		/* TX descriptor carrying the setup frame */
	u_int32_t h, *sp;		/* hash bit index; setup buffer pointer */
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = sc->dc_ifp;

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	/*
	 * Point the descriptor at the setup buffer and mark it as a
	 * hash-perfect setup frame that interrupts on completion.
	 */
	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer in the TX chain slot; note it is the
	 * shared setup buffer cast to an mbuf pointer, not a real mbuf
	 * (presumably so TX completion can tell it apart — the
	 * completion path is outside this view).
	 */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Set a hash filter bit for each multicast group address. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	/* Sneak the broadcast address into the hash filter too. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address (the single perfect filter entry). */
	sp[39] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[0]);
	sp[40] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[1]);
	sp[41] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[2]);

	/* Hand the descriptor to the chip and kick the transmitter. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	sc->dc_wdog_timer = 5;
}
1155
/*
 * ADMtek-style RX filter setup. The station address lives in the
 * PAR0/PAR1 CSRs and the 64-bit multicast hash table in MAR0/MAR1;
 * no setup frame is required.
 */
static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };	/* MAR0 (bits 0-31), MAR1 (32-63) */

	ifp = sc->dc_ifp;

	/* Init our MAC address (little-endian byte order in the CSRs). */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 |
	    eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Centaur parts hash with the little-endian CRC;
		 * the others use the big-endian (upper 6 bits) hash.
		 */
		if (DC_IS_CENTAUR(sc))
			h = dc_mchash_le(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_mchash_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1216
/*
 * ASIX-style RX filter setup. The station address and the 64-bit
 * multicast hash table are programmed indirectly: write the target
 * register index to DC_AX_FILTIDX, then the value to DC_AX_FILTDATA.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };	/* MAR0 (bits 0-31), MAR1 (32-63) */

	ifp = sc->dc_ifp;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Big-endian hash: upper 6 CRC bits select the bit. */
		h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1286
/*
 * Xircom-style RX filter setup. Like the 21143 this uses a setup
 * frame, but the transmitter and receiver are stopped first and
 * restarted after the frame is posted, and the station address goes
 * at the start of the setup buffer rather than at offset 39.
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;		/* TX descriptor carrying the setup frame */
	u_int32_t h, *sp;		/* hash bit index; setup buffer pointer */
	int i;

	ifp = sc->dc_ifp;
	/* Stop TX/RX while the filter is reprogrammed. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	/*
	 * Point the descriptor at the setup buffer and mark it as a
	 * hash-perfect setup frame that interrupts on completion.
	 */
	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer in the TX chain slot (it is the
	 * shared setup buffer cast to an mbuf pointer, not a real mbuf).
	 */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Set a hash filter bit for each multicast group address. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	/* Sneak the broadcast address into the hash filter too. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[0] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[0]);
	sp[1] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[1]);
	sp[2] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[2]);

	/* Restart TX/RX, then post the setup frame. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}
1356
/*
 * Program the RX filter using whichever mechanism this chip family
 * requires. Each DC_IS_* test selects its chip-specific routine.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	/* Setup-frame based chips. */
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	/* Indirect filter registers (FILTIDX/FILTDATA). */
	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	/* Direct PAR/MAR CSRs. */
	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	/* Setup frame with TX/RX stop-and-restart. */
	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1374
1375 /*
1376 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
1377 * the netconfig register, we first have to put the transmit and/or
1378 * receive logic in the idle state.
1379 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0, watchdogreg;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If the transmitter or receiver is running, stop both and
	 * poll the ISR until they reach an idle state; remember to
	 * restart them after the new configuration is in place.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/*
			 * Only complain when the chip's state bits are
			 * trustworthy: ASIX TX idle and "broken RX
			 * state" parts are known not to report properly.
			 */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				device_printf(sc->dc_dev,
				    "%s: failed to force tx to idle state\n",
				    __func__);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				device_printf(sc->dc_dev,
				    "%s: failed to force rx to idle state\n",
				    __func__);
		}
	}

	/* Configure for 100Mbps operation. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* MII port: disable jabber, route to the MII. */
			if (DC_IS_INTEL(sc)) {
				/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol/PCS mode. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX | IFM_FDX : IFM_100_TX);
		}
	}

	/* Configure for 10Mbps operation. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA for 10baseT: pulse
				 * SIA reset around the 10BTCTRL update,
				 * then force autoneg off.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T | IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Set or clear the full-duplex bit to match the media word. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}
1531
/*
 * Issue a software reset and return the chip to a known quiescent
 * state with interrupts masked and the NETCFG/BUSCTL registers cleared.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	/* Request a software reset. */
	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Wait for the chip to deassert the reset bit on its own. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * Some chips never clear the reset bit themselves: clear it
	 * manually after a settling delay, and zero 'i' so the
	 * timeout warning below is suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		device_printf(sc->dc_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask interrupts and clear the bus and network config. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}
1574
1575 static const struct dc_type *
1576 dc_devtype(device_t dev)
1577 {
1578 const struct dc_type *t;
1579 u_int32_t devid;
1580 u_int8_t rev;
1581
1582 t = dc_devs;
1583 devid = pci_get_devid(dev);
1584 rev = pci_get_revid(dev);
1585
1586 while (t->dc_name != NULL) {
1587 if (devid == t->dc_devid && rev >= t->dc_minrev)
1588 return (t);
1589 t++;
1590 }
1591
1592 return (NULL);
1593 }
1594
1595 /*
1596 * Probe for a 21143 or clone chip. Check the PCI vendor and device
1597 * IDs against our list and return a device name if we find a match.
1598 * We do a little bit of extra work to identify the exact type of
1599 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
1600 * but different revision IDs. The same is true for 98715/98715A
1601 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
1602 * cases, the exact chip revision affects driver behavior.
1603 */
1604 static int
1605 dc_probe(device_t dev)
1606 {
1607 const struct dc_type *t;
1608
1609 t = dc_devtype(dev);
1610
1611 if (t != NULL) {
1612 device_set_desc(dev, t->dc_name);
1613 return (BUS_PROBE_DEFAULT);
1614 }
1615
1616 return (ENXIO);
1617 }
1618
1619 static void
1620 dc_apply_fixup(struct dc_softc *sc, int media)
1621 {
1622 struct dc_mediainfo *m;
1623 u_int8_t *p;
1624 int i;
1625 u_int32_t reg;
1626
1627 m = sc->dc_mi;
1628
1629 while (m != NULL) {
1630 if (m->dc_media == media)
1631 break;
1632 m = m->dc_next;
1633 }
1634
1635 if (m == NULL)
1636 return;
1637
1638 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1639 reg = (p[0] | (p[1] << 8)) << 16;
1640 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1641 }
1642
1643 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1644 reg = (p[0] | (p[1] << 8)) << 16;
1645 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1646 }
1647 }
1648
1649 static void
1650 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1651 {
1652 struct dc_mediainfo *m;
1653
1654 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1655 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1656 case DC_SIA_CODE_10BT:
1657 m->dc_media = IFM_10_T;
1658 break;
1659 case DC_SIA_CODE_10BT_FDX:
1660 m->dc_media = IFM_10_T | IFM_FDX;
1661 break;
1662 case DC_SIA_CODE_10B2:
1663 m->dc_media = IFM_10_2;
1664 break;
1665 case DC_SIA_CODE_10B5:
1666 m->dc_media = IFM_10_5;
1667 break;
1668 default:
1669 break;
1670 }
1671
1672 /*
1673 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1674 * Things apparently already work for cards that do
1675 * supply Media Specific Data.
1676 */
1677 if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1678 m->dc_gp_len = 2;
1679 m->dc_gp_ptr =
1680 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1681 } else {
1682 m->dc_gp_len = 2;
1683 m->dc_gp_ptr =
1684 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1685 }
1686
1687 m->dc_next = sc->dc_mi;
1688 sc->dc_mi = m;
1689
1690 sc->dc_pmode = DC_PMODE_SIA;
1691 }
1692
1693 static void
1694 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1695 {
1696 struct dc_mediainfo *m;
1697
1698 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1699 if (l->dc_sym_code == DC_SYM_CODE_100BT)
1700 m->dc_media = IFM_100_TX;
1701
1702 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1703 m->dc_media = IFM_100_TX | IFM_FDX;
1704
1705 m->dc_gp_len = 2;
1706 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1707
1708 m->dc_next = sc->dc_mi;
1709 sc->dc_mi = m;
1710
1711 sc->dc_pmode = DC_PMODE_SYM;
1712 }
1713
1714 static void
1715 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1716 {
1717 struct dc_mediainfo *m;
1718 u_int8_t *p;
1719
1720 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1721 /* We abuse IFM_AUTO to represent MII. */
1722 m->dc_media = IFM_AUTO;
1723 m->dc_gp_len = l->dc_gpr_len;
1724
1725 p = (u_int8_t *)l;
1726 p += sizeof(struct dc_eblock_mii);
1727 m->dc_gp_ptr = p;
1728 p += 2 * l->dc_gpr_len;
1729 m->dc_reset_len = *p;
1730 p++;
1731 m->dc_reset_ptr = p;
1732
1733 m->dc_next = sc->dc_mi;
1734 sc->dc_mi = m;
1735 }
1736
1737 static void
1738 dc_read_srom(struct dc_softc *sc, int bits)
1739 {
1740 int size;
1741
1742 size = 2 << bits;
1743 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1744 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1745 }
1746
/*
 * Walk the 21143 SROM info leaf and decode its media blocks in two
 * passes: the first pass just detects whether an MII block exists,
 * the second decodes blocks, using SIA/SYM blocks only when no MII
 * block is present.
 */
static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the info leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		/*
		 * Low 7 bits of dc_len give the block payload size;
		 * the extra byte skips the length/type byte itself.
		 */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1803
1804 static void
1805 dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1806 {
1807 u_int32_t *paddr;
1808
1809 KASSERT(nseg == 1,
1810 ("%s: wrong number of segments (%d)", __func__, nseg));
1811 paddr = arg;
1812 *paddr = segs->ds_addr;
1813 }
1814
1815 /*
1816 * Attach the interface. Allocate softc structures, do ifmedia
1817 * setup and ethernet/BPF attach.
1818 */
1819 static int
1820 dc_attach(device_t dev)
1821 {
1822 int tmp = 0;
1823 u_char eaddr[ETHER_ADDR_LEN];
1824 u_int32_t command;
1825 struct dc_softc *sc;
1826 struct ifnet *ifp;
1827 u_int32_t reg, revision;
1828 int error = 0, rid, mac_offset;
1829 int i;
1830 u_int8_t *mac;
1831
1832 sc = device_get_softc(dev);
1833 sc->dc_dev = dev;
1834
1835 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1836 MTX_DEF);
1837
1838 /*
1839 * Map control/status registers.
1840 */
1841 pci_enable_busmaster(dev);
1842
1843 rid = DC_RID;
1844 sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);
1845
1846 if (sc->dc_res == NULL) {
1847 device_printf(dev, "couldn't map ports/memory\n");
1848 error = ENXIO;
1849 goto fail;
1850 }
1851
1852 sc->dc_btag = rman_get_bustag(sc->dc_res);
1853 sc->dc_bhandle = rman_get_bushandle(sc->dc_res);
1854
1855 /* Allocate interrupt. */
1856 rid = 0;
1857 sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1858 RF_SHAREABLE | RF_ACTIVE);
1859
1860 if (sc->dc_irq == NULL) {
1861 device_printf(dev, "couldn't map interrupt\n");
1862 error = ENXIO;
1863 goto fail;
1864 }
1865
1866 /* Need this info to decide on a chip type. */
1867 sc->dc_info = dc_devtype(dev);
1868 revision = pci_get_revid(dev);
1869
1870 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
1871 if (sc->dc_info->dc_devid !=
1872 DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) &&
1873 sc->dc_info->dc_devid !=
1874 DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201))
1875 dc_eeprom_width(sc);
1876
1877 switch (sc->dc_info->dc_devid) {
1878 case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143):
1879 sc->dc_type = DC_TYPE_21143;
1880 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1881 sc->dc_flags |= DC_REDUCED_MII_POLL;
1882 /* Save EEPROM contents so we can parse them later. */
1883 dc_read_srom(sc, sc->dc_romwidth);
1884 break;
1885 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009):
1886 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100):
1887 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102):
1888 sc->dc_type = DC_TYPE_DM9102;
1889 sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
1890 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
1891 sc->dc_flags |= DC_TX_ALIGN;
1892 sc->dc_pmode = DC_PMODE_MII;
1893
1894 /* Increase the latency timer value. */
1895 pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
1896 break;
1897 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981):
1898 sc->dc_type = DC_TYPE_AL981;
1899 sc->dc_flags |= DC_TX_USE_TX_INTR;
1900 sc->dc_flags |= DC_TX_ADMTEK_WAR;
1901 sc->dc_pmode = DC_PMODE_MII;
1902 dc_read_srom(sc, sc->dc_romwidth);
1903 break;
1904 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985):
1905 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511):
1906 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513):
1907 case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD):
1908 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511):
1909 case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500):
1910 case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX):
1911 case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242):
1912 case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX):
1913 case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T):
1914 case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB):
1915 case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120):
1916 case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130):
1917 case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08):
1918 case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09):
1919 sc->dc_type = DC_TYPE_AN985;
1920 sc->dc_flags |= DC_64BIT_HASH;
1921 sc->dc_flags |= DC_TX_USE_TX_INTR;
1922 sc->dc_flags |= DC_TX_ADMTEK_WAR;
1923 sc->dc_pmode = DC_PMODE_MII;
1924 /* Don't read SROM for - auto-loaded on reset */
1925 break;
1926 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713):
1927 case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP):
1928 if (revision < DC_REVISION_98713A) {
1929 sc->dc_type = DC_TYPE_98713;
1930 }
1931 if (revision >= DC_REVISION_98713A) {
1932 sc->dc_type = DC_TYPE_98713A;
1933 sc->dc_flags |= DC_21143_NWAY;
1934 }
1935 sc->dc_flags |= DC_REDUCED_MII_POLL;
1936 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1937 break;
1938 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5):
1939 case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217):
1940 /*
1941 * Macronix MX98715AEC-C/D/E parts have only a
1942 * 128-bit hash table. We need to deal with these
1943 * in the same manner as the PNIC II so that we
1944 * get the right number of bits out of the
1945 * CRC routine.
1946 */
1947 if (revision >= DC_REVISION_98715AEC_C &&
1948 revision < DC_REVISION_98725)
1949 sc->dc_flags |= DC_128BIT_HASH;
1950 sc->dc_type = DC_TYPE_987x5;
1951 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1952 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1953 break;
1954 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727):
1955 sc->dc_type = DC_TYPE_987x5;
1956 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1957 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1958 break;
1959 case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115):
1960 sc->dc_type = DC_TYPE_PNICII;
1961 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
1962 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1963 break;
1964 case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168):
1965 sc->dc_type = DC_TYPE_PNIC;
1966 sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
1967 sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
1968 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
1969 if (revision < DC_REVISION_82C169)
1970 sc->dc_pmode = DC_PMODE_SYM;
1971 break;
1972 case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A):
1973 sc->dc_type = DC_TYPE_ASIX;
1974 sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
1975 sc->dc_flags |= DC_REDUCED_MII_POLL;
1976 sc->dc_pmode = DC_PMODE_MII;
1977 break;
1978 case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201):
1979 sc->dc_type = DC_TYPE_XIRCOM;
1980 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
1981 DC_TX_ALIGN;
1982 /*
1983 * We don't actually need to coalesce, but we're doing
1984 * it to obtain a double word aligned buffer.
1985 * The DC_TX_COALESCE flag is required.
1986 */
1987 sc->dc_pmode = DC_PMODE_MII;
1988 break;
1989 case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112):
1990 sc->dc_type = DC_TYPE_CONEXANT;
1991 sc->dc_flags |= DC_TX_INTR_ALWAYS;
1992 sc->dc_flags |= DC_REDUCED_MII_POLL;
1993 sc->dc_pmode = DC_PMODE_MII;
1994 dc_read_srom(sc, sc->dc_romwidth);
1995 break;
1996 default:
1997 device_printf(dev, "unknown device: %x\n",
1998 sc->dc_info->dc_devid);
1999 break;
2000 }
2001
2002 /* Save the cache line size. */
2003 if (DC_IS_DAVICOM(sc))
2004 sc->dc_cachesize = 0;
2005 else
2006 sc->dc_cachesize = pci_get_cachelnsz(dev);
2007
2008 /* Reset the adapter. */
2009 dc_reset(sc);
2010
2011 /* Take 21143 out of snooze mode */
2012 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
2013 command = pci_read_config(dev, DC_PCI_CFDD, 4);
2014 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
2015 pci_write_config(dev, DC_PCI_CFDD, command, 4);
2016 }
2017
2018 /*
2019 * Try to learn something about the supported media.
2020 * We know that ASIX and ADMtek and Davicom devices
2021 * will *always* be using MII media, so that's a no-brainer.
2022 * The tricky ones are the Macronix/PNIC II and the
2023 * Intel 21143.
2024 */
2025 if (DC_IS_INTEL(sc))
2026 dc_parse_21143_srom(sc);
2027 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2028 if (sc->dc_type == DC_TYPE_98713)
2029 sc->dc_pmode = DC_PMODE_MII;
2030 else
2031 sc->dc_pmode = DC_PMODE_SYM;
2032 } else if (!sc->dc_pmode)
2033 sc->dc_pmode = DC_PMODE_MII;
2034
2035 /*
2036 * Get station address from the EEPROM.
2037 */
2038 switch(sc->dc_type) {
2039 case DC_TYPE_98713:
2040 case DC_TYPE_98713A:
2041 case DC_TYPE_987x5:
2042 case DC_TYPE_PNICII:
2043 dc_read_eeprom(sc, (caddr_t)&mac_offset,
2044 (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
2045 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
2046 break;
2047 case DC_TYPE_PNIC:
2048 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
2049 break;
2050 case DC_TYPE_DM9102:
2051 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2052 #ifdef __sparc64__
2053 /*
2054 * If this is an onboard dc(4) the station address read from
2055 * the EEPROM is all zero and we have to get it from the fcode.
2056 */
2057 for (i = 0; i < ETHER_ADDR_LEN; i++)
2058 if (eaddr[i] != 0x00)
2059 break;
2060 if (i >= ETHER_ADDR_LEN)
2061 OF_getetheraddr(dev, eaddr);
2062 #endif
2063 break;
2064 case DC_TYPE_21143:
2065 case DC_TYPE_ASIX:
2066 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2067 break;
2068 case DC_TYPE_AL981:
2069 case DC_TYPE_AN985:
2070 reg = CSR_READ_4(sc, DC_AL_PAR0);
2071 mac = (uint8_t *)&eaddr[0];
2072 mac[0] = (reg >> 0) & 0xff;
2073 mac[1] = (reg >> 8) & 0xff;
2074 mac[2] = (reg >> 16) & 0xff;
2075 mac[3] = (reg >> 24) & 0xff;
2076 reg = CSR_READ_4(sc, DC_AL_PAR1);
2077 mac[4] = (reg >> 0) & 0xff;
2078 mac[5] = (reg >> 8) & 0xff;
2079 break;
2080 case DC_TYPE_CONEXANT:
2081 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
2082 ETHER_ADDR_LEN);
2083 break;
2084 case DC_TYPE_XIRCOM:
2085 /* The MAC comes from the CIS. */
2086 mac = pci_get_ether(dev);
2087 if (!mac) {
2088 device_printf(dev, "No station address in CIS!\n");
2089 error = ENXIO;
2090 goto fail;
2091 }
2092 bcopy(mac, eaddr, ETHER_ADDR_LEN);
2093 break;
2094 default:
2095 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2096 break;
2097 }
2098
2099 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
2100 error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
2101 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2102 sizeof(struct dc_list_data), 1, sizeof(struct dc_list_data),
2103 0, NULL, NULL, &sc->dc_ltag);
2104 if (error) {
2105 device_printf(dev, "failed to allocate busdma tag\n");
2106 error = ENXIO;
2107 goto fail;
2108 }
2109 error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata,
2110 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap);
2111 if (error) {
2112 device_printf(dev, "failed to allocate DMA safe memory\n");
2113 error = ENXIO;
2114 goto fail;
2115 }
2116 error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata,
2117 sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr,
2118 BUS_DMA_NOWAIT);
2119 if (error) {
2120 device_printf(dev, "cannot get address of the descriptors\n");
2121 error = ENXIO;
2122 goto fail;
2123 }
2124
2125 /*
2126 * Allocate a busdma tag and DMA safe memory for the multicast
2127 * setup frame.
2128 */
2129 error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
2130 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2131 DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN,
2132 0, NULL, NULL, &sc->dc_stag);
2133 if (error) {
2134 device_printf(dev, "failed to allocate busdma tag\n");
2135 error = ENXIO;
2136 goto fail;
2137 }
2138 error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
2139 BUS_DMA_NOWAIT, &sc->dc_smap);
2140 if (error) {
2141 device_printf(dev, "failed to allocate DMA safe memory\n");
2142 error = ENXIO;
2143 goto fail;
2144 }
2145 error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
2146 DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
2147 if (error) {
2148 device_printf(dev, "cannot get address of the descriptors\n");
2149 error = ENXIO;
2150 goto fail;
2151 }
2152
2153 /* Allocate a busdma tag for mbufs. */
2154 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2155 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2156 MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES,
2157 0, NULL, NULL, &sc->dc_mtag);
2158 if (error) {
2159 device_printf(dev, "failed to allocate busdma tag\n");
2160 error = ENXIO;
2161 goto fail;
2162 }
2163
2164 /* Create the TX/RX busdma maps. */
2165 for (i = 0; i < DC_TX_LIST_CNT; i++) {
2166 error = bus_dmamap_create(sc->dc_mtag, 0,
2167 &sc->dc_cdata.dc_tx_map[i]);
2168 if (error) {
2169 device_printf(dev, "failed to init TX ring\n");
2170 error = ENXIO;
2171 goto fail;
2172 }
2173 }
2174 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2175 error = bus_dmamap_create(sc->dc_mtag, 0,
2176 &sc->dc_cdata.dc_rx_map[i]);
2177 if (error) {
2178 device_printf(dev, "failed to init RX ring\n");
2179 error = ENXIO;
2180 goto fail;
2181 }
2182 }
2183 error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap);
2184 if (error) {
2185 device_printf(dev, "failed to init RX ring\n");
2186 error = ENXIO;
2187 goto fail;
2188 }
2189
2190 ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
2191 if (ifp == NULL) {
2192 device_printf(dev, "can not if_alloc()\n");
2193 error = ENOSPC;
2194 goto fail;
2195 }
2196 ifp->if_softc = sc;
2197 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2198 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2199 ifp->if_ioctl = dc_ioctl;
2200 ifp->if_start = dc_start;
2201 ifp->if_init = dc_init;
2202 IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
2203 ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1;
2204 IFQ_SET_READY(&ifp->if_snd);
2205
2206 /*
2207 * Do MII setup. If this is a 21143, check for a PHY on the
2208 * MII bus after applying any necessary fixups to twiddle the
2209 * GPIO bits. If we don't end up finding a PHY, restore the
2210 * old selection (SIA only or SIA/SYM) and attach the dcphy
2211 * driver instead.
2212 */
2213 if (DC_IS_INTEL(sc)) {
2214 dc_apply_fixup(sc, IFM_AUTO);
2215 tmp = sc->dc_pmode;
2216 sc->dc_pmode = DC_PMODE_MII;
2217 }
2218
2219 /*
2220 * Setup General Purpose port mode and data so the tulip can talk
2221 * to the MII. This needs to be done before mii_phy_probe so that
2222 * we can actually see them.
2223 */
2224 if (DC_IS_XIRCOM(sc)) {
2225 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2226 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2227 DELAY(10);
2228 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2229 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2230 DELAY(10);
2231 }
2232
2233 error = mii_phy_probe(dev, &sc->dc_miibus,
2234 dc_ifmedia_upd, dc_ifmedia_sts);
2235
2236 if (error && DC_IS_INTEL(sc)) {
2237 sc->dc_pmode = tmp;
2238 if (sc->dc_pmode != DC_PMODE_SIA)
2239 sc->dc_pmode = DC_PMODE_SYM;
2240 sc->dc_flags |= DC_21143_NWAY;
2241 mii_phy_probe(dev, &sc->dc_miibus,
2242 dc_ifmedia_upd, dc_ifmedia_sts);
2243 /*
2244 * For non-MII cards, we need to have the 21143
2245 * drive the LEDs. Except there are some systems
2246 * like the NEC VersaPro NoteBook PC which have no
2247 * LEDs, and twiddling these bits has adverse effects
2248 * on them. (I.e. you suddenly can't get a link.)
2249 */
2250 if (!(pci_get_subvendor(dev) == 0x1033 &&
2251 pci_get_subdevice(dev) == 0x8028))
2252 sc->dc_flags |= DC_TULIP_LEDS;
2253 error = 0;
2254 }
2255
2256 if (error) {
2257 device_printf(dev, "MII without any PHY!\n");
2258 goto fail;
2259 }
2260
2261 if (DC_IS_ADMTEK(sc)) {
2262 /*
2263 * Set automatic TX underrun recovery for the ADMtek chips
2264 */
2265 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
2266 }
2267
2268 /*
2269 * Tell the upper layer(s) we support long frames.
2270 */
2271 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2272 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2273 ifp->if_capenable = ifp->if_capabilities;
2274 #ifdef DEVICE_POLLING
2275 ifp->if_capabilities |= IFCAP_POLLING;
2276 #endif
2277
2278 callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
2279 callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0);
2280
2281 #ifdef SRM_MEDIA
2282 sc->dc_srm_media = 0;
2283
2284 /* Remember the SRM console media setting */
2285 if (DC_IS_INTEL(sc)) {
2286 command = pci_read_config(dev, DC_PCI_CFDD, 4);
2287 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
2288 switch ((command >> 8) & 0xff) {
2289 case 3:
2290 sc->dc_srm_media = IFM_10_T;
2291 break;
2292 case 4:
2293 sc->dc_srm_media = IFM_10_T | IFM_FDX;
2294 break;
2295 case 5:
2296 sc->dc_srm_media = IFM_100_TX;
2297 break;
2298 case 6:
2299 sc->dc_srm_media = IFM_100_TX | IFM_FDX;
2300 break;
2301 }
2302 if (sc->dc_srm_media)
2303 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER;
2304 }
2305 #endif
2306
2307 /*
2308 * Call MI attach routine.
2309 */
2310 ether_ifattach(ifp, eaddr);
2311
2312 /* Hook interrupt last to avoid having to lock softc */
2313 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE,
2314 dc_intr, sc, &sc->dc_intrhand);
2315
2316 if (error) {
2317 device_printf(dev, "couldn't set up irq\n");
2318 ether_ifdetach(ifp);
2319 goto fail;
2320 }
2321
2322 fail:
2323 if (error)
2324 dc_detach(dev);
2325 return (error);
2326 }
2327
2328 /*
2329 * Shutdown hardware and free up resources. This can be called any
2330 * time after the mutex has been initialized. It is called in both
2331 * the error case in attach and the normal detach case so it needs
2332 * to be careful about only freeing resources that have actually been
2333 * allocated.
2334 */
2335 static int
2336 dc_detach(device_t dev)
2337 {
2338 struct dc_softc *sc;
2339 struct ifnet *ifp;
2340 struct dc_mediainfo *m;
2341 int i;
2342
2343 sc = device_get_softc(dev);
2344 KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));
2345
2346 ifp = sc->dc_ifp;
2347
2348 #ifdef DEVICE_POLLING
2349 if (ifp->if_capenable & IFCAP_POLLING)
2350 ether_poll_deregister(ifp);
2351 #endif
2352
2353 /* These should only be active if attach succeeded */
2354 if (device_is_attached(dev)) {
2355 DC_LOCK(sc);
2356 dc_stop(sc);
2357 DC_UNLOCK(sc);
2358 callout_drain(&sc->dc_stat_ch);
2359 callout_drain(&sc->dc_wdog_ch);
2360 ether_ifdetach(ifp);
2361 }
2362 if (sc->dc_miibus)
2363 device_delete_child(dev, sc->dc_miibus);
2364 bus_generic_detach(dev);
2365
2366 if (sc->dc_intrhand)
2367 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
2368 if (sc->dc_irq)
2369 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
2370 if (sc->dc_res)
2371 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
2372
2373 if (ifp)
2374 if_free(ifp);
2375
2376 if (sc->dc_cdata.dc_sbuf != NULL)
2377 bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap);
2378 if (sc->dc_ldata != NULL)
2379 bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap);
2380 if (sc->dc_mtag) {
2381 for (i = 0; i < DC_TX_LIST_CNT; i++)
2382 if (sc->dc_cdata.dc_tx_map[i] != NULL)
2383 bus_dmamap_destroy(sc->dc_mtag,
2384 sc->dc_cdata.dc_tx_map[i]);
2385 for (i = 0; i < DC_RX_LIST_CNT; i++)
2386 if (sc->dc_cdata.dc_rx_map[i] != NULL)
2387 bus_dmamap_destroy(sc->dc_mtag,
2388 sc->dc_cdata.dc_rx_map[i]);
2389 bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap);
2390 }
2391 if (sc->dc_stag)
2392 bus_dma_tag_destroy(sc->dc_stag);
2393 if (sc->dc_mtag)
2394 bus_dma_tag_destroy(sc->dc_mtag);
2395 if (sc->dc_ltag)
2396 bus_dma_tag_destroy(sc->dc_ltag);
2397
2398 free(sc->dc_pnic_rx_buf, M_DEVBUF);
2399
2400 while (sc->dc_mi != NULL) {
2401 m = sc->dc_mi->dc_next;
2402 free(sc->dc_mi, M_DEVBUF);
2403 sc->dc_mi = m;
2404 }
2405 free(sc->dc_srom, M_DEVBUF);
2406
2407 mtx_destroy(&sc->dc_mtx);
2408
2409 return (0);
2410 }
2411
2412 /*
2413 * Initialize the transmit descriptors.
2414 */
2415 static int
2416 dc_list_tx_init(struct dc_softc *sc)
2417 {
2418 struct dc_chain_data *cd;
2419 struct dc_list_data *ld;
2420 int i, nexti;
2421
2422 cd = &sc->dc_cdata;
2423 ld = sc->dc_ldata;
2424 for (i = 0; i < DC_TX_LIST_CNT; i++) {
2425 if (i == DC_TX_LIST_CNT - 1)
2426 nexti = 0;
2427 else
2428 nexti = i + 1;
2429 ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
2430 cd->dc_tx_chain[i] = NULL;
2431 ld->dc_tx_list[i].dc_data = 0;
2432 ld->dc_tx_list[i].dc_ctl = 0;
2433 }
2434
2435 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
2436 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2437 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2438 return (0);
2439 }
2440
2441
2442 /*
2443 * Initialize the RX descriptors and allocate mbufs for them. Note that
2444 * we arrange the descriptors in a closed ring, so that the last descriptor
2445 * points back to the first.
2446 */
2447 static int
2448 dc_list_rx_init(struct dc_softc *sc)
2449 {
2450 struct dc_chain_data *cd;
2451 struct dc_list_data *ld;
2452 int i, nexti;
2453
2454 cd = &sc->dc_cdata;
2455 ld = sc->dc_ldata;
2456
2457 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2458 if (dc_newbuf(sc, i, 1) != 0)
2459 return (ENOBUFS);
2460 if (i == DC_RX_LIST_CNT - 1)
2461 nexti = 0;
2462 else
2463 nexti = i + 1;
2464 ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
2465 }
2466
2467 cd->dc_rx_prod = 0;
2468 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2469 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2470 return (0);
2471 }
2472
2473 /*
2474 * Initialize an RX descriptor and attach an MBUF cluster.
2475 */
/*
 * Attach an mbuf cluster to RX descriptor 'i' and hand the descriptor
 * back to the chip.  If 'alloc' is non-zero a new cluster is allocated
 * and DMA-loaded through the spare map (then swapped into the slot);
 * otherwise the cluster already in the slot is reset and reused.
 * Returns 0 on success or an errno value on failure.
 */
static int
dc_newbuf(struct dc_softc *sc, int i, int alloc)
{
	struct mbuf *m_new;
	bus_dmamap_t tmp;
	bus_dma_segment_t segs[1];
	int error, nseg;

	if (alloc) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/* Reuse: rewind the data pointer to the cluster start. */
		m_new = sc->dc_cdata.dc_rx_chain[i];
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Trim sizeof(u_int64_t) bytes from the front of the cluster. */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	/* No need to remap the mbuf if we're reusing it. */
	if (alloc) {
		/*
		 * Load the new cluster into the spare map first; the
		 * slot's old mapping is only unloaded after the load
		 * succeeds, so a failure leaves the slot intact.
		 */
		error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_sparemap,
		    m_new, segs, &nseg, 0);
		if (error) {
			m_freem(m_new);
			return (error);
		}
		KASSERT(nseg == 1,
		    ("%s: wrong number of segments (%d)", __func__, nseg));
		sc->dc_ldata->dc_rx_list[i].dc_data = htole32(segs->ds_addr);
		bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
		/* Swap the slot's map with the spare map. */
		tmp = sc->dc_cdata.dc_rx_map[i];
		sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
		sc->dc_sparemap = tmp;
		sc->dc_cdata.dc_rx_chain[i] = m_new;
	}

	/* Reset the control word and give ownership back to the chip. */
	sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}
2529
2530 /*
2531 * Grrrrr.
2532 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
2534 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
2535 * will happen on slow machines. The bug is that sometimes instead of
2536 * uploading one complete frame during reception, it uploads what looks
2537 * like the entire contents of its FIFO memory. The frame we want is at
2538 * the end of the whole mess, but we never know exactly how much data has
2539 * been uploaded, so salvaging the frame is hard.
2540 *
2541 * There is only one way to do it reliably, and it's disgusting.
2542 * Here's what we know:
2543 *
2544 * - We know there will always be somewhere between one and three extra
2545 * descriptors uploaded.
2546 *
2547 * - We know the desired received frame will always be at the end of the
2548 * total data upload.
2549 *
2550 * - We know the size of the desired received frame because it will be
2551 * provided in the length field of the status word in the last descriptor.
2552 *
2553 * Here's what we do:
2554 *
2555 * - When we allocate buffers for the receive ring, we bzero() them.
2556 * This means that we know that the buffer contents should be all
2557 * zeros, except for data uploaded by the chip.
2558 *
2559 * - We also force the PNIC chip to upload frames that include the
2560 * ethernet CRC at the end.
2561 *
2562 * - We gather all of the bogus frame data into a single buffer.
2563 *
2564 * - We then position a pointer at the end of this buffer and scan
2565 * backwards until we encounter the first non-zero byte of data.
2566 * This is the end of the received frame. We know we will encounter
2567 * some data at the end of the frame because the CRC will always be
2568 * there, so even if the sender transmits a packet of all zeros,
2569 * we won't be fooled.
2570 *
2571 * - We know the size of the actual received frame, so we subtract
2572 * that value from the current pointer location. This brings us
2573 * to the start of the actual received packet.
2574 *
2575 * - We copy this into an mbuf and pass it on, along with the actual
2576 * frame length.
2577 *
2578 * The performance hit is tremendous, but it beats dropping frames all
2579 * the time.
2580 */
2581
#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	/* Start at the descriptor where the bogus upload began. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle this descriptor and move to the next one. */
		dc_newbuf(sc, i, 0);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte.  The buffers
	 * were zeroed by dc_newbuf(), so the first non-zero byte from
	 * the end marks the tail of the real frame (the CRC guarantees
	 * there is one, per the comment block above).
	 */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, 0);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2637
2638 /*
2639 * This routine searches the RX ring for dirty descriptors in the
2640 * event that the rxeof routine falls out of sync with the chip's
2641 * current descriptor pointer. This may happen sometimes as a result
2642 * of a "no RX buffer available" condition that happens when the chip
2643 * consumes all of the RX buffers before the driver has a chance to
2644 * process the RX ring. This routine may need to be called more than
2645 * once to bring the driver back in sync with the chip, however we
2646 * should still be getting RX DONE interrupts to drive the search
2647 * for new packets in the RX ring, so we should catch up eventually.
2648 */
2649 static int
2650 dc_rx_resync(struct dc_softc *sc)
2651 {
2652 struct dc_desc *cur_rx;
2653 int i, pos;
2654
2655 pos = sc->dc_cdata.dc_rx_prod;
2656
2657 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2658 cur_rx = &sc->dc_ldata->dc_rx_list[pos];
2659 if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
2660 break;
2661 DC_INC(pos, DC_RX_LIST_CNT);
2662 }
2663
2664 /* If the ring really is empty, then just return. */
2665 if (i == DC_RX_LIST_CNT)
2666 return (0);
2667
2668 /* We've fallen behing the chip: catch it. */
2669 sc->dc_cdata.dc_rx_prod = pos;
2670
2671 return (EAGAIN);
2672 }
2673
2674 /*
2675 * A frame has been uploaded: pass the resulting mbuf chain up to
2676 * the higher level protocols.
2677 */
static void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	i = sc->dc_cdata.dc_rx_prod;

	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
	/* Process descriptors until we reach one the chip still owns. */
	while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
	    DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);

		/*
		 * PNIC workaround: if the frame isn't contained in a
		 * single descriptor, remember where it started, keep
		 * consuming fragments until the last one, then salvage
		 * the real frame via dc_pnic_rx_bug_war() and reread
		 * the (faked-up) status word.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, 0);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/*
					 * Any non-CRC RX error causes a
					 * full reinitialization.
					 */
					dc_init_locked(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_newbuf(sc, i, 1) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
			DC_INC(i, DC_RX_LIST_CNT);
		} else
#endif
		{
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			dc_newbuf(sc, i, 0);
			DC_INC(i, DC_RX_LIST_CNT);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		/* Drop the driver lock around the call up the stack. */
		DC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		DC_LOCK(sc);
	}

	/* Remember where to resume next time. */
	sc->dc_cdata.dc_rx_prod = i;
}
2787
2788 /*
2789 * A frame was downloaded to the chip. It's safe for us to clean up
2790 * the list buffers.
2791 */
static void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;
	u_int32_t ctl, txstat;

	ifp = sc->dc_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
	idx = sc->dc_cdata.dc_tx_cons;
	while (idx != sc->dc_cdata.dc_tx_prod) {

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = le32toh(cur_tx->dc_status);
		ctl = le32toh(cur_tx->dc_ctl);

		/* The chip still owns this descriptor; stop here. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Intermediate fragments and setup frames carry no
		 * mbuf to free; just account for them and continue.
		 */
		if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) {
			if (ctl & DC_TXCTL_SETUP) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx] = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/*
			 * Underruns are handled separately (see
			 * dc_tx_underrun()); any other TX error causes
			 * a full reinitialization.
			 */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init_locked(sc);
				return;
			}
		}

		/* Extract the hardware's per-frame collision count. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (sc->dc_cdata.dc_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[idx]);
			m_freem(sc->dc_cdata.dc_tx_chain[idx]);
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
		}

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Clear OACTIVE once enough descriptors are free again. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_RSVD)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* All frames reclaimed: disarm the TX watchdog. */
	if (sc->dc_cdata.dc_tx_cnt == 0)
		sc->dc_wdog_timer = 0;
}
2897
/*
 * Periodic timer: poll the PHY for link changes and restart queued
 * transmissions once a link is established.  Reschedules itself.
 */
static void
dc_tick(void *xsc)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	u_int32_t r;

	sc = xsc;
	DC_LOCK_ASSERT(sc);
	ifp = sc->dc_ifp;
	mii = device_get_softc(sc->dc_miibus);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: if the 10BT status register
			 * disagrees with the currently selected media,
			 * drop the link flag and renegotiate.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * Otherwise only poll the PHY while the RX
			 * engine is idle (waiting state) and no TX is
			 * pending, keeping MII traffic to a minimum.
			 */
			r = CSR_READ_4(sc, DC_ISR);
			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
			    sc->dc_cdata.dc_tx_cnt == 0) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			dc_start_locked(ifp);
	}

	/* Poll at 10Hz while a 21143 NWAY negotiation has no link yet. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
}
2969
2970 /*
2971 * A transmit underrun has occurred. Back off the transmit threshold,
2972 * or switch to store and forward mode if we have to.
2973 */
static void
dc_tx_underrun(struct dc_softc *sc)
{
	u_int32_t isr;
	int i;

	/* Davicom chips are simply reinitialized. */
	if (DC_IS_DAVICOM(sc))
		dc_init_locked(sc);

	if (DC_IS_INTEL(sc)) {
		/*
		 * The real 21143 requires that the transmitter be idle
		 * in order to change the transmit threshold or store
		 * and forward state.
		 */
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

		/* Busy-wait (bounded) for the transmitter to go idle. */
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE)
				break;
			DELAY(10);
		}
		if (i == DC_TIMEOUT) {
			device_printf(sc->dc_dev,
			    "%s: failed to force tx to idle state\n",
			    __func__);
			dc_init_locked(sc);
		}
	}

	/*
	 * Raise the TX threshold; once it exceeds the maximum, fall
	 * back to store-and-forward mode instead.  The printf pairs
	 * with the device_printf above to form one log line.
	 */
	device_printf(sc->dc_dev, "TX underrun -- ");
	sc->dc_txthresh += DC_TXTHRESH_INC;
	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
		printf("using store and forward mode\n");
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	} else {
		printf("increasing TX threshold\n");
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
	}

	/* Restart the transmitter if we stopped it above. */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
}
3019
#ifdef DEVICE_POLLING
static poll_handler_t dc_poll;

/*
 * Polling entry point: service the RX and TX rings with a budget of
 * 'count' packets.  On POLL_AND_CHECK_STATUS, additionally inspect
 * the interrupt status register and handle error conditions that
 * would normally arrive as interrupts.
 */
static void
dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct dc_softc *sc = ifp->if_softc;

	DC_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		DC_UNLOCK(sc);
		return;
	}

	/* rxcycles caps how many packets dc_rxeof() may process. */
	sc->rxcycles = count;
	dc_rxeof(sc);
	dc_txeof(sc);
	if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
	    !(ifp->if_drv_flags & IFF_DRV_OACTIVE))
		dc_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;

		status = CSR_READ_4(sc, DC_ISR);
		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
		    DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
		    DC_ISR_BUS_ERR);
		if (!status) {
			DC_UNLOCK(sc);
			return;
		}
		/* ack what we have */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
			/* Account for frames the chip had to discard. */
			u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff);

			if (dc_rx_resync(sc))
				dc_rxeof(sc);
		}
		/* restart transmit unit if necessary */
		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if (status & DC_ISR_BUS_ERR) {
			if_printf(ifp, "%s: bus error\n", __func__);
			dc_reset(sc);
			dc_init_locked(sc);
		}
	}
	DC_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */
3079
/*
 * Interrupt handler: ack and service RX/TX completions, underruns
 * and bus errors, looping until no interesting status bits remain.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;

	/* Ignore interrupts while the device is suspended. */
	if (sc->suspended)
		return;

	/* Not our interrupt if no interesting status bits are set. */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return;

	DC_LOCK(sc);
	ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
	/* dc_poll() services the chip when polling is enabled. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DC_UNLOCK(sc);
		return;
	}
#endif

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/*
	 * Keep servicing until no interesting bits remain.  A status
	 * of 0xFFFFFFFF is treated as a vanished (e.g. removed) card.
	 */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {

		/* Ack the conditions we are about to handle. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/*
			 * If nothing came in, we may have fallen out of
			 * sync with the chip's ring pointer: resync and
			 * retry until dc_rx_resync() reports caught up.
			 */
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Kick the transmitter if frames remain queued. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Fatal bus error: reset and reinitialize the chip. */
		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init_locked(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		dc_start_locked(ifp);

	DC_UNLOCK(sc);
}
3170
3171 /*
3172 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
3173 * pointers to the fragment pointers.
3174 */
static int
dc_encap(struct dc_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[DC_MAXFRAGS];
	struct dc_desc *f;
	struct mbuf *m;
	int chainlen, cur, error, first, frag, i, idx, nseg;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag. Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	chainlen = 0;
	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	m = NULL;
	/*
	 * Coalesce into a single buffer when the chip requires it
	 * (DC_TX_COALESCE/DC_TX_ALIGN), when the chain would occupy
	 * more than a quarter of the TX ring, or when it would not
	 * leave DC_TX_LIST_RSVD descriptors free.
	 */
	if ((sc->dc_flags & DC_TX_COALESCE && ((*m_head)->m_next != NULL ||
	    sc->dc_flags & DC_TX_ALIGN)) || (chainlen > DC_TX_LIST_CNT / 4) ||
	    (DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt) <=
	    DC_TX_LIST_RSVD)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}
	idx = sc->dc_cdata.dc_tx_prod;
	error = bus_dmamap_load_mbuf_sg(sc->dc_mtag,
	    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: defragment once and retry the load. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dc_mtag,
		    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	KASSERT(nseg <= DC_MAXFRAGS,
	    ("%s: wrong number of segments (%d)", __func__, nseg));
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Fill one descriptor per DMA segment, walking the ring. */
	first = cur = frag = sc->dc_cdata.dc_tx_prod;
	for (i = 0; i < nseg; i++) {
		/*
		 * ADMtek workaround: refuse to let a packet (other than
		 * the very first one queued this round) occupy the last
		 * descriptor slot of the ring.
		 */
		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
		    (frag == (DC_TX_LIST_CNT - 1)) &&
		    (first != sc->dc_cdata.dc_tx_first)) {
			bus_dmamap_unload(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[first]);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
		if (i == 0) {
			/* First fragment stays un-owned until the end. */
			f->dc_status = 0;
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_prod = frag;
	sc->dc_cdata.dc_tx_cnt += nseg;
	sc->dc_cdata.dc_tx_chain[cur] = *m_head;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[first].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	/*
	 * Hand the whole packet to the chip only now, by setting OWN on
	 * the first descriptor last — the chip must not see a partially
	 * built chain.
	 */
	sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}
3283
3284 static void
3285 dc_start(struct ifnet *ifp)
3286 {
3287 struct dc_softc *sc;
3288
3289 sc = ifp->if_softc;
3290 DC_LOCK(sc);
3291 dc_start_locked(ifp);
3292 DC_UNLOCK(sc);
3293 }
3294
3295 /*
3296 * Main transmit routine
3297 * To avoid having to do mbuf copies, we put pointers to the mbuf data
3298 * regions directly in the transmit lists. We also save a copy of the
3299 * pointers since the transmit list fragment pointers are physical
3300 * addresses.
3301 */
static void
dc_start_locked(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL;
	unsigned int queued = 0;
	int idx;

	sc = ifp->if_softc;

	DC_LOCK_ASSERT(sc);

	/*
	 * Without a link, hold packets back until a small backlog (10)
	 * accumulates — presumably to avoid transmit attempts during
	 * autonegotiation; TODO confirm rationale.
	 */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10)
		return;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	/* Remember where this batch starts (used by the ADMtek WAR). */
	idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	/* Queue packets while free descriptors remain at the producer. */
	while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (dc_encap(sc, &m_head)) {
			/*
			 * NULL m_head means dc_encap() consumed (freed)
			 * the mbuf; otherwise put it back and stall.
			 */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		idx = sc->dc_cdata.dc_tx_prod;

		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

		/* Some chips can only handle one packet at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
	}

	if (queued > 0) {
		/* Transmit */
		if (!(sc->dc_flags & DC_TX_POLL))
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->dc_wdog_timer = 5;
	}
}
3360
/*
 * Unlocked init entry point; optionally replays a media selection
 * saved from the Alpha SRM console (SRM_MEDIA builds only).
 */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;

	DC_LOCK(sc);
	dc_init_locked(sc);
#ifdef SRM_MEDIA
	if(sc->dc_srm_media) {
		struct ifreq ifr;
		struct mii_data *mii;

		ifr.ifr_media = sc->dc_srm_media;
		sc->dc_srm_media = 0;
		/* Drop the lock before ifmedia_ioctl() re-enters driver code. */
		DC_UNLOCK(sc);
		mii = device_get_softc(sc->dc_miibus);
		ifmedia_ioctl(sc->dc_ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
	} else
#endif
	/* NB: this unlock is the else-branch when SRM_MEDIA is defined. */
	DC_UNLOCK(sc);
}
3382
/*
 * Reset and (re)program the chip: bus/burst settings, TX threshold,
 * descriptor rings, interrupts, RX filter, and media.  Called with
 * the softc lock held.
 */
static void
dc_init_locked(struct dc_softc *sc)
{
	struct ifnet *ifp = sc->dc_ifp;
	struct mii_data *mii;

	DC_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	/* Program cache alignment from the value probed at attach. */
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Store-and-forward, or cut-through with a TX FIFO threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->dc_dev,
		    "initialization failed: no memory for rx buffers\n");
		dc_stop(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/* NWAY autonegotiation needs a faster (hz/10) tick. */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

	/* Arm the transmit watchdog. */
	sc->dc_wdog_timer = 0;
	callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
}
3560
3561 /*
3562 * Set media options.
3563 */
3564 static int
3565 dc_ifmedia_upd(struct ifnet *ifp)
3566 {
3567 struct dc_softc *sc;
3568 struct mii_data *mii;
3569 struct ifmedia *ifm;
3570
3571 sc = ifp->if_softc;
3572 mii = device_get_softc(sc->dc_miibus);
3573 DC_LOCK(sc);
3574 mii_mediachg(mii);
3575 ifm = &mii->mii_media;
3576
3577 if (DC_IS_DAVICOM(sc) &&
3578 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
3579 dc_setcfg(sc, ifm->ifm_media);
3580 else
3581 sc->dc_link = 0;
3582 DC_UNLOCK(sc);
3583
3584 return (0);
3585 }
3586
3587 /*
3588 * Report current media status.
3589 */
3590 static void
3591 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3592 {
3593 struct dc_softc *sc;
3594 struct mii_data *mii;
3595 struct ifmedia *ifm;
3596
3597 sc = ifp->if_softc;
3598 mii = device_get_softc(sc->dc_miibus);
3599 DC_LOCK(sc);
3600 mii_pollstat(mii);
3601 ifm = &mii->mii_media;
3602 if (DC_IS_DAVICOM(sc)) {
3603 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
3604 ifmr->ifm_active = ifm->ifm_media;
3605 ifmr->ifm_status = 0;
3606 DC_UNLOCK(sc);
3607 return;
3608 }
3609 }
3610 ifmr->ifm_active = mii->mii_media_active;
3611 ifmr->ifm_status = mii->mii_media_status;
3612 DC_UNLOCK(sc);
3613 }
3614
/*
 * ioctl handler: interface flags, multicast filter, media, and
 * (optionally) DEVICE_POLLING capability changes.
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		DC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Reprogram the filter only if PROMISC/ALLMULTI changed. */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				sc->dc_txthresh = 0;
				dc_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		DC_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DC_LOCK(sc);
		dc_setfilt(sc);
		DC_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Delegate media get/set to the MII layer (unlocked). */
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* An explicit media set overrides the saved SRM selection. */
		DC_LOCK(sc);
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
		DC_UNLOCK(sc);
#endif
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		/* Turning polling on: register and mask interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(dc_poll, ifp);
			if (error)
				return(error);
			DC_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, DC_IMR, 0x00000000);
			ifp->if_capenable |= IFCAP_POLLING;
			DC_UNLOCK(sc);
			return (error);
		}
		/* Turning polling off: deregister and unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			DC_LOCK(sc);
			CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			DC_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3696
3697 static void
3698 dc_watchdog(void *xsc)
3699 {
3700 struct dc_softc *sc = xsc;
3701 struct ifnet *ifp;
3702
3703 DC_LOCK_ASSERT(sc);
3704
3705 if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) {
3706 callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
3707 return;
3708 }
3709
3710 ifp = sc->dc_ifp;
3711 ifp->if_oerrors++;
3712 device_printf(sc->dc_dev, "watchdog timeout\n");
3713
3714 dc_stop(sc);
3715 dc_reset(sc);
3716 dc_init_locked(sc);
3717
3718 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3719 dc_start_locked(ifp);
3720 }
3721
3722 /*
3723 * Stop the adapter and free any mbufs allocated to the
3724 * RX and TX lists.
3725 */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	u_int32_t ctl;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	ld = sc->dc_ldata;
	cd = &sc->dc_cdata;

	/* Stop the tick and watchdog callouts and disarm the timer. */
	callout_stop(&sc->dc_stat_ch);
	callout_stop(&sc->dc_wdog_ch);
	sc->dc_wdog_timer = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Halt RX/TX engines, mask interrupts, clear ring base addresses. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			/*
			 * Skip setup-frame descriptors and non-final
			 * fragments: only the LASTFRAG slot owns the mbuf
			 * and DMA map (see dc_encap()), so freeing here
			 * would be a double free.
			 */
			if ((ctl & DC_TXCTL_SETUP) ||
			    !(ctl & DC_TXCTL_LASTFRAG)) {
				cd->dc_tx_chain[i] = NULL;
				continue;
			}
			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
			m_freem(cd->dc_tx_chain[i]);
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));
}
3782
3783 /*
3784 * Device suspend routine. Stop the interface and save some PCI
3785 * settings in case the BIOS doesn't restore them properly on
3786 * resume.
3787 */
3788 static int
3789 dc_suspend(device_t dev)
3790 {
3791 struct dc_softc *sc;
3792
3793 sc = device_get_softc(dev);
3794 DC_LOCK(sc);
3795 dc_stop(sc);
3796 sc->suspended = 1;
3797 DC_UNLOCK(sc);
3798
3799 return (0);
3800 }
3801
3802 /*
3803 * Device resume routine. Restore some PCI settings in case the BIOS
3804 * doesn't, re-enable busmastering, and restart the interface if
3805 * appropriate.
3806 */
3807 static int
3808 dc_resume(device_t dev)
3809 {
3810 struct dc_softc *sc;
3811 struct ifnet *ifp;
3812
3813 sc = device_get_softc(dev);
3814 ifp = sc->dc_ifp;
3815
3816 /* reinitialize interface if necessary */
3817 DC_LOCK(sc);
3818 if (ifp->if_flags & IFF_UP)
3819 dc_init_locked(sc);
3820
3821 sc->suspended = 0;
3822 DC_UNLOCK(sc);
3823
3824 return (0);
3825 }
3826
3827 /*
3828 * Stop all chip I/O so that the kernel's probe routines don't
3829 * get confused by errant DMAs when rebooting.
3830 */
3831 static int
3832 dc_shutdown(device_t dev)
3833 {
3834 struct dc_softc *sc;
3835
3836 sc = device_get_softc(dev);
3837
3838 DC_LOCK(sc);
3839 dc_stop(sc);
3840 DC_UNLOCK(sc);
3841
3842 return (0);
3843 }
Cache object: 65598c14441edbe67c10667df73ab8a0
|