FreeBSD/Linux Kernel Cross Reference
sys/pci/if_dc.c
1 /*-
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
38 * series chips and several workalikes including the following:
39 *
40 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
41 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
42 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
43 * ASIX Electronics AX88140A (www.asix.com.tw)
44 * ASIX Electronics AX88141 (www.asix.com.tw)
45 * ADMtek AL981 (www.admtek.com.tw)
46 * ADMtek AN985 (www.admtek.com.tw)
47 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985
48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49 * Accton EN1217 (www.accton.com)
50 * Xircom X3201 (www.xircom.com)
51 * Abocom FE2500
52 * Conexant LANfinity (www.conexant.com)
53 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com)
54 *
55 * Datasheets for the 21143 are available at developer.intel.com.
56 * Datasheets for the clone parts can be found at their respective sites.
57 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
58 * The PNIC II is essentially a Macronix 98715A chip; the only difference
59 * worth noting is that its multicast hash table is only 128 bits wide
60 * instead of 512.
61 *
62 * Written by Bill Paul <wpaul@ee.columbia.edu>
63 * Electrical Engineering Department
64 * Columbia University, New York City
65 */
66 /*
67 * The Intel 21143 is the successor to the DEC 21140. It is basically
68 * the same as the 21140 but with a few new features. The 21143 supports
69 * three kinds of media attachments:
70 *
71 * o MII port, for 10Mbps and 100Mbps support and NWAY
72 * autonegotiation provided by an external PHY.
73 * o SYM port, for symbol mode 100Mbps support.
74 * o 10baseT port.
75 * o AUI/BNC port.
76 *
77 * The 100Mbps SYM port and 10baseT port can be used together in
78 * combination with the internal NWAY support to create a 10/100
79 * autosensing configuration.
80 *
81 * Note that not all tulip workalikes are handled in this driver: we only
82 * deal with those which are relatively well behaved. The Winbond is
83 * handled separately due to its different register offsets and the
84 * special handling needed for its various bugs. The PNIC is handled
85 * here, but I'm not thrilled about it.
86 *
87 * All of the workalike chips use some form of MII transceiver support
88 * with the exception of the Macronix chips, which also have a SYM port.
89 * The ASIX AX88140A is also documented to have a SYM port, but all
90 * the cards I've seen use an MII transceiver, probably because the
91 * AX88140A doesn't support internal NWAY.
92 */
93
94 #include <sys/param.h>
95 #include <sys/endian.h>
96 #include <sys/systm.h>
97 #include <sys/sockio.h>
98 #include <sys/mbuf.h>
99 #include <sys/malloc.h>
100 #include <sys/kernel.h>
101 #include <sys/module.h>
102 #include <sys/socket.h>
103 #include <sys/sysctl.h>
104
105 #include <net/if.h>
106 #include <net/if_arp.h>
107 #include <net/ethernet.h>
108 #include <net/if_dl.h>
109 #include <net/if_media.h>
110 #include <net/if_types.h>
111 #include <net/if_vlan_var.h>
112
113 #include <net/bpf.h>
114
115 #include <machine/bus_pio.h>
116 #include <machine/bus_memio.h>
117 #include <machine/bus.h>
118 #include <machine/resource.h>
119 #include <sys/bus.h>
120 #include <sys/rman.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124
125 #include <dev/pci/pcireg.h>
126 #include <dev/pci/pcivar.h>
127
128 #define DC_USEIOSPACE
129 #ifdef __alpha__
130 #define SRM_MEDIA
131 #endif
132
133 #include <pci/if_dcreg.h>
134
135 #ifdef __sparc64__
136 #include <dev/ofw/openfirm.h>
137 #include <machine/ofw_machdep.h>
138 #endif
139
140 MODULE_DEPEND(dc, pci, 1, 1, 1);
141 MODULE_DEPEND(dc, ether, 1, 1, 1);
142 MODULE_DEPEND(dc, miibus, 1, 1, 1);
143
144 /* "controller miibus0" required. See GENERIC if you get errors here. */
145 #include "miibus_if.h"
146
147 /*
148 * Various supported device vendors/types and their names.
149 */
static struct dc_type dc_devs[] = {
	{ DC_VENDORID_DEC, DC_DEVICEID_21143,
		"Intel 21143 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009,
		"Davicom DM9009 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100,
		"Davicom DM9100 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102 10/100BaseTX" },
	/*
	 * Some chip revisions (DM9102A, 98713A, 98715A/AEC, 82c169)
	 * share a PCI device ID with their base part; the duplicate
	 * entries below exist so the revision-specific name can be
	 * reported.  The probe code distinguishes them by revision.
	 */
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102A 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AL981,
		"ADMtek AL981 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AN985,
		"ADMtek AN985 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511,
		"ADMtek ADM9511 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513,
		"ADMtek ADM9513 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_FA511,
		"Netgear FA511 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88140A 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88141 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713A 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715/98715A 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98725 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98727,
		"Macronix 98727/98732 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C115,
		"LC82C115 PNIC II 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c168 PNIC 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c169 PNIC 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN1217,
		"Accton EN1217 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN2242,
		"Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_XIRCOM, DC_DEVICEID_X3201,
		"Xircom X3201 10/100BaseTX" },
	{ DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500,
		"Abocom FE2500 10/100BaseTX" },
	{ DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX,
		"Abocom FE2500MX 10/100BaseTX" },
	{ DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112,
		"Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX,
		"Hawking CB102 CardBus 10/100" },
	{ DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T,
		"PlaneX FNW-3602-T CardBus 10/100" },
	{ DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB,
		"3Com OfficeConnect 10/100B" },
	{ DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120,
		"Microsoft MN-120 CardBus 10/100" },
	{ DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130,
		"Microsoft MN-130 10/100" },
	{ DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130_FAKE,
		"Microsoft MN-130 10/100" },
	/* Sentinel: terminates table walks in the probe code. */
	{ 0, 0, NULL }
};
223
224 static int dc_probe(device_t);
225 static int dc_attach(device_t);
226 static int dc_detach(device_t);
227 static int dc_suspend(device_t);
228 static int dc_resume(device_t);
229 static struct dc_type *dc_devtype(device_t);
230 static int dc_newbuf(struct dc_softc *, int, int);
231 static int dc_encap(struct dc_softc *, struct mbuf **);
232 static void dc_pnic_rx_bug_war(struct dc_softc *, int);
233 static int dc_rx_resync(struct dc_softc *);
234 static void dc_rxeof(struct dc_softc *);
235 static void dc_txeof(struct dc_softc *);
236 static void dc_tick(void *);
237 static void dc_tx_underrun(struct dc_softc *);
238 static void dc_intr(void *);
239 static void dc_start(struct ifnet *);
240 static int dc_ioctl(struct ifnet *, u_long, caddr_t);
241 static void dc_init(void *);
242 static void dc_stop(struct dc_softc *);
243 static void dc_watchdog(struct ifnet *);
244 static void dc_shutdown(device_t);
245 static int dc_ifmedia_upd(struct ifnet *);
246 static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
247
248 static void dc_delay(struct dc_softc *);
249 static void dc_eeprom_idle(struct dc_softc *);
250 static void dc_eeprom_putbyte(struct dc_softc *, int);
251 static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
252 static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
253 static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
254 static void dc_eeprom_width(struct dc_softc *);
255 static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
256
257 static void dc_mii_writebit(struct dc_softc *, int);
258 static int dc_mii_readbit(struct dc_softc *);
259 static void dc_mii_sync(struct dc_softc *);
260 static void dc_mii_send(struct dc_softc *, u_int32_t, int);
261 static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
262 static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
263 static int dc_miibus_readreg(device_t, int, int);
264 static int dc_miibus_writereg(device_t, int, int, int);
265 static void dc_miibus_statchg(device_t);
266 static void dc_miibus_mediainit(device_t);
267
268 static void dc_setcfg(struct dc_softc *, int);
269 static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *);
270 static uint32_t dc_mchash_be(const uint8_t *);
271 static void dc_setfilt_21143(struct dc_softc *);
272 static void dc_setfilt_asix(struct dc_softc *);
273 static void dc_setfilt_admtek(struct dc_softc *);
274 static void dc_setfilt_xircom(struct dc_softc *);
275
276 static void dc_setfilt(struct dc_softc *);
277
278 static void dc_reset(struct dc_softc *);
279 static int dc_list_rx_init(struct dc_softc *);
280 static int dc_list_tx_init(struct dc_softc *);
281
282 static void dc_read_srom(struct dc_softc *, int);
283 static void dc_parse_21143_srom(struct dc_softc *);
284 static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
285 static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
286 static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
287 static void dc_apply_fixup(struct dc_softc *, int);
288
289 static void dc_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
290 static void dc_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
291
292 #ifdef DC_USEIOSPACE
293 #define DC_RES SYS_RES_IOPORT
294 #define DC_RID DC_PCI_CFBIO
295 #else
296 #define DC_RES SYS_RES_MEMORY
297 #define DC_RID DC_PCI_CFBMA
298 #endif
299
/* newbus method dispatch table for the dc(4) driver. */
static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* bus interface (needed so the child miibus can attach) */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface: bit-bang PHY access used by miibus */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	/* Sentinel. */
	{ 0, 0 }
};
321
/* Driver declaration: name, method table, softc size. */
static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};
327
328 static devclass_t dc_devclass;
329 #ifdef __i386__
330 static int dc_quick = 1;
331 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0,
332 "do not m_devget() in dc driver");
333 #endif
334
335 DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0);
336 DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0);
337 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0);
338
339 #define DC_SETBIT(sc, reg, x) \
340 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
341
342 #define DC_CLRBIT(sc, reg, x) \
343 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
344
345 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x))
346 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x))
347
348 #define IS_MPSAFE 0
349
350 static void
351 dc_delay(struct dc_softc *sc)
352 {
353 int idx;
354
355 for (idx = (300 / 33) + 1; idx > 0; idx--)
356 CSR_READ_4(sc, DC_BUSCTL);
357 }
358
/*
 * Determine the address width (number of address bits) of the serial
 * EEPROM.  A READ opcode is clocked out followed by zero address bits;
 * per the Microwire protocol the part drives DATAOUT low once it has
 * consumed a complete address, so the number of clocks needed before
 * DATAOUT drops is the ROM's address width.  The result is stored in
 * sc->dc_romwidth for later use by dc_eeprom_putbyte().
 */
static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (binary 110 == 6), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed zero address bits until DATAOUT goes low; the loop index i
	 * at exit is the detected address width (capped at 12).
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Widths outside 4..12 are implausible; fall back to 6 bits. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
422
423 static void
424 dc_eeprom_idle(struct dc_softc *sc)
425 {
426 int i;
427
428 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
429 dc_delay(sc);
430 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
431 dc_delay(sc);
432 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
433 dc_delay(sc);
434 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
435 dc_delay(sc);
436
437 for (i = 0; i < 25; i++) {
438 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
439 dc_delay(sc);
440 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
441 dc_delay(sc);
442 }
443
444 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
445 dc_delay(sc);
446 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
447 dc_delay(sc);
448 CSR_WRITE_4(sc, DC_SIO, 0x00000000);
449 }
450
451 /*
452 * Send a read command and address to the EEPROM, check for ACK.
453 */
454 static void
455 dc_eeprom_putbyte(struct dc_softc *sc, int addr)
456 {
457 int d, i;
458
459 d = DC_EECMD_READ >> 6;
460 for (i = 3; i--; ) {
461 if (d & (1 << i))
462 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
463 else
464 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
465 dc_delay(sc);
466 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
467 dc_delay(sc);
468 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
469 dc_delay(sc);
470 }
471
472 /*
473 * Feed in each bit and strobe the clock.
474 */
475 for (i = sc->dc_romwidth; i--;) {
476 if (addr & (1 << i)) {
477 SIO_SET(DC_SIO_EE_DATAIN);
478 } else {
479 SIO_CLR(DC_SIO_EE_DATAIN);
480 }
481 dc_delay(sc);
482 SIO_SET(DC_SIO_EE_CLK);
483 dc_delay(sc);
484 SIO_CLR(DC_SIO_EE_CLK);
485 dc_delay(sc);
486 }
487 }
488
489 /*
490 * Read a word of data stored in the EEPROM at address 'addr.'
491 * The PNIC 82c168/82c169 has its own non-standard way to read
492 * the EEPROM.
493 */
494 static void
495 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
496 {
497 int i;
498 u_int32_t r;
499
500 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr);
501
502 for (i = 0; i < DC_TIMEOUT; i++) {
503 DELAY(1);
504 r = CSR_READ_4(sc, DC_SIO);
505 if (!(r & DC_PN_SIOCTL_BUSY)) {
506 *dest = (u_int16_t)(r & 0xFFFF);
507 return;
508 }
509 }
510 }
511
512 /*
513 * Read a word of data stored in the EEPROM at address 'addr.'
514 * The Xircom X3201 has its own non-standard way to read
515 * the EEPROM, too.
516 */
517 static void
518 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
519 {
520
521 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
522
523 addr *= 2;
524 CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
525 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
526 addr += 1;
527 CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
528 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;
529
530 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
531 }
532
533 /*
534 * Read a word of data stored in the EEPROM at address 'addr.'
535 */
536 static void
537 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
538 {
539 int i;
540 u_int16_t word = 0;
541
542 /* Force EEPROM to idle state. */
543 dc_eeprom_idle(sc);
544
545 /* Enter EEPROM access mode. */
546 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
547 dc_delay(sc);
548 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
549 dc_delay(sc);
550 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
551 dc_delay(sc);
552 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
553 dc_delay(sc);
554
555 /*
556 * Send address of word we want to read.
557 */
558 dc_eeprom_putbyte(sc, addr);
559
560 /*
561 * Start reading bits from EEPROM.
562 */
563 for (i = 0x8000; i; i >>= 1) {
564 SIO_SET(DC_SIO_EE_CLK);
565 dc_delay(sc);
566 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
567 word |= i;
568 dc_delay(sc);
569 SIO_CLR(DC_SIO_EE_CLK);
570 dc_delay(sc);
571 }
572
573 /* Turn off EEPROM access mode. */
574 dc_eeprom_idle(sc);
575
576 *dest = word;
577 }
578
579 /*
580 * Read a sequence of words from the EEPROM.
581 */
582 static void
583 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
584 {
585 int i;
586 u_int16_t word = 0, *ptr;
587
588 for (i = 0; i < cnt; i++) {
589 if (DC_IS_PNIC(sc))
590 dc_eeprom_getword_pnic(sc, off + i, &word);
591 else if (DC_IS_XIRCOM(sc))
592 dc_eeprom_getword_xircom(sc, off + i, &word);
593 else
594 dc_eeprom_getword(sc, off + i, &word);
595 ptr = (u_int16_t *)(dest + (i * 2));
596 if (be)
597 *ptr = be16toh(word);
598 else
599 *ptr = le16toh(word);
600 }
601 }
602
603 /*
604 * The following two routines are taken from the Macronix 98713
605 * Application Notes pp.19-21.
606 */
607 /*
608 * Write a bit to the MII bus.
609 */
610 static void
611 dc_mii_writebit(struct dc_softc *sc, int bit)
612 {
613
614 if (bit)
615 CSR_WRITE_4(sc, DC_SIO,
616 DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT);
617 else
618 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
619
620 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
621 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
622 }
623
624 /*
625 * Read a bit from the MII bus.
626 */
627 static int
628 dc_mii_readbit(struct dc_softc *sc)
629 {
630
631 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR);
632 CSR_READ_4(sc, DC_SIO);
633 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
634 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
635 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
636 return (1);
637
638 return (0);
639 }
640
641 /*
642 * Sync the PHYs by setting data bit and strobing the clock 32 times.
643 */
644 static void
645 dc_mii_sync(struct dc_softc *sc)
646 {
647 int i;
648
649 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
650
651 for (i = 0; i < 32; i++)
652 dc_mii_writebit(sc, 1);
653 }
654
655 /*
656 * Clock a series of bits through the MII.
657 */
658 static void
659 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
660 {
661 int i;
662
663 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
664 dc_mii_writebit(sc, bits & i);
665 }
666
667 /*
668 * Read an PHY register through the MII.
669 */
670 static int
671 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
672 {
673 int i, ack;
674
675 DC_LOCK(sc);
676
677 /*
678 * Set up frame for RX.
679 */
680 frame->mii_stdelim = DC_MII_STARTDELIM;
681 frame->mii_opcode = DC_MII_READOP;
682 frame->mii_turnaround = 0;
683 frame->mii_data = 0;
684
685 /*
686 * Sync the PHYs.
687 */
688 dc_mii_sync(sc);
689
690 /*
691 * Send command/address info.
692 */
693 dc_mii_send(sc, frame->mii_stdelim, 2);
694 dc_mii_send(sc, frame->mii_opcode, 2);
695 dc_mii_send(sc, frame->mii_phyaddr, 5);
696 dc_mii_send(sc, frame->mii_regaddr, 5);
697
698 #ifdef notdef
699 /* Idle bit */
700 dc_mii_writebit(sc, 1);
701 dc_mii_writebit(sc, 0);
702 #endif
703
704 /* Check for ack. */
705 ack = dc_mii_readbit(sc);
706
707 /*
708 * Now try reading data bits. If the ack failed, we still
709 * need to clock through 16 cycles to keep the PHY(s) in sync.
710 */
711 if (ack) {
712 for (i = 0; i < 16; i++)
713 dc_mii_readbit(sc);
714 goto fail;
715 }
716
717 for (i = 0x8000; i; i >>= 1) {
718 if (!ack) {
719 if (dc_mii_readbit(sc))
720 frame->mii_data |= i;
721 }
722 }
723
724 fail:
725
726 dc_mii_writebit(sc, 0);
727 dc_mii_writebit(sc, 0);
728
729 DC_UNLOCK(sc);
730
731 if (ack)
732 return (1);
733 return (0);
734 }
735
736 /*
737 * Write to a PHY register through the MII.
738 */
739 static int
740 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
741 {
742
743 DC_LOCK(sc);
744 /*
745 * Set up frame for TX.
746 */
747
748 frame->mii_stdelim = DC_MII_STARTDELIM;
749 frame->mii_opcode = DC_MII_WRITEOP;
750 frame->mii_turnaround = DC_MII_TURNAROUND;
751
752 /*
753 * Sync the PHYs.
754 */
755 dc_mii_sync(sc);
756
757 dc_mii_send(sc, frame->mii_stdelim, 2);
758 dc_mii_send(sc, frame->mii_opcode, 2);
759 dc_mii_send(sc, frame->mii_phyaddr, 5);
760 dc_mii_send(sc, frame->mii_regaddr, 5);
761 dc_mii_send(sc, frame->mii_turnaround, 2);
762 dc_mii_send(sc, frame->mii_data, 16);
763
764 /* Idle bit. */
765 dc_mii_writebit(sc, 0);
766 dc_mii_writebit(sc, 0);
767
768 DC_UNLOCK(sc);
769
770 return (0);
771 }
772
773 static int
774 dc_miibus_readreg(device_t dev, int phy, int reg)
775 {
776 struct dc_mii_frame frame;
777 struct dc_softc *sc;
778 int i, rval, phy_reg = 0;
779
780 sc = device_get_softc(dev);
781 bzero(&frame, sizeof(frame));
782
783 /*
784 * Note: both the AL981 and AN985 have internal PHYs,
785 * however the AL981 provides direct access to the PHY
786 * registers while the AN985 uses a serial MII interface.
787 * The AN985's MII interface is also buggy in that you
788 * can read from any MII address (0 to 31), but only address 1
789 * behaves normally. To deal with both cases, we pretend
790 * that the PHY is at MII address 1.
791 */
792 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
793 return (0);
794
795 /*
796 * Note: the ukphy probes of the RS7112 report a PHY at
797 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
798 * so we only respond to correct one.
799 */
800 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
801 return (0);
802
803 if (sc->dc_pmode != DC_PMODE_MII) {
804 if (phy == (MII_NPHY - 1)) {
805 switch (reg) {
806 case MII_BMSR:
807 /*
808 * Fake something to make the probe
809 * code think there's a PHY here.
810 */
811 return (BMSR_MEDIAMASK);
812 break;
813 case MII_PHYIDR1:
814 if (DC_IS_PNIC(sc))
815 return (DC_VENDORID_LO);
816 return (DC_VENDORID_DEC);
817 break;
818 case MII_PHYIDR2:
819 if (DC_IS_PNIC(sc))
820 return (DC_DEVICEID_82C168);
821 return (DC_DEVICEID_21143);
822 break;
823 default:
824 return (0);
825 break;
826 }
827 } else
828 return (0);
829 }
830
831 if (DC_IS_PNIC(sc)) {
832 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
833 (phy << 23) | (reg << 18));
834 for (i = 0; i < DC_TIMEOUT; i++) {
835 DELAY(1);
836 rval = CSR_READ_4(sc, DC_PN_MII);
837 if (!(rval & DC_PN_MII_BUSY)) {
838 rval &= 0xFFFF;
839 return (rval == 0xFFFF ? 0 : rval);
840 }
841 }
842 return (0);
843 }
844
845 if (DC_IS_COMET(sc)) {
846 switch (reg) {
847 case MII_BMCR:
848 phy_reg = DC_AL_BMCR;
849 break;
850 case MII_BMSR:
851 phy_reg = DC_AL_BMSR;
852 break;
853 case MII_PHYIDR1:
854 phy_reg = DC_AL_VENID;
855 break;
856 case MII_PHYIDR2:
857 phy_reg = DC_AL_DEVID;
858 break;
859 case MII_ANAR:
860 phy_reg = DC_AL_ANAR;
861 break;
862 case MII_ANLPAR:
863 phy_reg = DC_AL_LPAR;
864 break;
865 case MII_ANER:
866 phy_reg = DC_AL_ANER;
867 break;
868 default:
869 printf("dc%d: phy_read: bad phy register %x\n",
870 sc->dc_unit, reg);
871 return (0);
872 break;
873 }
874
875 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
876
877 if (rval == 0xFFFF)
878 return (0);
879 return (rval);
880 }
881
882 frame.mii_phyaddr = phy;
883 frame.mii_regaddr = reg;
884 if (sc->dc_type == DC_TYPE_98713) {
885 phy_reg = CSR_READ_4(sc, DC_NETCFG);
886 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
887 }
888 dc_mii_readreg(sc, &frame);
889 if (sc->dc_type == DC_TYPE_98713)
890 CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
891
892 return (frame.mii_data);
893 }
894
895 static int
896 dc_miibus_writereg(device_t dev, int phy, int reg, int data)
897 {
898 struct dc_softc *sc;
899 struct dc_mii_frame frame;
900 int i, phy_reg = 0;
901
902 sc = device_get_softc(dev);
903 bzero(&frame, sizeof(frame));
904
905 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
906 return (0);
907
908 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
909 return (0);
910
911 if (DC_IS_PNIC(sc)) {
912 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
913 (phy << 23) | (reg << 10) | data);
914 for (i = 0; i < DC_TIMEOUT; i++) {
915 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
916 break;
917 }
918 return (0);
919 }
920
921 if (DC_IS_COMET(sc)) {
922 switch (reg) {
923 case MII_BMCR:
924 phy_reg = DC_AL_BMCR;
925 break;
926 case MII_BMSR:
927 phy_reg = DC_AL_BMSR;
928 break;
929 case MII_PHYIDR1:
930 phy_reg = DC_AL_VENID;
931 break;
932 case MII_PHYIDR2:
933 phy_reg = DC_AL_DEVID;
934 break;
935 case MII_ANAR:
936 phy_reg = DC_AL_ANAR;
937 break;
938 case MII_ANLPAR:
939 phy_reg = DC_AL_LPAR;
940 break;
941 case MII_ANER:
942 phy_reg = DC_AL_ANER;
943 break;
944 default:
945 printf("dc%d: phy_write: bad phy register %x\n",
946 sc->dc_unit, reg);
947 return (0);
948 break;
949 }
950
951 CSR_WRITE_4(sc, phy_reg, data);
952 return (0);
953 }
954
955 frame.mii_phyaddr = phy;
956 frame.mii_regaddr = reg;
957 frame.mii_data = data;
958
959 if (sc->dc_type == DC_TYPE_98713) {
960 phy_reg = CSR_READ_4(sc, DC_NETCFG);
961 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
962 }
963 dc_mii_writereg(sc, &frame);
964 if (sc->dc_type == DC_TYPE_98713)
965 CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
966
967 return (0);
968 }
969
970 static void
971 dc_miibus_statchg(device_t dev)
972 {
973 struct dc_softc *sc;
974 struct mii_data *mii;
975 struct ifmedia *ifm;
976
977 sc = device_get_softc(dev);
978 if (DC_IS_ADMTEK(sc))
979 return;
980
981 mii = device_get_softc(sc->dc_miibus);
982 ifm = &mii->mii_media;
983 if (DC_IS_DAVICOM(sc) &&
984 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
985 dc_setcfg(sc, ifm->ifm_media);
986 sc->dc_if_media = ifm->ifm_media;
987 } else {
988 dc_setcfg(sc, mii->mii_media_active);
989 sc->dc_if_media = mii->mii_media_active;
990 }
991 }
992
993 /*
994 * Special support for DM9102A cards with HomePNA PHYs. Note:
995 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
996 * to be impossible to talk to the management interface of the DM9801
997 * PHY (its MDIO pin is not connected to anything). Consequently,
998 * the driver has to just 'know' about the additional mode and deal
999 * with it itself. *sigh*
1000 */
1001 static void
1002 dc_miibus_mediainit(device_t dev)
1003 {
1004 struct dc_softc *sc;
1005 struct mii_data *mii;
1006 struct ifmedia *ifm;
1007 int rev;
1008
1009 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
1010
1011 sc = device_get_softc(dev);
1012 mii = device_get_softc(sc->dc_miibus);
1013 ifm = &mii->mii_media;
1014
1015 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
1016 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL);
1017 }
1018
1019 #define DC_BITS_512 9
1020 #define DC_BITS_128 7
1021 #define DC_BITS_64 6
1022
1023 static uint32_t
1024 dc_mchash_le(struct dc_softc *sc, const uint8_t *addr)
1025 {
1026 uint32_t crc;
1027
1028 /* Compute CRC for the address value. */
1029 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
1030
1031 /*
1032 * The hash table on the PNIC II and the MX98715AEC-C/D/E
1033 * chips is only 128 bits wide.
1034 */
1035 if (sc->dc_flags & DC_128BIT_HASH)
1036 return (crc & ((1 << DC_BITS_128) - 1));
1037
1038 /* The hash table on the MX98715BEC is only 64 bits wide. */
1039 if (sc->dc_flags & DC_64BIT_HASH)
1040 return (crc & ((1 << DC_BITS_64) - 1));
1041
1042 /* Xircom's hash filtering table is different (read: weird) */
1043 /* Xircom uses the LEAST significant bits */
1044 if (DC_IS_XIRCOM(sc)) {
1045 if ((crc & 0x180) == 0x180)
1046 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
1047 else
1048 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 +
1049 (12 << 4));
1050 }
1051
1052 return (crc & ((1 << DC_BITS_512) - 1));
1053 }
1054
1055 /*
1056 * Calculate CRC of a multicast group address, return the lower 6 bits.
1057 */
1058 static uint32_t
1059 dc_mchash_be(const uint8_t *addr)
1060 {
1061 uint32_t crc;
1062
1063 /* Compute CRC for the address value. */
1064 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
1065
1066 /* Return the filter bit position. */
1067 return ((crc >> 26) & 0x0000003F);
1068 }
1069
1070 /*
1071 * 21143-style RX filter setup routine. Filter programming is done by
1072 * downloading a special setup frame into the TX engine. 21143, Macronix,
1073 * PNIC, PNIC II and Davicom chips are programmed this way.
1074 *
1075 * We always program the chip using 'hash perfect' mode, i.e. one perfect
1076 * address (our node address) and a 512-bit hash filter for multicast
1077 * frames. We also sneak the broadcast address into the hash filter since
1078 * we need that too.
1079 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Claim the next TX descriptor for the setup frame and point it
	 * at the pre-allocated setup buffer (dc_sbuf/dc_saddr).
	 */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer in the tx chain slot so dc_txeof()
	 * can recognize this descriptor (cast is intentional -- the
	 * slot normally holds an mbuf pointer).
	 */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * Program the multicast hash: each group address sets one bit
	 * in the hash table laid out in the setup buffer as 16-bit
	 * words stored in 32-bit slots (hence the >> 4 / & 0xF split).
	 */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	/* Sneak the broadcast address into the hash filter too. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/*
	 * Set our MAC address as the one perfect-filter entry; words
	 * 39-41 are the station-address slots in the hash-perfect
	 * setup frame layout.
	 */
	sp[39] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	sp[40] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	sp[41] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Hand the descriptor to the chip and poke transmit. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	/* Arm the watchdog in case the setup frame never completes. */
	ifp->if_timer = 5;
}
1148
/*
 * ADMtek RX filter setup. The station address and the 64-bit
 * multicast hash table are programmed directly via CSRs (no setup
 * frame is needed).
 */
static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;			/* hash bit index */
	u_int32_t hashes[2] = { 0, 0 };	/* shadow of MAR0/MAR1 */

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address. */
	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Centaur parts use the LE hash, the rest use the BE hash. */
		if (DC_IS_CENTAUR(sc))
			h = dc_mchash_le(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_mchash_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		/* 64-bit table split across the two 32-bit MAR registers. */
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1206
/*
 * ASIX RX filter setup. Filter registers are accessed indirectly:
 * write the register index to DC_AX_FILTIDX, then its value to
 * DC_AX_FILTDATA.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;			/* hash bit index */
	u_int32_t hashes[2] = { 0, 0 };	/* shadow of MAR0/MAR1 */

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		/* 64-bit table split across the two 32-bit MAR registers. */
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1276
/*
 * Xircom RX filter setup. Like the 21143, the filter is loaded via
 * a setup frame on the TX ring, but here TX/RX are stopped around
 * the update and the station address lives at the start of the
 * setup buffer (sp[0..2]) rather than at sp[39..41].
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;		/* TX descriptor carrying the setup frame */
	u_int32_t h, *sp;		/* hash bit index / setup buffer */
	int i;

	ifp = &sc->arpcom.ac_if;
	/* Stop the transmitter and receiver while reprogramming. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor slot for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer, 'hash perfect' filtering. */
	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Stash the setup buffer pointer in this slot's mbuf chain entry. */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Set one hash table bit per multicast address. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	/* Sneak the broadcast address into the hash filter too. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[0] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	sp[1] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	sp[2] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Restart TX/RX and hand the setup frame to the chip. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}
1346
/*
 * Dispatch RX filter programming to the routine appropriate for
 * this chip family.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);
	else if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);
	else if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);
	else if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1364
1365 /*
1366 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
1367 * the netconfig register, we first have to put the transmit and/or
1368 * receive logic in the idle state.
1369 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0, watchdogreg;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If TX or RX is currently on, stop both and wait for the TX
	 * engine to go idle and the RX engine to reach the stopped or
	 * wait state before touching speed/duplex bits; restart later.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	/* 100Mbps: clear speed-select, turn on heartbeat. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
				/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII port: disable PCS/scrambler, select the port. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* SYM/serial port: enable PCS and scrambler. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX | IFM_FDX : IFM_100_TX);
		}
	}

	/* 10Mbps: set speed-select, turn off heartbeat. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reconfigure the SIA through a reset cycle;
				 * 0x7F3D/0x7F3F select FDX vs. HDX, with
				 * autonegotiation disabled afterwards.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T | IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Program the duplex setting. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}
1512
/*
 * Issue a software reset of the chip and reinitialize the basic
 * CSRs to a known (quiescent) state.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	/* Assert the software reset bit and poll for it to self-clear. */
	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * For these chips, clear the reset bit by hand after a delay;
	 * zeroing 'i' suppresses the timeout warning below.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and zero the bus and network config. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}
1555
/*
 * Match the device's PCI vendor/device ID against the dc_devs table
 * and return the matching entry, or NULL. For several device IDs a
 * newer chip revision is represented by the entry immediately after
 * the base one, so on a revision match we step 't' forward.
 * NOTE(review): this relies on the ordering of entries in dc_devs.
 */
static struct dc_type *
dc_devtype(device_t dev)
{
	struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			/*
			 * The Microsoft MN-130 has a device ID of 0x0002,
			 * which happens to be the same as the PNIC 82c168.
			 * To keep dc_attach() from getting confused, we
			 * pretend its ID is something different.
			 * XXX: ideally, dc_attach() should be checking
			 * vendorid+deviceid together to avoid such
			 * collisions.
			 */
			if (t->dc_vid == DC_VENDORID_MICROSOFT &&
			    t->dc_did == DC_DEVICEID_MSMN130)
				t++;
			return (t);
		}
		t++;
	}

	return (NULL);
}
1609
1610 /*
1611 * Probe for a 21143 or clone chip. Check the PCI vendor and device
1612 * IDs against our list and return a device name if we find a match.
1613 * We do a little bit of extra work to identify the exact type of
1614 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
1615 * but different revision IDs. The same is true for 98715/98715A
1616 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
1617 * cases, the exact chip revision affects driver behavior.
1618 */
1619 static int
1620 dc_probe(device_t dev)
1621 {
1622 struct dc_type *t;
1623
1624 t = dc_devtype(dev);
1625
1626 if (t != NULL) {
1627 device_set_desc(dev, t->dc_name);
1628 return (BUS_PROBE_DEFAULT);
1629 }
1630
1631 return (ENXIO);
1632 }
1633
1634 static void
1635 dc_apply_fixup(struct dc_softc *sc, int media)
1636 {
1637 struct dc_mediainfo *m;
1638 u_int8_t *p;
1639 int i;
1640 u_int32_t reg;
1641
1642 m = sc->dc_mi;
1643
1644 while (m != NULL) {
1645 if (m->dc_media == media)
1646 break;
1647 m = m->dc_next;
1648 }
1649
1650 if (m == NULL)
1651 return;
1652
1653 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1654 reg = (p[0] | (p[1] << 8)) << 16;
1655 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1656 }
1657
1658 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1659 reg = (p[0] | (p[1] << 8)) << 16;
1660 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1661 }
1662 }
1663
1664 static void
1665 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1666 {
1667 struct dc_mediainfo *m;
1668
1669 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1670 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1671 case DC_SIA_CODE_10BT:
1672 m->dc_media = IFM_10_T;
1673 break;
1674 case DC_SIA_CODE_10BT_FDX:
1675 m->dc_media = IFM_10_T | IFM_FDX;
1676 break;
1677 case DC_SIA_CODE_10B2:
1678 m->dc_media = IFM_10_2;
1679 break;
1680 case DC_SIA_CODE_10B5:
1681 m->dc_media = IFM_10_5;
1682 break;
1683 default:
1684 break;
1685 }
1686
1687 /*
1688 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1689 * Things apparently already work for cards that do
1690 * supply Media Specific Data.
1691 */
1692 if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1693 m->dc_gp_len = 2;
1694 m->dc_gp_ptr =
1695 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1696 } else {
1697 m->dc_gp_len = 2;
1698 m->dc_gp_ptr =
1699 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1700 }
1701
1702 m->dc_next = sc->dc_mi;
1703 sc->dc_mi = m;
1704
1705 sc->dc_pmode = DC_PMODE_SIA;
1706 }
1707
1708 static void
1709 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1710 {
1711 struct dc_mediainfo *m;
1712
1713 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1714 if (l->dc_sym_code == DC_SYM_CODE_100BT)
1715 m->dc_media = IFM_100_TX;
1716
1717 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1718 m->dc_media = IFM_100_TX | IFM_FDX;
1719
1720 m->dc_gp_len = 2;
1721 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1722
1723 m->dc_next = sc->dc_mi;
1724 sc->dc_mi = m;
1725
1726 sc->dc_pmode = DC_PMODE_SYM;
1727 }
1728
1729 static void
1730 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1731 {
1732 struct dc_mediainfo *m;
1733 u_int8_t *p;
1734
1735 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1736 /* We abuse IFM_AUTO to represent MII. */
1737 m->dc_media = IFM_AUTO;
1738 m->dc_gp_len = l->dc_gpr_len;
1739
1740 p = (u_int8_t *)l;
1741 p += sizeof(struct dc_eblock_mii);
1742 m->dc_gp_ptr = p;
1743 p += 2 * l->dc_gpr_len;
1744 m->dc_reset_len = *p;
1745 p++;
1746 m->dc_reset_ptr = p;
1747
1748 m->dc_next = sc->dc_mi;
1749 sc->dc_mi = m;
1750 }
1751
1752 static void
1753 dc_read_srom(struct dc_softc *sc, int bits)
1754 {
1755 int size;
1756
1757 size = 2 << bits;
1758 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1759 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1760 }
1761
/*
 * Walk the 21143 SROM media leaf and build the media info list.
 * The leaf offset is stored at SROM byte 27; media blocks follow
 * the leaf header back to back, each prefixed by a type/length
 * header (low 7 bits of dc_len are the block length).
 */
static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		/* Advance past this block (length in low 7 bits + hdr byte). */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1818
1819 static void
1820 dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1821 {
1822 u_int32_t *paddr;
1823
1824 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
1825 paddr = arg;
1826 *paddr = segs->ds_addr;
1827 }
1828
1829 /*
1830 * Attach the interface. Allocate softc structures, do ifmedia
1831 * setup and ethernet/BPF attach.
1832 */
1833 static int
1834 dc_attach(device_t dev)
1835 {
1836 int tmp = 0;
1837 u_char eaddr[ETHER_ADDR_LEN];
1838 u_int32_t command;
1839 struct dc_softc *sc;
1840 struct ifnet *ifp;
1841 u_int32_t revision;
1842 int unit, error = 0, rid, mac_offset;
1843 int i;
1844 u_int8_t *mac;
1845
1846 sc = device_get_softc(dev);
1847 unit = device_get_unit(dev);
1848
1849 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1850 MTX_DEF | MTX_RECURSE);
1851
1852 /*
1853 * Map control/status registers.
1854 */
1855 pci_enable_busmaster(dev);
1856
1857 rid = DC_RID;
1858 sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);
1859
1860 if (sc->dc_res == NULL) {
1861 printf("dc%d: couldn't map ports/memory\n", unit);
1862 error = ENXIO;
1863 goto fail;
1864 }
1865
1866 sc->dc_btag = rman_get_bustag(sc->dc_res);
1867 sc->dc_bhandle = rman_get_bushandle(sc->dc_res);
1868
1869 /* Allocate interrupt. */
1870 rid = 0;
1871 sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1872 RF_SHAREABLE | RF_ACTIVE);
1873
1874 if (sc->dc_irq == NULL) {
1875 printf("dc%d: couldn't map interrupt\n", unit);
1876 error = ENXIO;
1877 goto fail;
1878 }
1879
1880 /* Need this info to decide on a chip type. */
1881 sc->dc_info = dc_devtype(dev);
1882 revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF;
1883
1884 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
1885 if (sc->dc_info->dc_did != DC_DEVICEID_82C168 &&
1886 sc->dc_info->dc_did != DC_DEVICEID_X3201)
1887 dc_eeprom_width(sc);
1888
1889 switch (sc->dc_info->dc_did) {
1890 case DC_DEVICEID_21143:
1891 sc->dc_type = DC_TYPE_21143;
1892 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1893 sc->dc_flags |= DC_REDUCED_MII_POLL;
1894 /* Save EEPROM contents so we can parse them later. */
1895 dc_read_srom(sc, sc->dc_romwidth);
1896 break;
1897 case DC_DEVICEID_DM9009:
1898 case DC_DEVICEID_DM9100:
1899 case DC_DEVICEID_DM9102:
1900 sc->dc_type = DC_TYPE_DM9102;
1901 sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
1902 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
1903 sc->dc_flags |= DC_TX_ALIGN;
1904 sc->dc_pmode = DC_PMODE_MII;
1905 /* Increase the latency timer value. */
1906 command = pci_read_config(dev, DC_PCI_CFLT, 4);
1907 command &= 0xFFFF00FF;
1908 command |= 0x00008000;
1909 pci_write_config(dev, DC_PCI_CFLT, command, 4);
1910 break;
1911 case DC_DEVICEID_AL981:
1912 sc->dc_type = DC_TYPE_AL981;
1913 sc->dc_flags |= DC_TX_USE_TX_INTR;
1914 sc->dc_flags |= DC_TX_ADMTEK_WAR;
1915 sc->dc_pmode = DC_PMODE_MII;
1916 dc_read_srom(sc, sc->dc_romwidth);
1917 break;
1918 case DC_DEVICEID_AN985:
1919 case DC_DEVICEID_ADM9511:
1920 case DC_DEVICEID_ADM9513:
1921 case DC_DEVICEID_FA511:
1922 case DC_DEVICEID_FE2500:
1923 case DC_DEVICEID_EN2242:
1924 case DC_DEVICEID_HAWKING_PN672TX:
1925 case DC_DEVICEID_3CSOHOB:
1926 case DC_DEVICEID_MSMN120:
1927 case DC_DEVICEID_MSMN130_FAKE: /* XXX avoid collision with PNIC*/
1928 sc->dc_type = DC_TYPE_AN985;
1929 sc->dc_flags |= DC_64BIT_HASH;
1930 sc->dc_flags |= DC_TX_USE_TX_INTR;
1931 sc->dc_flags |= DC_TX_ADMTEK_WAR;
1932 sc->dc_pmode = DC_PMODE_MII;
1933 /* Don't read SROM for - auto-loaded on reset */
1934 break;
1935 case DC_DEVICEID_98713:
1936 case DC_DEVICEID_98713_CP:
1937 if (revision < DC_REVISION_98713A) {
1938 sc->dc_type = DC_TYPE_98713;
1939 }
1940 if (revision >= DC_REVISION_98713A) {
1941 sc->dc_type = DC_TYPE_98713A;
1942 sc->dc_flags |= DC_21143_NWAY;
1943 }
1944 sc->dc_flags |= DC_REDUCED_MII_POLL;
1945 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1946 break;
1947 case DC_DEVICEID_987x5:
1948 case DC_DEVICEID_EN1217:
1949 /*
1950 * Macronix MX98715AEC-C/D/E parts have only a
1951 * 128-bit hash table. We need to deal with these
1952 * in the same manner as the PNIC II so that we
1953 * get the right number of bits out of the
1954 * CRC routine.
1955 */
1956 if (revision >= DC_REVISION_98715AEC_C &&
1957 revision < DC_REVISION_98725)
1958 sc->dc_flags |= DC_128BIT_HASH;
1959 sc->dc_type = DC_TYPE_987x5;
1960 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1961 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1962 break;
1963 case DC_DEVICEID_98727:
1964 sc->dc_type = DC_TYPE_987x5;
1965 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1966 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1967 break;
1968 case DC_DEVICEID_82C115:
1969 sc->dc_type = DC_TYPE_PNICII;
1970 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
1971 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1972 break;
1973 case DC_DEVICEID_82C168:
1974 sc->dc_type = DC_TYPE_PNIC;
1975 sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
1976 sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
1977 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
1978 if (revision < DC_REVISION_82C169)
1979 sc->dc_pmode = DC_PMODE_SYM;
1980 break;
1981 case DC_DEVICEID_AX88140A:
1982 sc->dc_type = DC_TYPE_ASIX;
1983 sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
1984 sc->dc_flags |= DC_REDUCED_MII_POLL;
1985 sc->dc_pmode = DC_PMODE_MII;
1986 break;
1987 case DC_DEVICEID_X3201:
1988 sc->dc_type = DC_TYPE_XIRCOM;
1989 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
1990 DC_TX_ALIGN;
1991 /*
1992 * We don't actually need to coalesce, but we're doing
1993 * it to obtain a double word aligned buffer.
1994 * The DC_TX_COALESCE flag is required.
1995 */
1996 sc->dc_pmode = DC_PMODE_MII;
1997 break;
1998 case DC_DEVICEID_RS7112:
1999 sc->dc_type = DC_TYPE_CONEXANT;
2000 sc->dc_flags |= DC_TX_INTR_ALWAYS;
2001 sc->dc_flags |= DC_REDUCED_MII_POLL;
2002 sc->dc_pmode = DC_PMODE_MII;
2003 dc_read_srom(sc, sc->dc_romwidth);
2004 break;
2005 default:
2006 printf("dc%d: unknown device: %x\n", sc->dc_unit,
2007 sc->dc_info->dc_did);
2008 break;
2009 }
2010
2011 /* Save the cache line size. */
2012 if (DC_IS_DAVICOM(sc))
2013 sc->dc_cachesize = 0;
2014 else
2015 sc->dc_cachesize = pci_read_config(dev,
2016 DC_PCI_CFLT, 4) & 0xFF;
2017
2018 /* Reset the adapter. */
2019 dc_reset(sc);
2020
2021 /* Take 21143 out of snooze mode */
2022 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
2023 command = pci_read_config(dev, DC_PCI_CFDD, 4);
2024 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
2025 pci_write_config(dev, DC_PCI_CFDD, command, 4);
2026 }
2027
2028 /*
2029 * Try to learn something about the supported media.
2030 * We know that ASIX and ADMtek and Davicom devices
2031 * will *always* be using MII media, so that's a no-brainer.
2032 * The tricky ones are the Macronix/PNIC II and the
2033 * Intel 21143.
2034 */
2035 if (DC_IS_INTEL(sc))
2036 dc_parse_21143_srom(sc);
2037 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2038 if (sc->dc_type == DC_TYPE_98713)
2039 sc->dc_pmode = DC_PMODE_MII;
2040 else
2041 sc->dc_pmode = DC_PMODE_SYM;
2042 } else if (!sc->dc_pmode)
2043 sc->dc_pmode = DC_PMODE_MII;
2044
2045 /*
2046 * Get station address from the EEPROM.
2047 */
2048 switch(sc->dc_type) {
2049 case DC_TYPE_98713:
2050 case DC_TYPE_98713A:
2051 case DC_TYPE_987x5:
2052 case DC_TYPE_PNICII:
2053 dc_read_eeprom(sc, (caddr_t)&mac_offset,
2054 (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
2055 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
2056 break;
2057 case DC_TYPE_PNIC:
2058 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
2059 break;
2060 case DC_TYPE_DM9102:
2061 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2062 #ifdef __sparc64__
2063 /*
2064 * If this is an onboard dc(4) the station address read from
2065 * the EEPROM is all zero and we have to get it from the fcode.
2066 */
2067 for (i = 0; i < ETHER_ADDR_LEN; i++)
2068 if (eaddr[i] != 0x00)
2069 break;
2070 if (i >= ETHER_ADDR_LEN)
2071 OF_getetheraddr(dev, eaddr);
2072 #endif
2073 break;
2074 case DC_TYPE_21143:
2075 case DC_TYPE_ASIX:
2076 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2077 break;
2078 case DC_TYPE_AL981:
2079 case DC_TYPE_AN985:
2080 *(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0);
2081 *(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1);
2082 break;
2083 case DC_TYPE_CONEXANT:
2084 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
2085 ETHER_ADDR_LEN);
2086 break;
2087 case DC_TYPE_XIRCOM:
2088 /* The MAC comes from the CIS. */
2089 mac = pci_get_ether(dev);
2090 if (!mac) {
2091 device_printf(dev, "No station address in CIS!\n");
2092 error = ENXIO;
2093 goto fail;
2094 }
2095 bcopy(mac, eaddr, ETHER_ADDR_LEN);
2096 break;
2097 default:
2098 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2099 break;
2100 }
2101
2102 sc->dc_unit = unit;
2103 bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
2104
2105 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
2106 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT,
2107 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1,
2108 sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag);
2109 if (error) {
2110 printf("dc%d: failed to allocate busdma tag\n", unit);
2111 error = ENXIO;
2112 goto fail;
2113 }
2114 error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata,
2115 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap);
2116 if (error) {
2117 printf("dc%d: failed to allocate DMA safe memory\n", unit);
2118 error = ENXIO;
2119 goto fail;
2120 }
2121 error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata,
2122 sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr,
2123 BUS_DMA_NOWAIT);
2124 if (error) {
2125 printf("dc%d: cannot get address of the descriptors\n", unit);
2126 error = ENXIO;
2127 goto fail;
2128 }
2129
2130 /*
2131 * Allocate a busdma tag and DMA safe memory for the multicast
2132 * setup frame.
2133 */
2134 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT,
2135 BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1,
2136 DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag);
2137 if (error) {
2138 printf("dc%d: failed to allocate busdma tag\n", unit);
2139 error = ENXIO;
2140 goto fail;
2141 }
2142 error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
2143 BUS_DMA_NOWAIT, &sc->dc_smap);
2144 if (error) {
2145 printf("dc%d: failed to allocate DMA safe memory\n", unit);
2146 error = ENXIO;
2147 goto fail;
2148 }
2149 error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
2150 DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
2151 if (error) {
2152 printf("dc%d: cannot get address of the descriptors\n", unit);
2153 error = ENXIO;
2154 goto fail;
2155 }
2156
2157 /* Allocate a busdma tag for mbufs. */
2158 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2159 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, DC_TX_LIST_CNT, MCLBYTES,
2160 0, NULL, NULL, &sc->dc_mtag);
2161 if (error) {
2162 printf("dc%d: failed to allocate busdma tag\n", unit);
2163 error = ENXIO;
2164 goto fail;
2165 }
2166
2167 /* Create the TX/RX busdma maps. */
2168 for (i = 0; i < DC_TX_LIST_CNT; i++) {
2169 error = bus_dmamap_create(sc->dc_mtag, 0,
2170 &sc->dc_cdata.dc_tx_map[i]);
2171 if (error) {
2172 printf("dc%d: failed to init TX ring\n", unit);
2173 error = ENXIO;
2174 goto fail;
2175 }
2176 }
2177 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2178 error = bus_dmamap_create(sc->dc_mtag, 0,
2179 &sc->dc_cdata.dc_rx_map[i]);
2180 if (error) {
2181 printf("dc%d: failed to init RX ring\n", unit);
2182 error = ENXIO;
2183 goto fail;
2184 }
2185 }
2186 error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap);
2187 if (error) {
2188 printf("dc%d: failed to init RX ring\n", unit);
2189 error = ENXIO;
2190 goto fail;
2191 }
2192
2193 ifp = &sc->arpcom.ac_if;
2194 ifp->if_softc = sc;
2195 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2196 /* XXX: bleah, MTU gets overwritten in ether_ifattach() */
2197 ifp->if_mtu = ETHERMTU;
2198 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2199 if (!IS_MPSAFE)
2200 ifp->if_flags |= IFF_NEEDSGIANT;
2201 ifp->if_ioctl = dc_ioctl;
2202 ifp->if_start = dc_start;
2203 ifp->if_watchdog = dc_watchdog;
2204 ifp->if_init = dc_init;
2205 ifp->if_baudrate = 10000000;
2206 IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
2207 ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1;
2208 IFQ_SET_READY(&ifp->if_snd);
2209
2210 /*
2211 * Do MII setup. If this is a 21143, check for a PHY on the
2212 * MII bus after applying any necessary fixups to twiddle the
2213 * GPIO bits. If we don't end up finding a PHY, restore the
2214 * old selection (SIA only or SIA/SYM) and attach the dcphy
2215 * driver instead.
2216 */
2217 if (DC_IS_INTEL(sc)) {
2218 dc_apply_fixup(sc, IFM_AUTO);
2219 tmp = sc->dc_pmode;
2220 sc->dc_pmode = DC_PMODE_MII;
2221 }
2222
2223 /*
2224 * Setup General Purpose port mode and data so the tulip can talk
2225 * to the MII. This needs to be done before mii_phy_probe so that
2226 * we can actually see them.
2227 */
2228 if (DC_IS_XIRCOM(sc)) {
2229 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2230 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2231 DELAY(10);
2232 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2233 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2234 DELAY(10);
2235 }
2236
2237 error = mii_phy_probe(dev, &sc->dc_miibus,
2238 dc_ifmedia_upd, dc_ifmedia_sts);
2239
2240 if (error && DC_IS_INTEL(sc)) {
2241 sc->dc_pmode = tmp;
2242 if (sc->dc_pmode != DC_PMODE_SIA)
2243 sc->dc_pmode = DC_PMODE_SYM;
2244 sc->dc_flags |= DC_21143_NWAY;
2245 mii_phy_probe(dev, &sc->dc_miibus,
2246 dc_ifmedia_upd, dc_ifmedia_sts);
2247 /*
2248 * For non-MII cards, we need to have the 21143
2249 * drive the LEDs. Except there are some systems
2250 * like the NEC VersaPro NoteBook PC which have no
2251 * LEDs, and twiddling these bits has adverse effects
2252 * on them. (I.e. you suddenly can't get a link.)
2253 */
2254 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
2255 sc->dc_flags |= DC_TULIP_LEDS;
2256 error = 0;
2257 }
2258
2259 if (error) {
2260 printf("dc%d: MII without any PHY!\n", sc->dc_unit);
2261 goto fail;
2262 }
2263
2264 if (DC_IS_ADMTEK(sc)) {
2265 /*
2266 * Set automatic TX underrun recovery for the ADMtek chips
2267 */
2268 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
2269 }
2270
2271 /*
2272 * Tell the upper layer(s) we support long frames.
2273 */
2274 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2275 ifp->if_capabilities |= IFCAP_VLAN_MTU;
2276 #ifdef DEVICE_POLLING
2277 ifp->if_capabilities |= IFCAP_POLLING;
2278 #endif
2279 ifp->if_capenable = ifp->if_capabilities;
2280
2281 callout_init(&sc->dc_stat_ch, IS_MPSAFE ? CALLOUT_MPSAFE : 0);
2282
2283 #ifdef SRM_MEDIA
2284 sc->dc_srm_media = 0;
2285
2286 /* Remember the SRM console media setting */
2287 if (DC_IS_INTEL(sc)) {
2288 command = pci_read_config(dev, DC_PCI_CFDD, 4);
2289 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
2290 switch ((command >> 8) & 0xff) {
2291 case 3:
2292 sc->dc_srm_media = IFM_10_T;
2293 break;
2294 case 4:
2295 sc->dc_srm_media = IFM_10_T | IFM_FDX;
2296 break;
2297 case 5:
2298 sc->dc_srm_media = IFM_100_TX;
2299 break;
2300 case 6:
2301 sc->dc_srm_media = IFM_100_TX | IFM_FDX;
2302 break;
2303 }
2304 if (sc->dc_srm_media)
2305 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER;
2306 }
2307 #endif
2308
2309 /*
2310 * Call MI attach routine.
2311 */
2312 ether_ifattach(ifp, eaddr);
2313
2314 /* Hook interrupt last to avoid having to lock softc */
2315 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET |
2316 (IS_MPSAFE ? INTR_MPSAFE : 0),
2317 dc_intr, sc, &sc->dc_intrhand);
2318
2319 if (error) {
2320 printf("dc%d: couldn't set up irq\n", unit);
2321 ether_ifdetach(ifp);
2322 goto fail;
2323 }
2324
2325 fail:
2326 if (error)
2327 dc_detach(dev);
2328 return (error);
2329 }
2330
2331 /*
2332 * Shutdown hardware and free up resources. This can be called any
2333 * time after the mutex has been initialized. It is called in both
2334 * the error case in attach and the normal detach case so it needs
2335 * to be careful about only freeing resources that have actually been
2336 * allocated.
2337 */
2338 static int
2339 dc_detach(device_t dev)
2340 {
2341 struct dc_softc *sc;
2342 struct ifnet *ifp;
2343 struct dc_mediainfo *m;
2344 int i;
2345
2346 sc = device_get_softc(dev);
2347 KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));
2348 DC_LOCK(sc);
2349
2350 ifp = &sc->arpcom.ac_if;
2351
2352 /* These should only be active if attach succeeded */
2353 if (device_is_attached(dev)) {
2354 dc_stop(sc);
2355 ether_ifdetach(ifp);
2356 }
2357 if (sc->dc_miibus)
2358 device_delete_child(dev, sc->dc_miibus);
2359 bus_generic_detach(dev);
2360
2361 if (sc->dc_intrhand)
2362 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
2363 if (sc->dc_irq)
2364 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
2365 if (sc->dc_res)
2366 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
2367
2368 if (sc->dc_cdata.dc_sbuf != NULL)
2369 bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap);
2370 if (sc->dc_ldata != NULL)
2371 bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap);
2372 for (i = 0; i < DC_TX_LIST_CNT; i++)
2373 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]);
2374 for (i = 0; i < DC_RX_LIST_CNT; i++)
2375 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
2376 bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap);
2377 if (sc->dc_stag)
2378 bus_dma_tag_destroy(sc->dc_stag);
2379 if (sc->dc_mtag)
2380 bus_dma_tag_destroy(sc->dc_mtag);
2381 if (sc->dc_ltag)
2382 bus_dma_tag_destroy(sc->dc_ltag);
2383
2384 free(sc->dc_pnic_rx_buf, M_DEVBUF);
2385
2386 while (sc->dc_mi != NULL) {
2387 m = sc->dc_mi->dc_next;
2388 free(sc->dc_mi, M_DEVBUF);
2389 sc->dc_mi = m;
2390 }
2391 free(sc->dc_srom, M_DEVBUF);
2392
2393 DC_UNLOCK(sc);
2394 mtx_destroy(&sc->dc_mtx);
2395
2396 return (0);
2397 }
2398
2399 /*
2400 * Initialize the transmit descriptors.
2401 */
2402 static int
2403 dc_list_tx_init(struct dc_softc *sc)
2404 {
2405 struct dc_chain_data *cd;
2406 struct dc_list_data *ld;
2407 int i, nexti;
2408
2409 cd = &sc->dc_cdata;
2410 ld = sc->dc_ldata;
2411 for (i = 0; i < DC_TX_LIST_CNT; i++) {
2412 if (i == DC_TX_LIST_CNT - 1)
2413 nexti = 0;
2414 else
2415 nexti = i + 1;
2416 ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
2417 cd->dc_tx_chain[i] = NULL;
2418 ld->dc_tx_list[i].dc_data = 0;
2419 ld->dc_tx_list[i].dc_ctl = 0;
2420 }
2421
2422 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
2423 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2424 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2425 return (0);
2426 }
2427
2428
2429 /*
2430 * Initialize the RX descriptors and allocate mbufs for them. Note that
2431 * we arrange the descriptors in a closed ring, so that the last descriptor
2432 * points back to the first.
2433 */
2434 static int
2435 dc_list_rx_init(struct dc_softc *sc)
2436 {
2437 struct dc_chain_data *cd;
2438 struct dc_list_data *ld;
2439 int i, nexti;
2440
2441 cd = &sc->dc_cdata;
2442 ld = sc->dc_ldata;
2443
2444 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2445 if (dc_newbuf(sc, i, 1) != 0)
2446 return (ENOBUFS);
2447 if (i == DC_RX_LIST_CNT - 1)
2448 nexti = 0;
2449 else
2450 nexti = i + 1;
2451 ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
2452 }
2453
2454 cd->dc_rx_prod = 0;
2455 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2456 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2457 return (0);
2458 }
2459
2460 static void
2461 dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
2462 void *arg;
2463 bus_dma_segment_t *segs;
2464 int nseg;
2465 bus_size_t mapsize;
2466 int error;
2467 {
2468 struct dc_softc *sc;
2469 struct dc_desc *c;
2470
2471 sc = arg;
2472 c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur];
2473 if (error) {
2474 sc->dc_cdata.dc_rx_err = error;
2475 return;
2476 }
2477
2478 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
2479 sc->dc_cdata.dc_rx_err = 0;
2480 c->dc_data = htole32(segs->ds_addr);
2481 }
2482
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * i     - RX ring slot to (re)arm.
 * alloc - non-zero: allocate and DMA-map a fresh cluster; zero:
 *         recycle the mbuf already attached to the slot (no remap).
 *
 * Returns 0 on success or an errno on allocation/mapping failure;
 * on failure the slot's previous mbuf and map are left intact.
 */
static int
dc_newbuf(struct dc_softc *sc, int i, int alloc)
{
        struct mbuf *m_new;
        bus_dmamap_t tmp;
        int error;

        if (alloc) {
                m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m_new == NULL)
                        return (ENOBUFS);
        } else {
                /* Reuse the existing mbuf; rewind its data pointer. */
                m_new = sc->dc_cdata.dc_rx_chain[i];
                m_new->m_data = m_new->m_ext.ext_buf;
        }
        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        /*
         * Trim 8 bytes from the front of the cluster.
         * NOTE(review): presumably for 64-bit alignment/headroom of
         * the DMA buffer -- confirm against the chip's requirements.
         */
        m_adj(m_new, sizeof(u_int64_t));

        /*
         * If this is a PNIC chip, zero the buffer. This is part
         * of the workaround for the receive bug in the 82c168 and
         * 82c169 chips.
         */
        if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
                bzero(mtod(m_new, char *), m_new->m_len);

        /* No need to remap the mbuf if we're reusing it. */
        if (alloc) {
                /* dc_dma_map_rxbuf() reads dc_rx_cur to pick the slot. */
                sc->dc_cdata.dc_rx_cur = i;
                /*
                 * Load into the spare map first so the slot's current
                 * mapping survives if the load fails.
                 */
                error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap,
                    m_new, dc_dma_map_rxbuf, sc, 0);
                if (error) {
                        m_freem(m_new);
                        return (error);
                }
                /* The callback may have recorded its own error. */
                if (sc->dc_cdata.dc_rx_err != 0) {
                        m_freem(m_new);
                        return (sc->dc_cdata.dc_rx_err);
                }
                /* Success: swap the spare map with the slot's map. */
                bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
                tmp = sc->dc_cdata.dc_rx_map[i];
                sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
                sc->dc_sparemap = tmp;
                sc->dc_cdata.dc_rx_chain[i] = m_new;
        }

        /* Hand the descriptor back to the chip (set the OWN bit). */
        sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
        sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
        bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
            BUS_DMASYNC_PREREAD);
        bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
        return (0);
}
2540
2541 /*
2542 * Grrrrr.
2543 * The PNIC chip has a terrible bug in it that manifests itself during
2544  * periods of heavy activity. The exact mode of failure is difficult to
2545 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
2546 * will happen on slow machines. The bug is that sometimes instead of
2547 * uploading one complete frame during reception, it uploads what looks
2548 * like the entire contents of its FIFO memory. The frame we want is at
2549 * the end of the whole mess, but we never know exactly how much data has
2550 * been uploaded, so salvaging the frame is hard.
2551 *
2552 * There is only one way to do it reliably, and it's disgusting.
2553 * Here's what we know:
2554 *
2555 * - We know there will always be somewhere between one and three extra
2556 * descriptors uploaded.
2557 *
2558 * - We know the desired received frame will always be at the end of the
2559 * total data upload.
2560 *
2561 * - We know the size of the desired received frame because it will be
2562 * provided in the length field of the status word in the last descriptor.
2563 *
2564 * Here's what we do:
2565 *
2566 * - When we allocate buffers for the receive ring, we bzero() them.
2567 * This means that we know that the buffer contents should be all
2568 * zeros, except for data uploaded by the chip.
2569 *
2570 * - We also force the PNIC chip to upload frames that include the
2571 * ethernet CRC at the end.
2572 *
2573 * - We gather all of the bogus frame data into a single buffer.
2574 *
2575 * - We then position a pointer at the end of this buffer and scan
2576 * backwards until we encounter the first non-zero byte of data.
2577 * This is the end of the received frame. We know we will encounter
2578 * some data at the end of the frame because the CRC will always be
2579 * there, so even if the sender transmits a packet of all zeros,
2580 * we won't be fooled.
2581 *
2582 * - We know the size of the actual received frame, so we subtract
2583 * that value from the current pointer location. This brings us
2584 * to the start of the actual received packet.
2585 *
2586 * - We copy this into an mbuf and pass it on, along with the actual
2587 * frame length.
2588 *
2589 * The performance hit is tremendous, but it beats dropping frames all
2590 * the time.
2591 */
2592
2593 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
        struct dc_desc *cur_rx;
        struct dc_desc *c = NULL;
        struct mbuf *m = NULL;
        unsigned char *ptr;
        int i, total_len;
        u_int32_t rxstat = 0;

        /* Start at the slot where dc_rxeof() saw the first fragment. */
        i = sc->dc_pnic_rx_bug_save;
        cur_rx = &sc->dc_ldata->dc_rx_list[idx];
        ptr = sc->dc_pnic_rx_buf;
        bzero(ptr, DC_RXLEN * 5);

        /* Copy all the bytes from the bogus buffers. */
        while (1) {
                c = &sc->dc_ldata->dc_rx_list[i];
                rxstat = le32toh(c->dc_status);
                m = sc->dc_cdata.dc_rx_chain[i];
                bcopy(mtod(m, char *), ptr, DC_RXLEN);
                ptr += DC_RXLEN;
                /* If this is the last buffer, break out. */
                if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
                        break;
                /* Re-arm the fragment's slot (recycle, no allocation). */
                dc_newbuf(sc, i, 0);
                DC_INC(i, DC_RX_LIST_CNT);
        }

        /* Find the length of the actual receive frame. */
        total_len = DC_RXBYTES(rxstat);

        /*
         * Scan backwards until we hit a non-zero byte.  The buffers
         * were zeroed when armed (see dc_newbuf), so the first
         * non-zero byte from the end belongs to the frame's CRC.
         */
        while (*ptr == 0x00)
                ptr--;

        /* Round off. */
        if ((uintptr_t)(ptr) & 0x3)
                ptr -= 1;

        /* Now find the start of the frame. */
        ptr -= total_len;
        if (ptr < sc->dc_pnic_rx_buf)
                ptr = sc->dc_pnic_rx_buf;

        /*
         * Now copy the salvaged frame to the last mbuf and fake up
         * the status word to make it look like a successful
         * frame reception.
         */
        dc_newbuf(sc, i, 0);
        bcopy(ptr, mtod(m, char *), total_len);
        cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2648
2649 /*
2650 * This routine searches the RX ring for dirty descriptors in the
2651 * event that the rxeof routine falls out of sync with the chip's
2652 * current descriptor pointer. This may happen sometimes as a result
2653 * of a "no RX buffer available" condition that happens when the chip
2654 * consumes all of the RX buffers before the driver has a chance to
2655 * process the RX ring. This routine may need to be called more than
2656 * once to bring the driver back in sync with the chip, however we
2657 * should still be getting RX DONE interrupts to drive the search
2658 * for new packets in the RX ring, so we should catch up eventually.
2659 */
2660 static int
2661 dc_rx_resync(struct dc_softc *sc)
2662 {
2663 struct dc_desc *cur_rx;
2664 int i, pos;
2665
2666 pos = sc->dc_cdata.dc_rx_prod;
2667
2668 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2669 cur_rx = &sc->dc_ldata->dc_rx_list[pos];
2670 if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
2671 break;
2672 DC_INC(pos, DC_RX_LIST_CNT);
2673 }
2674
2675 /* If the ring really is empty, then just return. */
2676 if (i == DC_RX_LIST_CNT)
2677 return (0);
2678
2679 /* We've fallen behing the chip: catch it. */
2680 sc->dc_cdata.dc_rx_prod = pos;
2681
2682 return (EAGAIN);
2683 }
2684
2685 /*
2686 * A frame has been uploaded: pass the resulting mbuf chain up to
2687 * the higher level protocols.
2688 */
static void
dc_rxeof(struct dc_softc *sc)
{
        struct mbuf *m;
        struct ifnet *ifp;
        struct dc_desc *cur_rx;
        int i, total_len = 0;
        u_int32_t rxstat;

        DC_LOCK_ASSERT(sc);

        ifp = &sc->arpcom.ac_if;
        i = sc->dc_cdata.dc_rx_prod;

        /* Pick up the descriptors the chip has written back. */
        bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
        while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
            DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
                /* In polling mode only consume the budgeted packets. */
                if (ifp->if_flags & IFF_POLLING) {
                        if (sc->rxcycles <= 0)
                                break;
                        sc->rxcycles--;
                }
#endif
                cur_rx = &sc->dc_ldata->dc_rx_list[i];
                rxstat = le32toh(cur_rx->dc_status);
                m = sc->dc_cdata.dc_rx_chain[i];
                bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
                    BUS_DMASYNC_POSTREAD);
                total_len = DC_RXBYTES(rxstat);

                /*
                 * PNIC workaround: a bogus upload arrives as multiple
                 * fragments.  Remember where the first fragment was
                 * seen, keep consuming until the last one shows up,
                 * then let dc_pnic_rx_bug_war() salvage the real frame
                 * (see the large comment above that function).
                 */
                if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
                        if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
                                if (rxstat & DC_RXSTAT_FIRSTFRAG)
                                        sc->dc_pnic_rx_bug_save = i;
                                if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
                                        DC_INC(i, DC_RX_LIST_CNT);
                                        continue;
                                }
                                dc_pnic_rx_bug_war(sc, i);
                                rxstat = le32toh(cur_rx->dc_status);
                                total_len = DC_RXBYTES(rxstat);
                        }
                }

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring. However, don't report long
                 * frames as errors since they could be vlans.
                 */
                if ((rxstat & DC_RXSTAT_RXERR)) {
                        if (!(rxstat & DC_RXSTAT_GIANT) ||
                            (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
                            DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
                            DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
                                ifp->if_ierrors++;
                                if (rxstat & DC_RXSTAT_COLLSEEN)
                                        ifp->if_collisions++;
                                dc_newbuf(sc, i, 0);
                                /*
                                 * CRC errors are survivable; any other
                                 * error triggers a full reinit.
                                 */
                                if (rxstat & DC_RXSTAT_CRCERR) {
                                        DC_INC(i, DC_RX_LIST_CNT);
                                        continue;
                                } else {
                                        dc_init(sc);
                                        return;
                                }
                        }
                }

                /* No errors; receive the packet. */
                total_len -= ETHER_CRC_LEN;
#ifdef __i386__
                /*
                 * On the x86 we do not have alignment problems, so try to
                 * allocate a new buffer for the receive ring, and pass up
                 * the one where the packet is already, saving the expensive
                 * copy done in m_devget().
                 * If we are on an architecture with alignment problems, or
                 * if the allocation fails, then use m_devget and leave the
                 * existing buffer in the receive ring.
                 */
                if (dc_quick && dc_newbuf(sc, i, 1) == 0) {
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = m->m_len = total_len;
                        DC_INC(i, DC_RX_LIST_CNT);
                } else
#endif
                {
                        struct mbuf *m0;

                        m0 = m_devget(mtod(m, char *), total_len,
                            ETHER_ALIGN, ifp, NULL);
                        /* Data was copied; just recycle the old mbuf. */
                        dc_newbuf(sc, i, 0);
                        DC_INC(i, DC_RX_LIST_CNT);
                        if (m0 == NULL) {
                                ifp->if_ierrors++;
                                continue;
                        }
                        m = m0;
                }

                ifp->if_ipackets++;
                /* Drop the driver lock around the stack upcall. */
                DC_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                DC_LOCK(sc);
        }

        /* Remember where to resume on the next invocation. */
        sc->dc_cdata.dc_rx_prod = i;
}
2800
2801 /*
2802 * A frame was downloaded to the chip. It's safe for us to clean up
2803 * the list buffers.
2804 */
2805
static void
dc_txeof(struct dc_softc *sc)
{
        struct dc_desc *cur_tx = NULL;
        struct ifnet *ifp;
        int idx;
        u_int32_t ctl, txstat;

        ifp = &sc->arpcom.ac_if;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
        idx = sc->dc_cdata.dc_tx_cons;
        while (idx != sc->dc_cdata.dc_tx_prod) {

                cur_tx = &sc->dc_ldata->dc_tx_list[idx];
                txstat = le32toh(cur_tx->dc_status);
                ctl = le32toh(cur_tx->dc_ctl);

                /* Chip still owns this descriptor: stop here. */
                if (txstat & DC_TXSTAT_OWN)
                        break;

                /*
                 * Intermediate fragments and setup frames carry no
                 * completed packet; just reclaim the slot.
                 */
                if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) {
                        if (ctl & DC_TXCTL_SETUP) {
                                /*
                                 * Yes, the PNIC is so brain damaged
                                 * that it will sometimes generate a TX
                                 * underrun error while DMAing the RX
                                 * filter setup frame. If we detect this,
                                 * we have to send the setup frame again,
                                 * or else the filter won't be programmed
                                 * correctly.
                                 */
                                if (DC_IS_PNIC(sc)) {
                                        if (txstat & DC_TXSTAT_ERRSUM)
                                                dc_setfilt(sc);
                                }
                                sc->dc_cdata.dc_tx_chain[idx] = NULL;
                        }
                        sc->dc_cdata.dc_tx_cnt--;
                        DC_INC(idx, DC_TX_LIST_CNT);
                        continue;
                }

                if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
                        /*
                         * XXX: Why does my Xircom taunt me so?
                         * For some reason it likes setting the CARRLOST flag
                         * even when the carrier is there. wtf?!?
                         * Who knows, but Conexant chips have the
                         * same problem. Maybe they took lessons
                         * from Xircom.
                         */
                        if (/*sc->dc_type == DC_TYPE_21143 &&*/
                            sc->dc_pmode == DC_PMODE_MII &&
                            ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
                            DC_TXSTAT_NOCARRIER)))
                                txstat &= ~DC_TXSTAT_ERRSUM;
                } else {
                        /* Ignore spurious error summaries in MII mode. */
                        if (/*sc->dc_type == DC_TYPE_21143 &&*/
                            sc->dc_pmode == DC_PMODE_MII &&
                            ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
                            DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
                                txstat &= ~DC_TXSTAT_ERRSUM;
                }

                if (txstat & DC_TXSTAT_ERRSUM) {
                        ifp->if_oerrors++;
                        if (txstat & DC_TXSTAT_EXCESSCOLL)
                                ifp->if_collisions++;
                        if (txstat & DC_TXSTAT_LATECOLL)
                                ifp->if_collisions++;
                        /*
                         * Underruns are handled by dc_tx_underrun();
                         * any other TX error requires a full reinit.
                         */
                        if (!(txstat & DC_TXSTAT_UNDERRUN)) {
                                dc_init(sc);
                                return;
                        }
                }

                /* Collision count field starts at bit 3 of the status. */
                ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

                ifp->if_opackets++;
                if (sc->dc_cdata.dc_tx_chain[idx] != NULL) {
                        /* Unmap and free the transmitted mbuf chain. */
                        bus_dmamap_sync(sc->dc_mtag,
                            sc->dc_cdata.dc_tx_map[idx],
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->dc_mtag,
                            sc->dc_cdata.dc_tx_map[idx]);
                        m_freem(sc->dc_cdata.dc_tx_chain[idx]);
                        sc->dc_cdata.dc_tx_chain[idx] = NULL;
                }

                sc->dc_cdata.dc_tx_cnt--;
                DC_INC(idx, DC_TX_LIST_CNT);
        }

        if (idx != sc->dc_cdata.dc_tx_cons) {
                /* Some buffers have been freed. */
                sc->dc_cdata.dc_tx_cons = idx;
                ifp->if_flags &= ~IFF_OACTIVE;
        }
        /* Cancel the watchdog once the TX ring drains completely. */
        ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 0 : 5;
}
2911
/*
 * Periodic timer: polls the PHY for link state and reschedules itself.
 */
static void
dc_tick(void *xsc)
{
        struct dc_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;
        u_int32_t r;

        sc = xsc;
        DC_LOCK(sc);
        ifp = &sc->arpcom.ac_if;
        mii = device_get_softc(sc->dc_miibus);

        if (sc->dc_flags & DC_REDUCED_MII_POLL) {
                if (sc->dc_flags & DC_21143_NWAY) {
                        /*
                         * 21143 NWAY: if the chip's link-status bits
                         * disagree with the currently selected media,
                         * drop the link flag and renegotiate.
                         */
                        r = CSR_READ_4(sc, DC_10BTSTAT);
                        if (IFM_SUBTYPE(mii->mii_media_active) ==
                            IFM_100_TX && (r & DC_TSTAT_LS100)) {
                                sc->dc_link = 0;
                                mii_mediachg(mii);
                        }
                        if (IFM_SUBTYPE(mii->mii_media_active) ==
                            IFM_10_T && (r & DC_TSTAT_LS10)) {
                                sc->dc_link = 0;
                                mii_mediachg(mii);
                        }
                        if (sc->dc_link == 0)
                                mii_tick(mii);
                } else {
                        /*
                         * Reduced polling: only run mii_tick() while
                         * the receiver is in the wait state and no
                         * transmits are pending.
                         */
                        r = CSR_READ_4(sc, DC_ISR);
                        if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
                            sc->dc_cdata.dc_tx_cnt == 0) {
                                mii_tick(mii);
                                if (!(mii->mii_media_status & IFM_ACTIVE))
                                        sc->dc_link = 0;
                        }
                }
        } else
                mii_tick(mii);

        /*
         * When the init routine completes, we expect to be able to send
         * packets right away, and in fact the network code will send a
         * gratuitous ARP the moment the init routine marks the interface
         * as running. However, even though the MAC may have been initialized,
         * there may be a delay of a few seconds before the PHY completes
         * autonegotiation and the link is brought up. Any transmissions
         * made during that delay will be lost. Dealing with this is tricky:
         * we can't just pause in the init routine while waiting for the
         * PHY to come ready since that would bring the whole system to
         * a screeching halt for several seconds.
         *
         * What we do here is prevent the TX start routine from sending
         * any packets until a link has been established. After the
         * interface has been initialized, the tick routine will poll
         * the state of the PHY until the IFM_ACTIVE flag is set. Until
         * that time, packets will stay in the send queue, and once the
         * link comes up, they will be flushed out to the wire.
         */
        if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
            IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
                sc->dc_link++;
                /* Link just came up: flush anything that queued up. */
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        dc_start(ifp);
        }

        /* Poll at 10Hz while NWAY autonegotiation has no link yet. */
        if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
                callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
        else
                callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);

        DC_UNLOCK(sc);
}
2985
2986 /*
2987 * A transmit underrun has occurred. Back off the transmit threshold,
2988 * or switch to store and forward mode if we have to.
2989 */
static void
dc_tx_underrun(struct dc_softc *sc)
{
        u_int32_t isr;
        int i;

        /* On Davicom chips a full reinit is the recovery path. */
        if (DC_IS_DAVICOM(sc))
                dc_init(sc);

        if (DC_IS_INTEL(sc)) {
                /*
                 * The real 21143 requires that the transmitter be idle
                 * in order to change the transmit threshold or store
                 * and forward state.
                 */
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

                /* Poll (up to DC_TIMEOUT times) for TX idle. */
                for (i = 0; i < DC_TIMEOUT; i++) {
                        isr = CSR_READ_4(sc, DC_ISR);
                        if (isr & DC_ISR_TX_IDLE)
                                break;
                        DELAY(10);
                }
                if (i == DC_TIMEOUT) {
                        printf("dc%d: failed to force tx to idle state\n",
                            sc->dc_unit);
                        dc_init(sc);
                }
        }

        /*
         * Bump the TX threshold; once it exceeds the maximum, fall
         * back to store-and-forward mode instead.
         */
        printf("dc%d: TX underrun -- ", sc->dc_unit);
        sc->dc_txthresh += DC_TXTHRESH_INC;
        if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
                printf("using store and forward mode\n");
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
        } else {
                printf("increasing TX threshold\n");
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
                DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
        }

        /* Restart the transmitter if we stopped it above. */
        if (DC_IS_INTEL(sc))
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
}
3034
3035 #ifdef DEVICE_POLLING
3036 static poll_handler_t dc_poll;
3037
/*
 * DEVICE_POLLING handler: services RX/TX without interrupts; on the
 * final (deregister) call it re-enables the chip's interrupt mask.
 */
static void
dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct dc_softc *sc = ifp->if_softc;

        /* Polling disabled via capenable: deregister ourselves. */
        if (!(ifp->if_capenable & IFCAP_POLLING)) {
                ether_poll_deregister(ifp);
                cmd = POLL_DEREGISTER;
        }
        if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
                /* Re-enable interrupts. */
                CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
                return;
        }
        DC_LOCK(sc);
        /* rxcycles bounds how many packets dc_rxeof() may consume. */
        sc->rxcycles = count;
        dc_rxeof(sc);
        dc_txeof(sc);
        if (!IFQ_IS_EMPTY(&ifp->if_snd) && !(ifp->if_flags & IFF_OACTIVE))
                dc_start(ifp);

        if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
                u_int32_t status;

                status = CSR_READ_4(sc, DC_ISR);
                status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
                    DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
                    DC_ISR_BUS_ERR);
                if (!status) {
                        DC_UNLOCK(sc);
                        return;
                }
                /* ack what we have */
                CSR_WRITE_4(sc, DC_ISR, status);

                if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
                        /*
                         * Discarded-frames counter: 16-bit count plus
                         * an 11-bit field at bit 17 (per the masks).
                         */
                        u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
                        ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff);

                        if (dc_rx_resync(sc))
                                dc_rxeof(sc);
                }
                /* restart transmit unit if necessary */
                if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
                        CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

                if (status & DC_ISR_TX_UNDERRUN)
                        dc_tx_underrun(sc);

                if (status & DC_ISR_BUS_ERR) {
                        printf("dc_poll: dc%d bus error\n", sc->dc_unit);
                        dc_reset(sc);
                        dc_init(sc);
                }
        }
        DC_UNLOCK(sc);
}
3095 #endif /* DEVICE_POLLING */
3096
/*
 * Interrupt handler: acknowledges and services chip events in a loop
 * until the ISR goes quiet, then restarts transmission if needed.
 */
static void
dc_intr(void *arg)
{
        struct dc_softc *sc;
        struct ifnet *ifp;
        u_int32_t status;

        sc = arg;

        /* Ignore interrupts while the device is suspended. */
        if (sc->suspended)
                return;

        /* Not our interrupt (shared line): bail before locking. */
        if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
                return;

        DC_LOCK(sc);
        ifp = &sc->arpcom.ac_if;
#ifdef DEVICE_POLLING
        if (ifp->if_flags & IFF_POLLING)
                goto done;
        if ((ifp->if_capenable & IFCAP_POLLING) &&
            ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */
                CSR_WRITE_4(sc, DC_IMR, 0x00000000);
                goto done;
        }
#endif

        /* Suppress unwanted interrupts */
        if (!(ifp->if_flags & IFF_UP)) {
                if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
                        dc_stop(sc);
                DC_UNLOCK(sc);
                return;
        }

        /* Disable interrupts. */
        CSR_WRITE_4(sc, DC_IMR, 0x00000000);

        /*
         * Service events until the status register is quiet; a read
         * of all ones is treated as a dead/removed card.
         */
        while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS)
            && status != 0xFFFFFFFF) {

                /* Acknowledge the events we are about to handle. */
                CSR_WRITE_4(sc, DC_ISR, status);

                if (status & DC_ISR_RX_OK) {
                        int curpkts;
                        curpkts = ifp->if_ipackets;
                        dc_rxeof(sc);
                        /*
                         * RX_OK but nothing received: we may have
                         * fallen out of sync with the chip's ring
                         * pointer; resync and retry.
                         */
                        if (curpkts == ifp->if_ipackets) {
                                while (dc_rx_resync(sc))
                                        dc_rxeof(sc);
                        }
                }

                if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
                        dc_txeof(sc);

                if (status & DC_ISR_TX_IDLE) {
                        dc_txeof(sc);
                        /* More frames pending: kick the transmitter. */
                        if (sc->dc_cdata.dc_tx_cnt) {
                                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
                                CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
                        }
                }

                if (status & DC_ISR_TX_UNDERRUN)
                        dc_tx_underrun(sc);

                if ((status & DC_ISR_RX_WATDOGTIMEO)
                    || (status & DC_ISR_RX_NOBUF)) {
                        int curpkts;
                        curpkts = ifp->if_ipackets;
                        dc_rxeof(sc);
                        if (curpkts == ifp->if_ipackets) {
                                while (dc_rx_resync(sc))
                                        dc_rxeof(sc);
                        }
                }

                /* Bus error: reset and reinitialize the chip. */
                if (status & DC_ISR_BUS_ERR) {
                        dc_reset(sc);
                        dc_init(sc);
                }
        }

        /* Re-enable interrupts. */
        CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                dc_start(ifp);

#ifdef DEVICE_POLLING
done:
#endif

        DC_UNLOCK(sc);
}
3193
/*
 * Busdma callback for TX loads: builds one descriptor per DMA segment
 * starting at dc_tx_prod.  Errors are reported back to dc_encap()
 * through dc_tx_err.
 */
static void
dc_dma_map_txbuf(arg, segs, nseg, mapsize, error)
        void *arg;
        bus_dma_segment_t *segs;
        int nseg;
        bus_size_t mapsize;
        int error;
{
        struct dc_softc *sc;
        struct dc_desc *f;
        int cur, first, frag, i;

        sc = arg;
        if (error) {
                sc->dc_cdata.dc_tx_err = error;
                return;
        }

        first = cur = frag = sc->dc_cdata.dc_tx_prod;
        for (i = 0; i < nseg; i++) {
                /*
                 * NOTE(review): ADMtek workaround -- appears to refuse
                 * to place a continuation fragment in the ring's last
                 * slot; the partial chain is unloaded and ENOBUFS
                 * returned.  Confirm against ADMtek errata.
                 */
                if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
                    (frag == (DC_TX_LIST_CNT - 1)) &&
                    (first != sc->dc_cdata.dc_tx_first)) {
                        bus_dmamap_unload(sc->dc_mtag,
                            sc->dc_cdata.dc_tx_map[first]);
                        sc->dc_cdata.dc_tx_err = ENOBUFS;
                        return;
                }

                /* One descriptor per DMA segment. */
                f = &sc->dc_ldata->dc_tx_list[frag];
                f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
                if (i == 0) {
                        /*
                         * Leave the first descriptor's OWN bit clear
                         * for now so the chip cannot start on a
                         * half-built chain; it is set last, below.
                         */
                        f->dc_status = 0;
                        f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
                } else
                        f->dc_status = htole32(DC_TXSTAT_OWN);
                f->dc_data = htole32(segs[i].ds_addr);
                cur = frag;
                DC_INC(frag, DC_TX_LIST_CNT);
        }

        sc->dc_cdata.dc_tx_err = 0;
        sc->dc_cdata.dc_tx_prod = frag;
        sc->dc_cdata.dc_tx_cnt += nseg;
        sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
        /* The mbuf pointer is recorded on the last fragment's slot. */
        sc->dc_cdata.dc_tx_chain[cur] = sc->dc_cdata.dc_tx_mapping;
        /* Request a TX completion interrupt per the chip quirk flags. */
        if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
                sc->dc_ldata->dc_tx_list[first].dc_ctl |=
                    htole32(DC_TXCTL_FINT);
        if (sc->dc_flags & DC_TX_INTR_ALWAYS)
                sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
        if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
                sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
        /* All descriptors set up: hand the whole chain to the chip. */
        sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);
}
3249
3250 /*
3251 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
3252 * pointers to the fragment pointers.
3253 */
3254 static int
3255 dc_encap(struct dc_softc *sc, struct mbuf **m_head)
3256 {
3257 struct mbuf *m;
3258 int error, idx, chainlen = 0;
3259
3260 /*
3261 * If there's no way we can send any packets, return now.
3262 */
3263 if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt < 6)
3264 return (ENOBUFS);
3265
3266 /*
3267 * Count the number of frags in this chain to see if
3268 * we need to m_defrag. Since the descriptor list is shared
3269 * by all packets, we'll m_defrag long chains so that they
3270 * do not use up the entire list, even if they would fit.
3271 */
3272 for (m = *m_head; m != NULL; m = m->m_next)
3273 chainlen++;
3274
3275 if ((chainlen > DC_TX_LIST_CNT / 4) ||
3276 ((DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt)) < 6)) {
3277 m = m_defrag(*m_head, M_DONTWAIT);
3278 if (m == NULL)
3279 return (ENOBUFS);
3280 *m_head = m;
3281 }
3282
3283 /*
3284 * Start packing the mbufs in this chain into
3285 * the fragment pointers. Stop when we run out
3286 * of fragments or hit the end of the mbuf chain.
3287 */
3288 idx = sc->dc_cdata.dc_tx_prod;
3289 sc->dc_cdata.dc_tx_mapping = *m_head;
3290 error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
3291 *m_head, dc_dma_map_txbuf, sc, 0);
3292 if (error)
3293 return (error);
3294 if (sc->dc_cdata.dc_tx_err != 0)
3295 return (sc->dc_cdata.dc_tx_err);
3296 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
3297 BUS_DMASYNC_PREWRITE);
3298 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
3299 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3300 return (0);
3301 }
3302
3303 /*
3304 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3305 * to the mbuf data regions directly in the transmit lists. We also save a
3306 * copy of the pointers since the transmit list fragment pointers are
3307 * physical addresses.
3308 */
3309
3310 static void
3311 dc_start(struct ifnet *ifp)
3312 {
3313 struct dc_softc *sc;
3314 struct mbuf *m_head = NULL, *m;
3315 unsigned int queued = 0;
3316 int idx;
3317
3318 sc = ifp->if_softc;
3319
3320 DC_LOCK(sc);
3321
3322 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) {
3323 DC_UNLOCK(sc);
3324 return;
3325 }
3326
3327 if (ifp->if_flags & IFF_OACTIVE) {
3328 DC_UNLOCK(sc);
3329 return;
3330 }
3331
3332 idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;
3333
3334 while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
3335 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3336 if (m_head == NULL)
3337 break;
3338
3339 if (sc->dc_flags & DC_TX_COALESCE &&
3340 (m_head->m_next != NULL ||
3341 sc->dc_flags & DC_TX_ALIGN)) {
3342 m = m_defrag(m_head, M_DONTWAIT);
3343 if (m == NULL) {
3344 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3345 ifp->if_flags |= IFF_OACTIVE;
3346 break;
3347 } else {
3348 m_head = m;
3349 }
3350 }
3351
3352 if (dc_encap(sc, &m_head)) {
3353 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3354 ifp->if_flags |= IFF_OACTIVE;
3355 break;
3356 }
3357 idx = sc->dc_cdata.dc_tx_prod;
3358
3359 queued++;
3360 /*
3361 * If there's a BPF listener, bounce a copy of this frame
3362 * to him.
3363 */
3364 BPF_MTAP(ifp, m_head);
3365
3366 if (sc->dc_flags & DC_TX_ONE) {
3367 ifp->if_flags |= IFF_OACTIVE;
3368 break;
3369 }
3370 }
3371
3372 if (queued > 0) {
3373 /* Transmit */
3374 if (!(sc->dc_flags & DC_TX_POLL))
3375 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
3376
3377 /*
3378 * Set a timeout in case the chip goes out to lunch.
3379 */
3380 ifp->if_timer = 5;
3381 }
3382
3383 DC_UNLOCK(sc);
3384 }
3385
/*
 * Initialize the chip and bring the interface up: reset the hardware,
 * program bus/DMA and transmit parameters, build the RX/TX descriptor
 * rings, program the RX filter, enable RX/TX and (re)start the media
 * tick callout.  Takes DC_LOCK itself.
 */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	DC_LOCK(sc);

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	/* Some chips want one outstanding transmit poll demand at a time. */
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	/* Program DMA alignment from the probed cache line size. */
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/*
	 * Select store-and-forward, either by flag or because the TX
	 * threshold has been bumped past its maximum; otherwise program
	 * the current cut-through threshold.
	 */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/* Start from the minimum TX threshold. */
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("dc%d: initialization failed: no "
		    "memory for rx buffers\n", sc->dc_unit);
		dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	/* Ack any stale interrupt conditions. */
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/* NWAY autonegotiation needs a faster tick to track state. */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

#ifdef SRM_MEDIA
	/* Apply any media setting handed down from the SRM console. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
	DC_UNLOCK(sc);
}
3573
3574 /*
3575 * Set media options.
3576 */
3577 static int
3578 dc_ifmedia_upd(struct ifnet *ifp)
3579 {
3580 struct dc_softc *sc;
3581 struct mii_data *mii;
3582 struct ifmedia *ifm;
3583
3584 sc = ifp->if_softc;
3585 mii = device_get_softc(sc->dc_miibus);
3586 mii_mediachg(mii);
3587 ifm = &mii->mii_media;
3588
3589 if (DC_IS_DAVICOM(sc) &&
3590 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
3591 dc_setcfg(sc, ifm->ifm_media);
3592 else
3593 sc->dc_link = 0;
3594
3595 return (0);
3596 }
3597
3598 /*
3599 * Report current media status.
3600 */
3601 static void
3602 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3603 {
3604 struct dc_softc *sc;
3605 struct mii_data *mii;
3606 struct ifmedia *ifm;
3607
3608 sc = ifp->if_softc;
3609 mii = device_get_softc(sc->dc_miibus);
3610 mii_pollstat(mii);
3611 ifm = &mii->mii_media;
3612 if (DC_IS_DAVICOM(sc)) {
3613 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
3614 ifmr->ifm_active = ifm->ifm_media;
3615 ifmr->ifm_status = 0;
3616 return;
3617 }
3618 }
3619 ifmr->ifm_active = mii->mii_media_active;
3620 ifmr->ifm_status = mii->mii_media_status;
3621 }
3622
3623 static int
3624 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3625 {
3626 struct dc_softc *sc = ifp->if_softc;
3627 struct ifreq *ifr = (struct ifreq *)data;
3628 struct mii_data *mii;
3629 int error = 0;
3630
3631 DC_LOCK(sc);
3632
3633 switch (command) {
3634 case SIOCSIFFLAGS:
3635 if (ifp->if_flags & IFF_UP) {
3636 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
3637 (IFF_PROMISC | IFF_ALLMULTI);
3638
3639 if (ifp->if_flags & IFF_RUNNING) {
3640 if (need_setfilt)
3641 dc_setfilt(sc);
3642 } else {
3643 sc->dc_txthresh = 0;
3644 dc_init(sc);
3645 }
3646 } else {
3647 if (ifp->if_flags & IFF_RUNNING)
3648 dc_stop(sc);
3649 }
3650 sc->dc_if_flags = ifp->if_flags;
3651 error = 0;
3652 break;
3653 case SIOCADDMULTI:
3654 case SIOCDELMULTI:
3655 dc_setfilt(sc);
3656 error = 0;
3657 break;
3658 case SIOCGIFMEDIA:
3659 case SIOCSIFMEDIA:
3660 mii = device_get_softc(sc->dc_miibus);
3661 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3662 #ifdef SRM_MEDIA
3663 if (sc->dc_srm_media)
3664 sc->dc_srm_media = 0;
3665 #endif
3666 break;
3667 case SIOCSIFCAP:
3668 ifp->if_capenable &= ~IFCAP_POLLING;
3669 ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
3670 break;
3671 default:
3672 error = ether_ioctl(ifp, command, data);
3673 break;
3674 }
3675
3676 DC_UNLOCK(sc);
3677
3678 return (error);
3679 }
3680
3681 static void
3682 dc_watchdog(struct ifnet *ifp)
3683 {
3684 struct dc_softc *sc;
3685
3686 sc = ifp->if_softc;
3687
3688 DC_LOCK(sc);
3689
3690 ifp->if_oerrors++;
3691 printf("dc%d: watchdog timeout\n", sc->dc_unit);
3692
3693 dc_stop(sc);
3694 dc_reset(sc);
3695 dc_init(sc);
3696
3697 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3698 dc_start(ifp);
3699
3700 DC_UNLOCK(sc);
3701 }
3702
3703 /*
3704 * Stop the adapter and free any mbufs allocated to the
3705 * RX and TX lists.
3706 */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	u_int32_t ctl;

	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	/* Disarm the transmit watchdog. */
	ifp->if_timer = 0;
	ld = sc->dc_ldata;
	cd = &sc->dc_cdata;

	/* Stop the periodic tick callout. */
	callout_stop(&sc->dc_stat_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif

	/* Disable RX/TX DMA, mask interrupts, clear the ring base pointers. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			/*
			 * Only the LASTFRAG descriptor owns the mbuf and
			 * DMA map (see dc_dma_map_txbuf()); skip setup
			 * frames and intermediate fragments without
			 * unloading or freeing anything.
			 */
			if ((ctl & DC_TXCTL_SETUP) ||
			    !(ctl & DC_TXCTL_LASTFRAG)) {
				cd->dc_tx_chain[i] = NULL;
				continue;
			}
			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
			m_freem(cd->dc_tx_chain[i]);
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));

	DC_UNLOCK(sc);
}
3767
3768 /*
3769 * Device suspend routine. Stop the interface and save some PCI
3770 * settings in case the BIOS doesn't restore them properly on
3771 * resume.
3772 */
3773 static int
3774 dc_suspend(device_t dev)
3775 {
3776 struct dc_softc *sc;
3777 int s;
3778
3779 s = splimp();
3780
3781 sc = device_get_softc(dev);
3782 dc_stop(sc);
3783 sc->suspended = 1;
3784
3785 splx(s);
3786 return (0);
3787 }
3788
3789 /*
3790 * Device resume routine. Restore some PCI settings in case the BIOS
3791 * doesn't, re-enable busmastering, and restart the interface if
3792 * appropriate.
3793 */
3794 static int
3795 dc_resume(device_t dev)
3796 {
3797 struct dc_softc *sc;
3798 struct ifnet *ifp;
3799 int s;
3800
3801 s = splimp();
3802
3803 sc = device_get_softc(dev);
3804 ifp = &sc->arpcom.ac_if;
3805
3806 /* reinitialize interface if necessary */
3807 if (ifp->if_flags & IFF_UP)
3808 dc_init(sc);
3809
3810 sc->suspended = 0;
3811
3812 splx(s);
3813 return (0);
3814 }
3815
3816 /*
3817 * Stop all chip I/O so that the kernel's probe routines don't
3818 * get confused by errant DMAs when rebooting.
3819 */
3820 static void
3821 dc_shutdown(device_t dev)
3822 {
3823 struct dc_softc *sc;
3824
3825 sc = device_get_softc(dev);
3826
3827 dc_stop(sc);
3828 }
Cache object: c37720bdc81fed050156c97c92cbd8ca
|