FreeBSD/Linux Kernel Cross Reference
sys/pci/if_xl.c
1 /*
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: src/sys/pci/if_xl.c,v 1.5.2.18 1999/09/05 08:21:14 peter Exp $
33 */
34
35 /*
36 * 3Com 3c90x Etherlink XL PCI NIC driver
37 *
38 * Supports the 3Com "boomerang" and "cyclone" PCI
39 * bus-master chips (3c90x cards and embedded controllers) including
40 * the following:
41 *
42 * 3Com 3c900-TPO 10Mbps/RJ-45
43 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
44 * 3Com 3c905-TX 10/100Mbps/RJ-45
45 * 3Com 3c905-T4 10/100Mbps/RJ-45
46 * 3Com 3c900B-TPO 10Mbps/RJ-45
47 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
48 * 3Com 3c905B-TX 10/100Mbps/RJ-45
49 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
50 * 3Com 3c980-TX 10/100Mbps server adapter
51 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
52 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
53 * Dell Latitude laptop docking station embedded 3c905-TX
54 *
55 * Written by Bill Paul <wpaul@ctr.columbia.edu>
56 * Electrical Engineering Department
57 * Columbia University, New York City
58 */
59
60 /*
61 * The 3c90x series chips use a bus-master DMA interface for transfering
62 * packets to and from the controller chip. Some of the "vortex" cards
63 * (3c59x) also supported a bus master mode, however for those chips
64 * you could only DMA packets to/from a contiguous memory buffer. For
65 * transmission this would mean copying the contents of the queued mbuf
66 * chain into a an mbuf cluster and then DMAing the cluster. This extra
67 * copy would sort of defeat the purpose of the bus master support for
68 * any packet that doesn't fit into a single mbuf.
69 *
70 * By contrast, the 3c90x cards support a fragment-based bus master
71 * mode where mbuf chains can be encapsulated using TX descriptors.
72 * This is similar to other PCI chips such as the Texas Instruments
73 * ThunderLAN and the Intel 82557/82558.
74 *
75 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
76 * bus master chips because they maintain the old PIO interface for
77 * backwards compatibility, but starting with the 3c905B and the
78 * "cyclone" chips, the compatibility interface has been dropped.
79 * Since using bus master DMA is a big win, we use this driver to
80 * support the PCI "boomerang" chips even though they work with the
81 * "vortex" driver in order to obtain better performance.
82 *
83 * This driver is in the /sys/pci directory because it only supports
84 * PCI-based NICs.
85 */
86
87 #include "bpfilter.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/sockio.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96
97 #include <net/if.h>
98 #include <net/if_arp.h>
99 #include <net/ethernet.h>
100 #include <net/if_dl.h>
101 #include <net/if_media.h>
102 #include <net/if_types.h>
103
104 #ifdef INET
105 #include <netinet/in.h>
106 #include <netinet/in_systm.h>
107 #include <netinet/in_var.h>
108 #include <netinet/ip.h>
109 #include <netinet/if_ether.h>
110 #endif
111
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115
116 #include <vm/vm.h> /* for vtophys */
117 #include <vm/pmap.h> /* for vtophys */
118 #include <machine/clock.h> /* for DELAY */
119
120 #include <pci/pcireg.h>
121 #include <pci/pcivar.h>
122
123 /*
124 * The following #define causes the code to use PIO to access the
125 * chip's registers instead of memory mapped mode. The reason PIO mode
126 * is on by default is that the Etherlink XL manual seems to indicate
127 * that only the newer revision chips (3c905B) support both PIO and
128 * memory mapped access. Since we want to be compatible with the older
129 * bus master chips, we use PIO here. If you comment this out, the
130 * driver will use memory mapped I/O, which may be faster but which
131 * might not work on some devices.
132 */
133 #define XL_USEIOSPACE
134
135 /*
136 * This #define controls the behavior of autonegotiation during the
137 * bootstrap phase. It's possible to have the driver initiate an
138 * autonegotiation session and then set a timeout which will cause the
139 * autoneg results to be polled later, usually once the kernel has
140 * finished booting. This is clever and all, but it can have bad side
141 * effects in some cases, particularly where NFS is involved. For
142 * example, if we're booting diskless with an NFS rootfs, the network
143 * interface has to be up and running before we hit the mountroot()
144 * code, otherwise mounting the rootfs will fail and we'll probably
145 * panic.
146 *
147 * Consequently, the 'backgrounded' autoneg behavior is turned off
148 * by default and we actually sit and wait 5 seconds for autonegotiation
149 * to complete before proceeding with the other device probes. If you
150 * choose to use the other behavior, you can uncomment this #define and
151 * recompile.
152 */
153 /* #define XL_BACKGROUND_AUTONEG */
154
155 #include <pci/if_xlreg.h>
156
157 #if !defined(lint)
158 static const char rcsid[] =
159 "$FreeBSD: src/sys/pci/if_xl.c,v 1.5.2.18 1999/09/05 08:21:14 peter Exp $";
160 #endif
161
162 /*
163 * Various supported device vendors/types and their names.
164 */
/* PCI vendor/device ID to name table; probe walks this until the
 * zero-terminated sentinel entry. */
static struct xl_type xl_devs[] = {
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
		"3Com 3c900 Etherlink XL 10BaseT" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
		"3Com 3c900 Etherlink XL 10BaseT Combo" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
		"3Com 3c905 Fast Etherlink XL 10/100BaseTX" },
	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
		"3Com 3c905 Fast Etherlink XL 10/100BaseT4" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10BT,
		"3Com 3c900B Etherlink XL 10BaseT" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10BT_COMBO,
		"3Com 3c900B Etherlink XL 10BaseT Combo" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT,
		"3Com 3c905B Fast Etherlink XL 10/100BaseTX" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
		"3Com 3c905B Fast Etherlink XL 10/100BaseT4" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
		"3Com 3c905B Fast Etherlink XL 10/100BaseFX/SC" },
	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT_SERV,
		"3Com 3c980 Fast Etherlink XL 10/100BaseTX" },
	{ 0, 0, NULL }		/* end-of-table sentinel */
};
188
189 /*
190 * Various supported PHY vendors/types and their names. Note that
191 * this driver will work with pretty much any MII-compliant PHY,
192 * so failure to positively identify the chip is not a fatal error.
193 */
194
/* Known MII PHY OUI/model table; the final entry (vendor/type 0) acts
 * as a catch-all so unrecognized but compliant PHYs still get a name. */
static struct xl_type xl_phys[] = {
	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
	{ 0, 0, "<MII-compliant physical interface>" }	/* catch-all */
};
204
205 static unsigned long xl_count = 0;
206 static char *xl_probe __P((pcici_t, pcidi_t));
207 static void xl_attach __P((pcici_t, int));
208
209 static int xl_newbuf __P((struct xl_softc *,
210 struct xl_chain_onefrag *));
211 static void xl_stats_update __P((void *));
212 static int xl_encap __P((struct xl_softc *, struct xl_chain *,
213 struct mbuf * ));
214
215 static void xl_rxeof __P((struct xl_softc *));
216 static void xl_txeof __P((struct xl_softc *));
217 static void xl_txeoc __P((struct xl_softc *));
218 static void xl_intr __P((void *));
219 static void xl_start __P((struct ifnet *));
220 static int xl_ioctl __P((struct ifnet *, int, caddr_t));
221 static void xl_init __P((void *));
222 static void xl_stop __P((struct xl_softc *));
223 static void xl_watchdog __P((struct ifnet *));
224 static void xl_shutdown __P((int, void *));
225 static int xl_ifmedia_upd __P((struct ifnet *));
226 static void xl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
227
228 static int xl_eeprom_wait __P((struct xl_softc *));
229 static int xl_read_eeprom __P((struct xl_softc *, caddr_t, int,
230 int, int));
231 static void xl_mii_sync __P((struct xl_softc *));
232 static void xl_mii_send __P((struct xl_softc *, u_int32_t, int));
233 static int xl_mii_readreg __P((struct xl_softc *, struct xl_mii_frame *));
234 static int xl_mii_writereg __P((struct xl_softc *, struct xl_mii_frame *));
235 static u_int16_t xl_phy_readreg __P((struct xl_softc *, int));
236 static void xl_phy_writereg __P((struct xl_softc *, int, int));
237
238 static void xl_autoneg_xmit __P((struct xl_softc *));
239 static void xl_autoneg_mii __P((struct xl_softc *, int, int));
240 static void xl_setmode_mii __P((struct xl_softc *, int));
241 static void xl_getmode_mii __P((struct xl_softc *));
242 static void xl_setmode __P((struct xl_softc *, int));
243 static u_int8_t xl_calchash __P((caddr_t));
244 static void xl_setmulti __P((struct xl_softc *));
245 static void xl_reset __P((struct xl_softc *));
246 static int xl_list_rx_init __P((struct xl_softc *));
247 static int xl_list_tx_init __P((struct xl_softc *));
248 static void xl_wait __P((struct xl_softc *));
249 static void xl_mediacheck __P((struct xl_softc *));
250 #ifdef notdef
251 static void xl_testpacket __P((struct xl_softc *));
252 #endif
253
254 /*
255 * Murphy's law says that it's possible the chip can wedge and
256 * the 'command in progress' bit may never clear. Hence, we wait
257 * only a finite amount of time to avoid getting caught in an
258 * infinite loop. Normally this delay routine would be a macro,
259 * but it isn't called during normal operation so we can afford
260 * to make it a function.
261 */
262 static void xl_wait(sc)
263 struct xl_softc *sc;
264 {
265 register int i;
266
267 for (i = 0; i < XL_TIMEOUT; i++) {
268 DELAY(10);
269 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
270 break;
271 }
272 if (i == XL_TIMEOUT)
273 printf("xl%d: command never completed!\n", sc->xl_unit);
274
275 return;
276 }
277
278 /*
279 * MII access routines are provided for adapters with external
280 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
281 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
282 * Note: if you don't perform the MDIO operations just right,
283 * it's possible to end up with code that works correctly with
284 * some chips/CPUs/processor speeds/bus speeds/etc but not
285 * with others.
286 */
287 #define MII_SET(x) \
288 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
289 CSR_READ_2(sc, XL_W4_PHY_MGMT) | x)
290
291 #define MII_CLR(x) \
292 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
293 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~x)
294
295 /*
296 * Sync the PHYs by setting data bit and strobing the clock 32 times.
297 */
298 static void xl_mii_sync(sc)
299 struct xl_softc *sc;
300 {
301 register int i;
302
303 XL_SEL_WIN(4);
304 MII_SET(XL_MII_DIR|XL_MII_DATA);
305
306 for (i = 0; i < 32; i++) {
307 MII_SET(XL_MII_CLK);
308 DELAY(1);
309 MII_CLR(XL_MII_CLK);
310 DELAY(1);
311 }
312
313 return;
314 }
315
316 /*
317 * Clock a series of bits through the MII.
318 */
319 static void xl_mii_send(sc, bits, cnt)
320 struct xl_softc *sc;
321 u_int32_t bits;
322 int cnt;
323 {
324 int i;
325
326 XL_SEL_WIN(4);
327 MII_CLR(XL_MII_CLK);
328
329 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
330 if (bits & i) {
331 MII_SET(XL_MII_DATA);
332 } else {
333 MII_CLR(XL_MII_DATA);
334 }
335 DELAY(1);
336 MII_CLR(XL_MII_CLK);
337 DELAY(1);
338 MII_SET(XL_MII_CLK);
339 }
340 }
341
342 /*
343 * Read an PHY register through the MII.
344 */
/*
 * Read a PHY register through the bit-banged MII management interface
 * (register window 4).  On success the register contents are left in
 * frame->mii_data and 0 is returned; 1 is returned if the PHY never
 * acknowledged the read.  Runs at splimp() so the bit timing is not
 * disturbed by interrupts.
 */
static int xl_mii_readreg(sc, frame)
	struct xl_softc		*sc;
	struct xl_mii_frame	*frame;
	
{
	int			i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;
	
	/*
	 * Select register window 4.
	 */

	XL_SEL_WIN(4);

	/* Quiesce the management pins before driving them. */
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(XL_MII_CLK);
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			DELAY(1);
			MII_SET(XL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(XL_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to leave the bus in a clean state. */
	MII_CLR(XL_MII_CLK);
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
439
440 /*
441 * Write to a PHY register through the MII.
442 */
/*
 * Write a PHY register through the bit-banged MII management interface
 * (register window 4).  The register address and data to write are
 * taken from 'frame'.  Always returns 0; runs at splimp() to keep the
 * bit timing undisturbed.
 */
static int xl_mii_writereg(sc, frame)
	struct xl_softc		*sc;
	struct xl_mii_frame	*frame;
	
{
	int			s;



	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;
	
	/*
	 * Select the window 4.
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Clock out start delimiter, opcode, addresses, turnaround, data. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	DELAY(1);
	MII_CLR(XL_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return(0);
}
495
496 static u_int16_t xl_phy_readreg(sc, reg)
497 struct xl_softc *sc;
498 int reg;
499 {
500 struct xl_mii_frame frame;
501
502 bzero((char *)&frame, sizeof(frame));
503
504 frame.mii_phyaddr = sc->xl_phy_addr;
505 frame.mii_regaddr = reg;
506 xl_mii_readreg(sc, &frame);
507
508 return(frame.mii_data);
509 }
510
511 static void xl_phy_writereg(sc, reg, data)
512 struct xl_softc *sc;
513 int reg;
514 int data;
515 {
516 struct xl_mii_frame frame;
517
518 bzero((char *)&frame, sizeof(frame));
519
520 frame.mii_phyaddr = sc->xl_phy_addr;
521 frame.mii_regaddr = reg;
522 frame.mii_data = data;
523
524 xl_mii_writereg(sc, &frame);
525
526 return;
527 }
528
529 /*
530 * The EEPROM is slow: give it time to come ready after issuing
531 * it a command.
532 */
533 static int xl_eeprom_wait(sc)
534 struct xl_softc *sc;
535 {
536 int i;
537
538 for (i = 0; i < 100; i++) {
539 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
540 DELAY(162);
541 else
542 break;
543 }
544
545 if (i == 100) {
546 printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
547 return(1);
548 }
549
550 return(0);
551 }
552
553 /*
554 * Read a sequence of words from the EEPROM. Note that ethernet address
555 * data is stored in the EEPROM in network byte order.
556 */
557 static int xl_read_eeprom(sc, dest, off, cnt, swap)
558 struct xl_softc *sc;
559 caddr_t dest;
560 int off;
561 int cnt;
562 int swap;
563 {
564 int err = 0, i;
565 u_int16_t word = 0, *ptr;
566
567 XL_SEL_WIN(0);
568
569 if (xl_eeprom_wait(sc))
570 return(1);
571
572 for (i = 0; i < cnt; i++) {
573 CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | (off + i));
574 err = xl_eeprom_wait(sc);
575 if (err)
576 break;
577 word = CSR_READ_2(sc, XL_W0_EE_DATA);
578 ptr = (u_int16_t *)(dest + (i * 2));
579 if (swap)
580 *ptr = ntohs(word);
581 else
582 *ptr = word;
583 }
584
585 return(err ? 1 : 0);
586 }
587
588 /*
589 * This routine is taken from the 3Com Etherlink XL manual,
590 * page 10-7. It calculates a CRC of the supplied multicast
591 * group address and returns the lower 8 bits, which are used
592 * as the multicast filter position.
593 * Note: the 3c905B currently only supports a 64-bit hash table,
594 * which means we really only need 6 bits, but the manual indicates
595 * that future chip revisions will have a 256-bit hash table,
596 * hence the routine is set up to calculate 8 bits of position
597 * info in case we need it some day.
598 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
599 * 256 bit hash table. This means we have to use all 8 bits regardless.
600 * On older cards, the upper 2 bits will be ignored. Grrrr....
601 */
/*
 * Compute the multicast filter position for a 6-byte Ethernet
 * address: a 32-bit CRC is run over the address (bits fed LSB-first
 * within each octet) and the low 8 bits of the result are returned.
 */
static u_int8_t xl_calchash(addr)
	caddr_t			addr;
{
	u_int32_t		crc;
	u_int32_t		fb;
	int			byte, bit;
	u_int8_t		cur;

	/* Seed the CRC accumulator. */
	crc = 0xFFFFFFFF;

	for (byte = 0; byte < 6; byte++) {
		cur = addr[byte];
		for (bit = 0; bit < 8; bit++) {
			/* Feedback = CRC msb XOR next input bit. */
			fb = ((crc >> 31) & 1) ^ (cur & 1);
			crc <<= 1;
			cur >>= 1;
			if (fb)
				crc = (crc ^ 0x04c11db6) | fb;
		}
	}

	/* Return the filter bit position. */
	return(crc & 0x000000FF);
}
626
/*
 * Program the chip's RX filter from the interface multicast state.
 * IFF_ALLMULTI (or a pre-905B chip, which lacks a hash filter)
 * enables all-multicast reception; an empty list disables multicast;
 * otherwise each address is hashed into the 90xB hash filter.
 */
static void xl_setmulti(sc)
	struct xl_softc		*sc;
{
	struct ifnet		*ifp;
	int			h, i;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int8_t		rxfilt;

	ifp = &sc->arpcom.ac_if;

	/* The RX filter register lives in window 5. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	if (ifp->if_flags & IFF_ALLMULTI) {
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	}

	if (sc->arpcom.ac_multicnt == 0) {
		/* disable multicast */
		rxfilt &= ~(XL_RXFILTER_ALLMULTI | XL_RXFILTER_MULTIHASH);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	}

	/* NOTE(review): the IFF_ALLMULTI half of this test is unreachable;
	 * the function already returned above when that flag was set. */
	if (ifp->if_flags & IFF_ALLMULTI || sc->xl_type == XL_TYPE_90X) {
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
		/* now program new ones */
		rxfilt |= XL_RXFILTER_MULTIHASH;
		rxfilt &= ~XL_RXFILTER_ALLMULTI;
		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
		while(enm != NULL) {
			/* An addrlo/addrhi mismatch denotes an address
			 * range, which the hash filter cannot express:
			 * fall back to all-multicast mode. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				rxfilt &= ~XL_RXFILTER_MULTIHASH;
				rxfilt |= XL_RXFILTER_ALLMULTI;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				break;
			} else {
				h = xl_calchash(enm->enm_addrlo);
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
			}
			ETHER_NEXT_MULTI(step, enm);
		}
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	return;
}
#ifdef notdef
/*
 * Debug helper (compiled out): queue a minimal self-addressed test
 * frame (type 3, three payload bytes) on the interface send queue
 * and kick the transmitter.  Silently does nothing if no mbuf is
 * available.
 */
static void xl_testpacket(sc)
	struct xl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);

	return;
}
#endif
714
715 /*
716 * Initiate an autonegotiation session.
717 */
718 static void xl_autoneg_xmit(sc)
719 struct xl_softc *sc;
720 {
721 u_int16_t phy_sts;
722
723 xl_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
724 DELAY(500);
725 while(xl_phy_readreg(sc, XL_PHY_GENCTL)
726 & PHY_BMCR_RESET);
727
728 phy_sts = xl_phy_readreg(sc, PHY_BMCR);
729 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
730 xl_phy_writereg(sc, PHY_BMCR, phy_sts);
731
732 return;
733 }
734
735 /*
736 * Invoke autonegotiation on a PHY. Also used with the 3Com internal
737 * autoneg logic which is mapped onto the MII.
738 */
/*
 * Invoke autonegotiation on a PHY. Also used with the 3Com internal
 * autoneg logic which is mapped onto the MII.
 *
 * 'flag' selects the phase: XL_FLAG_FORCEDELAY starts a session and
 * busy-waits 5 seconds (probe time only); XL_FLAG_SCHEDDELAY starts a
 * session and arms the watchdog to poll the result later;
 * XL_FLAG_DELAYTIMEO is the watchdog callback that harvests the
 * result.  After harvest, the negotiated (or fallback) mode is
 * written to the PHY and the ASIC, and the interface is reinited.
 */
static void xl_autoneg_mii(sc, flag, verbose)
	struct xl_softc		*sc;
	int			flag;
	int			verbose;
{
	u_int16_t		phy_sts = 0, media, advert, ability;
	struct ifnet		*ifp;
	struct ifmedia		*ifm;

	ifm = &sc->ifmedia;
	ifp = &sc->arpcom.ac_if;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

	/*
	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
	 * bit cleared in the status register, but has the 'autoneg enabled'
	 * bit set in the control register. This is a contradiction, and
	 * I'm not sure how to handle it. If you want to force an attempt
	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
	 * and see what happens.
	 */
#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's
	 * no point in continuing.
	 */
	phy_sts = xl_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			printf("xl%d: autonegotiation not supported\n",
							sc->xl_unit);
		/* Fall back to 10baseT half-duplex on PHY and ASIC alike. */
		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;	
		media = xl_phy_readreg(sc, PHY_BMCR);
		media &= ~PHY_BMCR_SPEEDSEL;
		media &= ~PHY_BMCR_DUPLEX;
		xl_phy_writereg(sc, PHY_BMCR, media);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
			(CSR_READ_1(sc, XL_W3_MAC_CTRL) &
			~XL_MACCTRL_DUPLEX));
		return;
	}
#endif

	switch (flag) {
	case XL_FLAG_FORCEDELAY:
		/*
	 	 * XXX Never use this option anywhere but in the probe
	 	 * routine: making the kernel stop dead in its tracks
 	 	 * for three whole seconds after we've gone multi-user
		 * is really bad manners.
	 	 */
		xl_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case XL_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting
		 * an autoneg session, otherwise xl_start() may clobber
		 * our timeout, and we don't want to allow transmission
		 * during an autoneg session since that can screw it up.
		 */
		if (sc->xl_cdata.xl_tx_head != NULL) {
			sc->xl_want_auto = 1;
			return;
		}
		xl_autoneg_xmit(sc);
		/* Harvest the result from the watchdog in 5 seconds. */
		ifp->if_timer = 5;
		sc->xl_autoneg = 1;
		sc->xl_want_auto = 0;
		return;
		break;	/* NOTREACHED */
	case XL_FLAG_DELAYTIMEO:
		ifp->if_timer = 0;
		sc->xl_autoneg = 0;
		break;
	default:
		printf("xl%d: invalid autoneg flag: %d\n", sc->xl_unit, flag);
		return;
	}

	if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			printf("xl%d: autoneg complete, ", sc->xl_unit);
		phy_sts = xl_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			printf("xl%d: autoneg not complete, ", sc->xl_unit);
	}

	media = xl_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			printf("link status good ");
		/* Pick the best mode both sides advertise, fastest first. */
		advert = xl_phy_readreg(sc, XL_PHY_ANAR);
		ability = xl_phy_readreg(sc, XL_PHY_LPAR);

		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
			ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
			ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
			ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else if (advert & PHY_ANAR_10BTHALF &&
			ability & PHY_ANAR_10BTHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}

		/* Set ASIC's duplex mode to match the PHY. */
		XL_SEL_WIN(3);
		if (media & PHY_BMCR_DUPLEX)
			CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
		else
			CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
				(CSR_READ_1(sc, XL_W3_MAC_CTRL) &
				~XL_MACCTRL_DUPLEX));
		xl_phy_writereg(sc, PHY_BMCR, media);
	} else {
		if (verbose)
			printf("no carrier (forcing half-duplex, 10Mbps)\n");
		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
		media &= ~PHY_BMCR_SPEEDSEL;
		media &= ~PHY_BMCR_DUPLEX;
		xl_phy_writereg(sc, PHY_BMCR, media);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
			(CSR_READ_1(sc, XL_W3_MAC_CTRL) &
			~XL_MACCTRL_DUPLEX));
	}

	xl_init(sc);

	/* Restart any transmission that was deferred for the session. */
	if (sc->xl_tx_pend) {
		sc->xl_autoneg = 0;
		sc->xl_tx_pend = 0;
		xl_start(ifp);
	}

	return;
}
900
/*
 * Query the PHY status register and register every supported media
 * type with the ifmedia subsystem.  The default (ifm_media) is
 * assigned last-wins, so the checks run from slowest to fastest and
 * the best supported mode (or autoneg, if available) ends up as the
 * default.
 */
static void xl_getmode_mii(sc)
	struct xl_softc		*sc;
{
	u_int16_t		bmsr;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	bmsr = xl_phy_readreg(sc, PHY_BMSR);
	if (bootverbose)
		printf("xl%d: PHY status word: %x\n", sc->xl_unit, bmsr);

	/* fallback */
	sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;

	if (bmsr & PHY_BMSR_10BTHALF) {
		if (bootverbose)
			printf("xl%d: 10Mbps half-duplex mode supported\n",
								sc->xl_unit);
		ifmedia_add(&sc->ifmedia,
			IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	}

	if (bmsr & PHY_BMSR_10BTFULL) {
		if (bootverbose)
			printf("xl%d: 10Mbps full-duplex mode supported\n",
								sc->xl_unit);
		ifmedia_add(&sc->ifmedia,
			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
	}

	if (bmsr & PHY_BMSR_100BTXHALF) {
		if (bootverbose)
			printf("xl%d: 100Mbps half-duplex mode supported\n",
								sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
	}

	if (bmsr & PHY_BMSR_100BTXFULL) {
		if (bootverbose)
			printf("xl%d: 100Mbps full-duplex mode supported\n",
								sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia,
			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	}

	/* Some also support 100BaseT4. */
	if (bmsr & PHY_BMSR_100BT4) {
		if (bootverbose)
			printf("xl%d: 100baseT4 mode supported\n", sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
		/* T4 PHYs misreport autoneg support; optionally force it. */
		if (bootverbose)
			printf("xl%d: forcing on autoneg support for BT4\n",
							 sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
#endif
	}

	if (bmsr & PHY_BMSR_CANAUTONEG) {
		if (bootverbose)
			printf("xl%d: autoneg supported\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		/* Autoneg, when available, beats any fixed default. */
		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
	}

	return;
}
980
981 /*
982 * Set speed and duplex mode.
983 */
/*
 * Set speed and duplex mode.
 *
 * Cancels any in-flight autoneg session, points the chip's internal
 * connector selection at the MII (or, for BTX media on a 3c905B, at
 * the auto transceiver), then programs the PHY BMCR and the ASIC
 * duplex bit to match the requested 'media' word.
 */
static void xl_setmode_mii(sc, media)
	struct xl_softc		*sc;
	int			media;
{
	u_int16_t		bmcr;
	u_int32_t		icfg;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * If an autoneg session is in progress, stop it.
	 */
	if (sc->xl_autoneg) {
		printf("xl%d: canceling autoneg session\n", sc->xl_unit);
		ifp->if_timer = sc->xl_autoneg = sc->xl_want_auto = 0;
		bmcr = xl_phy_readreg(sc, PHY_BMCR);
		bmcr &= ~PHY_BMCR_AUTONEGENBL;
		xl_phy_writereg(sc, PHY_BMCR, bmcr);
	}

	printf("xl%d: selecting MII, ", sc->xl_unit);

	/* Route the internal config's connector field to the MII. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX) {
		if (sc->xl_type == XL_TYPE_905B)
			icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
		else
			icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	}
	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/* NOTE(review): the connector field is not cleared again here, so
	 * the 100BFX value is OR-ed over any bits set above — confirm this
	 * path is only reached when no MII/BT4/BTX bits were set. */
	if (IFM_SUBTYPE(media) == IFM_100_FX) {
		icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
		CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
		return;
	}

	bmcr = xl_phy_readreg(sc, PHY_BMCR);

	/* Start from a clean control word: no autoneg, no loopback. */
	bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
			PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);

	if (IFM_SUBTYPE(media) == IFM_100_T4) {
		printf("100Mbps/T4, half-duplex\n");
		bmcr |= PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		printf("100Mbps, ");
		bmcr |= PHY_BMCR_SPEEDSEL;
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		printf("10Mbps, ");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
	}

	/* Keep the ASIC's duplex setting in step with the PHY's. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		printf("full duplex\n");
		bmcr |= PHY_BMCR_DUPLEX;
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		printf("half duplex\n");
		bmcr &= ~PHY_BMCR_DUPLEX;
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	xl_phy_writereg(sc, PHY_BMCR, bmcr);

	return;
}
1065
1066 static void xl_setmode(sc, media)
1067 struct xl_softc *sc;
1068 int media;
1069 {
1070 u_int32_t icfg;
1071 u_int16_t mediastat;
1072
1073 printf("xl%d: selecting ", sc->xl_unit);
1074
1075 XL_SEL_WIN(4);
1076 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
1077 XL_SEL_WIN(3);
1078 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1079
1080 if (sc->xl_media & XL_MEDIAOPT_BT) {
1081 if (IFM_SUBTYPE(media) == IFM_10_T) {
1082 printf("10baseT transceiver, ");
1083 sc->xl_xcvr = XL_XCVR_10BT;
1084 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1085 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
1086 mediastat |= XL_MEDIASTAT_LINKBEAT|
1087 XL_MEDIASTAT_JABGUARD;
1088 mediastat &= ~XL_MEDIASTAT_SQEENB;
1089 }
1090 }
1091
1092 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1093 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1094 printf("100baseFX port, ");
1095 sc->xl_xcvr = XL_XCVR_100BFX;
1096 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1097 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1098 mediastat |= XL_MEDIASTAT_LINKBEAT;
1099 mediastat &= ~XL_MEDIASTAT_SQEENB;
1100 }
1101 }
1102
1103 if (sc->xl_media & XL_MEDIAOPT_AUI) {
1104 if (IFM_SUBTYPE(media) == IFM_10_5) {
1105 printf("AUI port, ");
1106 sc->xl_xcvr = XL_XCVR_AUI;
1107 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1108 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1109 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1110 XL_MEDIASTAT_JABGUARD);
1111 mediastat |= ~XL_MEDIASTAT_SQEENB;
1112 }
1113 }
1114
1115 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1116 if (IFM_SUBTYPE(media) == IFM_10_2) {
1117 printf("BNC port, ");
1118 sc->xl_xcvr = XL_XCVR_COAX;
1119 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1120 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1121 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1122 XL_MEDIASTAT_JABGUARD|
1123 XL_MEDIASTAT_SQEENB);
1124 }
1125 }
1126
1127 if ((media & IFM_GMASK) == IFM_FDX ||
1128 IFM_SUBTYPE(media) == IFM_100_FX) {
1129 printf("full duplex\n");
1130 XL_SEL_WIN(3);
1131 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1132 } else {
1133 printf("half duplex\n");
1134 XL_SEL_WIN(3);
1135 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1136 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1137 }
1138
1139 if (IFM_SUBTYPE(media) == IFM_10_2)
1140 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1141 else
1142 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1143 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1144 XL_SEL_WIN(4);
1145 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1146 DELAY(800);
1147 XL_SEL_WIN(7);
1148
1149 return;
1150 }
1151
/*
 * Issue a global reset command to the chip and wait for it to complete.
 * Window 0 is selected first; xl_wait() polls until the command-in-progress
 * bit clears.
 */
static void xl_reset(sc)
	struct xl_softc		*sc;
{
	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET);
	xl_wait(sc);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
	return;
}
1163
1164 /*
1165 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1166 * IDs against our list and return a device name if we find a match.
1167 */
1168 static char *
1169 xl_probe(config_id, device_id)
1170 pcici_t config_id;
1171 pcidi_t device_id;
1172 {
1173 struct xl_type *t;
1174
1175 t = xl_devs;
1176
1177 while(t->xl_name != NULL) {
1178 if ((device_id & 0xFFFF) == t->xl_vid &&
1179 ((device_id >> 16) & 0xFFFF) == t->xl_did) {
1180 return(t->xl_name);
1181 }
1182 t++;
1183 }
1184
1185 return(NULL);
1186 }
1187
1188 /*
1189 * This routine is a kludge to work around possible hardware faults
1190 * or manufacturing defects that can cause the media options register
1191 * (or reset options register, as it's called for the first generation
1192 * 3cx90x adapters) to return an incorrect result. I have encountered
1193 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1194 * which doesn't have any of the 'mediaopt' bits set. This screws up
1195 * the attach routine pretty badly because it doesn't know what media
1196 * to look for. If we find ourselves in this predicament, this routine
1197 * will try to guess the media options values and warn the user of a
1198 * possible manufacturing defect with his adapter/system/whatever.
1199 */
1200 static void xl_mediacheck(sc)
1201 struct xl_softc *sc;
1202 {
1203 u_int16_t devid;
1204
1205 /*
1206 * If some of the media options bits are set, assume they are
1207 * correct. If not, try to figure it out down below.
1208 * XXX I should check for 10baseFL, but I don't have an adapter
1209 * to test with.
1210 */
1211 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1212 /*
1213 * Check the XCVR value. If it's not in the normal range
1214 * of values, we need to fake it up here.
1215 */
1216 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1217 return;
1218 else {
1219 printf("xl%d: bogus xcvr value "
1220 "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1221 printf("xl%d: choosing new default based "
1222 "on card type\n", sc->xl_unit);
1223 }
1224 } else {
1225 printf("xl%d: WARNING: no media options bits set in "
1226 "the media options register!!\n", sc->xl_unit);
1227 printf("xl%d: this could be a manufacturing defect in "
1228 "your adapter or system\n", sc->xl_unit);
1229 printf("xl%d: attempting to guess media type; you "
1230 "should probably consult your vendor\n", sc->xl_unit);
1231 }
1232
1233
1234 /*
1235 * Read the device ID from the EEPROM.
1236 * This is what's loaded into the PCI device ID register, so it has
1237 * to be correct otherwise we wouldn't have gotten this far.
1238 */
1239 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1240
1241 switch(devid) {
1242 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TP */
1243 case TC_DEVICEID_CYCLONE_10BT: /* 3c900B-TP */
1244 sc->xl_media = XL_MEDIAOPT_BT;
1245 sc->xl_xcvr = XL_XCVR_10BT;
1246 printf("xl%d: guessing 10BaseT transceiver\n", sc->xl_unit);
1247 break;
1248 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */
1249 case TC_DEVICEID_CYCLONE_10BT_COMBO: /* 3c900B-COMBO */
1250 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1251 sc->xl_xcvr = XL_XCVR_10BT;
1252 printf("xl%d: guessing COMBO (AUI/BNC/TP)\n", sc->xl_unit);
1253 break;
1254 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */
1255 sc->xl_media = XL_MEDIAOPT_MII;
1256 sc->xl_xcvr = XL_XCVR_MII;
1257 printf("xl%d: guessing MII\n", sc->xl_unit);
1258 break;
1259 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */
1260 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */
1261 sc->xl_media = XL_MEDIAOPT_BT4;
1262 sc->xl_xcvr = XL_XCVR_MII;
1263 printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
1264 break;
1265 case TC_DEVICEID_CYCLONE_10_100BT: /* 3c905B-TX */
1266 case TC_DEVICEID_CYCLONE_10_100BT_SERV: /* 3c980-TX */
1267 sc->xl_media = XL_MEDIAOPT_BTX;
1268 sc->xl_xcvr = XL_XCVR_AUTO;
1269 printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
1270 break;
1271 default:
1272 printf("xl%d: unknown device ID: %x -- "
1273 "defaulting to 10baseT\n", sc->xl_unit, devid);
1274 sc->xl_media = XL_MEDIAOPT_BT;
1275 break;
1276 }
1277
1278 return;
1279 }
1280
1281 /*
1282 * Attach the interface. Allocate softc structures, do ifmedia
1283 * setup and ethernet/BPF attach.
1284 */
1285 static void
1286 xl_attach(config_id, unit)
1287 pcici_t config_id;
1288 int unit;
1289 {
1290 int s, i;
1291 #ifndef XL_USEIOSPACE
1292 vm_offset_t pbase, vbase;
1293 #endif
1294 u_char eaddr[ETHER_ADDR_LEN];
1295 u_int32_t command;
1296 struct xl_softc *sc;
1297 struct ifnet *ifp;
1298 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1299 unsigned int round;
1300 caddr_t roundptr;
1301 struct xl_type *p;
1302 u_int16_t phy_vid, phy_did, phy_sts;
1303
1304 s = splimp();
1305
1306 sc = malloc(sizeof(struct xl_softc), M_DEVBUF, M_NOWAIT);
1307 if (sc == NULL) {
1308 printf("xl%d: no memory for softc struct!\n", unit);
1309 return;
1310 }
1311 bzero(sc, sizeof(struct xl_softc));
1312
1313 /*
1314 * If this is a 3c905B, we have to check one extra thing.
1315 * The 905B supports power management and may be placed in
1316 * a low-power mode (D3 mode), typically by certain operating
1317 * systems which shall not be named. The PCI BIOS is supposed
1318 * to reset the NIC and bring it out of low-power mode, but
1319 * some do not. Consequently, we have to see if this chip
1320 * supports power management, and if so, make sure it's not
1321 * in low-power mode. If power management is available, the
1322 * capid byte will be 0x01.
1323 *
1324 * I _think_ that what actually happens is that the chip
1325 * loses its PCI configuration during the transition from
1326 * D3 back to D0; this means that it should be possible for
1327 * us to save the PCI iobase, membase and IRQ, put the chip
1328 * back in the D0 state, then restore the PCI config ourselves.
1329 */
1330
1331 command = pci_conf_read(config_id, XL_PCI_CAPID) & 0x000000FF;
1332 if (command == 0x01) {
1333
1334 command = pci_conf_read(config_id, XL_PCI_PWRMGMTCTRL);
1335 if (command & XL_PSTATE_MASK) {
1336 u_int32_t iobase, membase, irq;
1337
1338 /* Save important PCI config data. */
1339 iobase = pci_conf_read(config_id, XL_PCI_LOIO);
1340 membase = pci_conf_read(config_id, XL_PCI_LOMEM);
1341 irq = pci_conf_read(config_id, XL_PCI_INTLINE);
1342
1343 /* Reset the power state. */
1344 printf("xl%d: chip is in D%d power mode "
1345 "-- setting to D0\n", unit, command & XL_PSTATE_MASK);
1346 command &= 0xFFFFFFFC;
1347 pci_conf_write(config_id, XL_PCI_PWRMGMTCTRL, command);
1348
1349 /* Restore PCI config data. */
1350 pci_conf_write(config_id, XL_PCI_LOIO, iobase);
1351 pci_conf_write(config_id, XL_PCI_LOMEM, membase);
1352 pci_conf_write(config_id, XL_PCI_INTLINE, irq);
1353 }
1354 }
1355
1356 /*
1357 * Map control/status registers.
1358 */
1359 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1360 command |= (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE
1361 |PCI_COMMAND_MASTER_ENABLE);
1362 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1363 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1364
1365 #ifdef XL_USEIOSPACE
1366 if (!(command & PCI_COMMAND_IO_ENABLE)) {
1367 printf("xl%d: failed to enable I/O ports!\n", unit);
1368 free(sc, M_DEVBUF);
1369 goto fail;
1370 }
1371
1372 sc->iobase = pci_conf_read(config_id, XL_PCI_LOIO) & 0xFFFFFFE0;
1373 #else
1374 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1375 printf("xl%d: failed to enable memory mapping!\n", unit);
1376 goto fail;
1377 }
1378
1379 if (!pci_map_mem(config_id, XL_PCI_LOMEM, &vbase, &pbase)) {
1380 printf ("xl%d: couldn't map memory\n", unit);
1381 goto fail;
1382 }
1383 sc->csr = (volatile caddr_t)vbase;
1384 #endif
1385
1386 /* Allocate interrupt */
1387 if (!pci_map_int(config_id, xl_intr, sc, &net_imask)) {
1388 printf("xl%d: couldn't map interrupt\n", unit);
1389 goto fail;
1390 }
1391
1392 /* Reset the adapter. */
1393 xl_reset(sc);
1394
1395 /*
1396 * Get station address from the EEPROM.
1397 */
1398 if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1399 printf("xl%d: failed to read station address\n", sc->xl_unit);
1400 free(sc, M_DEVBUF);
1401 goto fail;
1402 }
1403
1404 /*
1405 * A 3Com chip was detected. Inform the world.
1406 */
1407 printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");
1408
1409 sc->xl_unit = unit;
1410 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1411
1412 sc->xl_ldata_ptr = malloc(sizeof(struct xl_list_data) + 8,
1413 M_DEVBUF, M_NOWAIT);
1414 if (sc->xl_ldata_ptr == NULL) {
1415 free(sc, M_DEVBUF);
1416 printf("xl%d: no memory for list buffers!\n", unit);
1417 return;
1418 }
1419
1420 sc->xl_ldata = (struct xl_list_data *)sc->xl_ldata_ptr;
1421 round = (unsigned int)sc->xl_ldata_ptr & 0xF;
1422 roundptr = sc->xl_ldata_ptr;
1423 for (i = 0; i < 8; i++) {
1424 if (round % 8) {
1425 round++;
1426 roundptr++;
1427 } else
1428 break;
1429 }
1430 sc->xl_ldata = (struct xl_list_data *)roundptr;
1431 bzero(sc->xl_ldata, sizeof(struct xl_list_data));
1432
1433 ifp = &sc->arpcom.ac_if;
1434 ifp->if_softc = sc;
1435 ifp->if_unit = unit;
1436 ifp->if_name = "xl";
1437 ifp->if_mtu = ETHERMTU;
1438 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1439 ifp->if_ioctl = xl_ioctl;
1440 ifp->if_output = ether_output;
1441 ifp->if_start = xl_start;
1442 ifp->if_watchdog = xl_watchdog;
1443 ifp->if_init = xl_init;
1444 ifp->if_baudrate = 10000000;
1445
1446 /*
1447 * Figure out the card type. 3c905B adapters have the
1448 * 'supportsNoTxLength' bit set in the capabilities
1449 * word in the EEPROM.
1450 */
1451 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1452 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH)
1453 sc->xl_type = XL_TYPE_905B;
1454 else
1455 sc->xl_type = XL_TYPE_90X;
1456
1457 /*
1458 * Now we have to see what sort of media we have.
1459 * This includes probing for an MII interace and a
1460 * possible PHY.
1461 */
1462 XL_SEL_WIN(3);
1463 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1464 if (bootverbose)
1465 printf("xl%d: media options word: %x\n", sc->xl_unit,
1466 sc->xl_media);
1467
1468 xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0);
1469 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1470 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1471
1472 xl_mediacheck(sc);
1473
1474 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
1475 || sc->xl_media & XL_MEDIAOPT_BT4) {
1476 /*
1477 * In theory I shouldn't need this, but... if this
1478 * card supports an MII, either an external one or
1479 * an internal fake one, select it in the internal
1480 * config register before trying to probe it.
1481 */
1482 u_int32_t icfg;
1483
1484 XL_SEL_WIN(3);
1485 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1486 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1487 if (sc->xl_media & XL_MEDIAOPT_MII ||
1488 sc->xl_media & XL_MEDIAOPT_BT4)
1489 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
1490 if (sc->xl_media & XL_MEDIAOPT_BTX)
1491 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
1492 if (sc->xl_media & XL_MEDIAOPT_BFX)
1493 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1494 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1495
1496 if (bootverbose)
1497 printf("xl%d: probing for a PHY\n", sc->xl_unit);
1498 for (i = XL_PHYADDR_MIN; i < XL_PHYADDR_MAX + 1; i++) {
1499 if (bootverbose)
1500 printf("xl%d: checking address: %d\n",
1501 sc->xl_unit, i);
1502 sc->xl_phy_addr = i;
1503 xl_phy_writereg(sc, XL_PHY_GENCTL, PHY_BMCR_RESET);
1504 DELAY(500);
1505 while(xl_phy_readreg(sc, XL_PHY_GENCTL)
1506 & PHY_BMCR_RESET);
1507 if ((phy_sts = xl_phy_readreg(sc, XL_PHY_GENSTS)))
1508 break;
1509 }
1510 if (phy_sts) {
1511 phy_vid = xl_phy_readreg(sc, XL_PHY_VENID);
1512 phy_did = xl_phy_readreg(sc, XL_PHY_DEVID);
1513 if (bootverbose)
1514 printf("xl%d: found PHY at address %d, ",
1515 sc->xl_unit, sc->xl_phy_addr);
1516 if (bootverbose)
1517 printf("vendor id: %x device id: %x\n",
1518 phy_vid, phy_did);
1519 p = xl_phys;
1520 while(p->xl_vid) {
1521 if (phy_vid == p->xl_vid &&
1522 (phy_did | 0x000F) == p->xl_did) {
1523 sc->xl_pinfo = p;
1524 break;
1525 }
1526 p++;
1527 }
1528 if (sc->xl_pinfo == NULL)
1529 sc->xl_pinfo = &xl_phys[PHY_UNKNOWN];
1530 if (bootverbose)
1531 printf("xl%d: PHY type: %s\n",
1532 sc->xl_unit, sc->xl_pinfo->xl_name);
1533 } else {
1534 printf("xl%d: MII without any phy!\n", sc->xl_unit);
1535 }
1536 }
1537
1538 /*
1539 * Do ifmedia setup.
1540 */
1541 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1542
1543 if (sc->xl_media & XL_MEDIAOPT_BT) {
1544 if (bootverbose)
1545 printf("xl%d: found 10baseT\n", sc->xl_unit);
1546 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1547 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1548 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1549 ifmedia_add(&sc->ifmedia,
1550 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1551 }
1552
1553 if (sc->xl_media & XL_MEDIAOPT_AUI) {
1554 if (bootverbose)
1555 printf("xl%d: found AUI\n", sc->xl_unit);
1556 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1557 }
1558
1559 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1560 if (bootverbose)
1561 printf("xl%d: found BNC\n", sc->xl_unit);
1562 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1563 }
1564
1565 /*
1566 * Technically we could use xl_getmode_mii() to scan the
1567 * modes, but the built-in BTX mode on the 3c905B implies
1568 * 10/100 full/half duplex support anyway, so why not just
1569 * do it and get it over with.
1570 */
1571 if (sc->xl_media & XL_MEDIAOPT_BTX) {
1572 if (bootverbose)
1573 printf("xl%d: found 100baseTX\n", sc->xl_unit);
1574 ifp->if_baudrate = 100000000;
1575 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1576 ifmedia_add(&sc->ifmedia,
1577 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1578 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1579 ifmedia_add(&sc->ifmedia,
1580 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1581 if (sc->xl_pinfo != NULL)
1582 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1583 }
1584
1585 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1586 if (bootverbose)
1587 printf("xl%d: found 100baseFX\n", sc->xl_unit);
1588 ifp->if_baudrate = 100000000;
1589 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1590 }
1591
1592 /*
1593 * If there's an MII, we have to probe its modes
1594 * separately.
1595 */
1596 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) {
1597 if (bootverbose)
1598 printf("xl%d: found MII\n", sc->xl_unit);
1599 xl_getmode_mii(sc);
1600 }
1601
1602 /* Choose a default media. */
1603 switch(sc->xl_xcvr) {
1604 case XL_XCVR_10BT:
1605 media = IFM_ETHER|IFM_10_T;
1606 xl_setmode(sc, media);
1607 break;
1608 case XL_XCVR_AUI:
1609 media = IFM_ETHER|IFM_10_5;
1610 xl_setmode(sc, media);
1611 break;
1612 case XL_XCVR_COAX:
1613 media = IFM_ETHER|IFM_10_2;
1614 xl_setmode(sc, media);
1615 break;
1616 case XL_XCVR_AUTO:
1617 #ifdef XL_BACKGROUND_AUTONEG
1618 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1619 #else
1620 xl_autoneg_mii(sc, XL_FLAG_FORCEDELAY, 1);
1621 #endif
1622 media = sc->ifmedia.ifm_media;
1623 break;
1624 case XL_XCVR_100BTX:
1625 case XL_XCVR_MII:
1626 #ifdef XL_BACKGROUND_AUTONEG
1627 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1628 #else
1629 xl_autoneg_mii(sc, XL_FLAG_FORCEDELAY, 1);
1630 #endif
1631 media = sc->ifmedia.ifm_media;
1632 break;
1633 case XL_XCVR_100BFX:
1634 media = IFM_ETHER|IFM_100_FX;
1635 break;
1636 default:
1637 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
1638 sc->xl_xcvr);
1639 /*
1640 * This will probably be wrong, but it prevents
1641 * the ifmedia code from panicking.
1642 */
1643 media = IFM_ETHER|IFM_10_T;
1644 break;
1645 }
1646
1647 ifmedia_set(&sc->ifmedia, media);
1648
1649 /*
1650 * Call MI attach routines.
1651 */
1652 if_attach(ifp);
1653 ether_ifattach(ifp);
1654
1655 #if NBPFILTER > 0
1656 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1657 #endif
1658 at_shutdown(xl_shutdown, sc, SHUTDOWN_POST_SYNC);
1659
1660 fail:
1661 splx(s);
1662 return;
1663 }
1664
1665 /*
1666 * Initialize the transmit descriptors.
1667 */
1668 static int xl_list_tx_init(sc)
1669 struct xl_softc *sc;
1670 {
1671 struct xl_chain_data *cd;
1672 struct xl_list_data *ld;
1673 int i;
1674
1675 cd = &sc->xl_cdata;
1676 ld = sc->xl_ldata;
1677 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1678 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1679 if (i == (XL_TX_LIST_CNT - 1))
1680 cd->xl_tx_chain[i].xl_next = NULL;
1681 else
1682 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1683 }
1684
1685 cd->xl_tx_free = &cd->xl_tx_chain[0];
1686 cd->xl_tx_tail = cd->xl_tx_head = NULL;
1687
1688 return(0);
1689 }
1690
1691 /*
1692 * Initialize the RX descriptors and allocate mbufs for them. Note that
1693 * we arrange the descriptors in a closed ring, so that the last descriptor
1694 * points back to the first.
1695 */
1696 static int xl_list_rx_init(sc)
1697 struct xl_softc *sc;
1698 {
1699 struct xl_chain_data *cd;
1700 struct xl_list_data *ld;
1701 int i;
1702
1703 cd = &sc->xl_cdata;
1704 ld = sc->xl_ldata;
1705
1706 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1707 cd->xl_rx_chain[i].xl_ptr =
1708 (struct xl_list_onefrag *)&ld->xl_rx_list[i];
1709 if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
1710 return(ENOBUFS);
1711 if (i == (XL_RX_LIST_CNT - 1)) {
1712 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0];
1713 ld->xl_rx_list[i].xl_next =
1714 vtophys(&ld->xl_rx_list[0]);
1715 } else {
1716 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1];
1717 ld->xl_rx_list[i].xl_next =
1718 vtophys(&ld->xl_rx_list[i + 1]);
1719 }
1720 }
1721
1722 cd->xl_rx_head = &cd->xl_rx_chain[0];
1723
1724 return(0);
1725 }
1726
1727 /*
1728 * Initialize an RX descriptor and attach an MBUF cluster.
1729 */
1730 static int xl_newbuf(sc, c)
1731 struct xl_softc *sc;
1732 struct xl_chain_onefrag *c;
1733 {
1734 struct mbuf *m_new = NULL;
1735
1736 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1737 if (m_new == NULL) {
1738 printf("xl%d: no memory for rx list -- packet dropped!\n",
1739 sc->xl_unit);
1740 return(ENOBUFS);
1741 }
1742
1743 MCLGET(m_new, M_DONTWAIT);
1744 if (!(m_new->m_flags & M_EXT)) {
1745 printf("xl%d: no memory for rx list -- packet dropped!\n",
1746 sc->xl_unit);
1747 m_freem(m_new);
1748 return(ENOBUFS);
1749 }
1750
1751 c->xl_mbuf = m_new;
1752 c->xl_ptr->xl_status = 0;
1753 c->xl_ptr->xl_frag.xl_addr = vtophys(mtod(m_new, caddr_t));
1754 c->xl_ptr->xl_frag.xl_len = MCLBYTES | XL_LAST_FRAG;
1755
1756 return(0);
1757 }
1758
1759 /*
1760 * A frame has been uploaded: pass the resulting mbuf chain up to
1761 * the higher level protocols.
1762 */
1763 static void xl_rxeof(sc)
1764 struct xl_softc *sc;
1765 {
1766 struct ether_header *eh;
1767 struct mbuf *m;
1768 struct ifnet *ifp;
1769 struct xl_chain_onefrag *cur_rx;
1770 int total_len = 0;
1771 u_int16_t rxstat;
1772
1773 ifp = &sc->arpcom.ac_if;
1774
1775 again:
1776
1777 while((rxstat = sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) {
1778 cur_rx = sc->xl_cdata.xl_rx_head;
1779 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1780
1781 /*
1782 * If an error occurs, update stats, clear the
1783 * status word and leave the mbuf cluster in place:
1784 * it should simply get re-used next time this descriptor
1785 * comes up in the ring.
1786 */
1787 if (rxstat & XL_RXSTAT_UP_ERROR) {
1788 ifp->if_ierrors++;
1789 cur_rx->xl_ptr->xl_status = 0;
1790 continue;
1791 }
1792
1793 /*
1794 * If there error bit was not set, the upload complete
1795 * bit should be set which means we have a valid packet.
1796 * If not, something truly strange has happened.
1797 */
1798 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1799 printf("xl%d: bad receive status -- packet dropped",
1800 sc->xl_unit);
1801 ifp->if_ierrors++;
1802 cur_rx->xl_ptr->xl_status = 0;
1803 continue;
1804 }
1805
1806 /* No errors; receive the packet. */
1807 m = cur_rx->xl_mbuf;
1808 total_len = cur_rx->xl_ptr->xl_status & XL_RXSTAT_LENMASK;
1809
1810 /*
1811 * Try to conjure up a new mbuf cluster. If that
1812 * fails, it means we have an out of memory condition and
1813 * should leave the buffer in place and continue. This will
1814 * result in a lost packet, but there's little else we
1815 * can do in this situation.
1816 */
1817 if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
1818 ifp->if_ierrors++;
1819 cur_rx->xl_ptr->xl_status = 0;
1820 continue;
1821 }
1822
1823 eh = mtod(m, struct ether_header *);
1824 m->m_pkthdr.rcvif = ifp;
1825 #if NBPFILTER > 0
1826 /*
1827 * Handle BPF listeners. Let the BPF user see the packet, but
1828 * don't pass it up to the ether_input() layer unless it's
1829 * a broadcast packet, multicast packet, matches our ethernet
1830 * address or the interface is in promiscuous mode.
1831 */
1832 if (ifp->if_bpf) {
1833 m->m_pkthdr.len = m->m_len = total_len;
1834 bpf_mtap(ifp, m);
1835 if (ifp->if_flags & IFF_PROMISC &&
1836 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1837 ETHER_ADDR_LEN) &&
1838 (eh->ether_dhost[0] & 1) == 0)) {
1839 m_freem(m);
1840 continue;
1841 }
1842 }
1843 #endif
1844 /* Remove header from mbuf and pass it on. */
1845 m->m_pkthdr.len = m->m_len =
1846 total_len - sizeof(struct ether_header);
1847 m->m_data += sizeof(struct ether_header);
1848 ether_input(ifp, eh, m);
1849 }
1850
1851 /*
1852 * Handle the 'end of channel' condition. When the upload
1853 * engine hits the end of the RX ring, it will stall. This
1854 * is our cue to flush the RX ring, reload the uplist pointer
1855 * register and unstall the engine.
1856 * XXX This is actually a little goofy. With the ThunderLAN
1857 * chip, you get an interrupt when the receiver hits the end
1858 * of the receive ring, which tells you exactly when you
1859 * you need to reload the ring pointer. Here we have to
1860 * fake it. I'm mad at myself for not being clever enough
1861 * to avoid the use of a goto here.
1862 */
1863 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1864 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1865 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1866 xl_wait(sc);
1867 CSR_WRITE_4(sc, XL_UPLIST_PTR,
1868 vtophys(&sc->xl_ldata->xl_rx_list[0]));
1869 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1870 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1871 goto again;
1872 }
1873
1874 return;
1875 }
1876
1877 /*
1878 * A frame was downloaded to the chip. It's safe for us to clean up
1879 * the list buffers.
1880 */
1881 static void xl_txeof(sc)
1882 struct xl_softc *sc;
1883 {
1884 struct xl_chain *cur_tx;
1885 struct ifnet *ifp;
1886
1887 ifp = &sc->arpcom.ac_if;
1888
1889 /* Clear the timeout timer. */
1890 ifp->if_timer = 0;
1891
1892 /*
1893 * Go through our tx list and free mbufs for those
1894 * frames that have been uploaded. Note: the 3c905B
1895 * sets a special bit in the status word to let us
1896 * know that a frame has been downloaded, but the
1897 * original 3c900/3c905 adapters don't do that.
1898 * Consequently, we have to use a different test if
1899 * xl_type != XL_TYPE_905B.
1900 */
1901 while(sc->xl_cdata.xl_tx_head != NULL) {
1902 cur_tx = sc->xl_cdata.xl_tx_head;
1903 if ((sc->xl_type == XL_TYPE_905B &&
1904 !(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE)) ||
1905 CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1906 break;
1907 }
1908 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1909
1910 m_freem(cur_tx->xl_mbuf);
1911 cur_tx->xl_mbuf = NULL;
1912
1913 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1914 sc->xl_cdata.xl_tx_free = cur_tx;
1915 }
1916
1917 if (sc->xl_cdata.xl_tx_head == NULL) {
1918 ifp->if_flags &= ~IFF_OACTIVE;
1919 sc->xl_cdata.xl_tx_tail = NULL;
1920 if (sc->xl_want_auto)
1921 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1922 } else {
1923 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1924 !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1925 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1926 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
1927 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1928 }
1929 }
1930
1931 return;
1932 }
1933
1934 /*
1935 * TX 'end of channel' interrupt handler. Actually, we should
1936 * only get a 'TX complete' interrupt if there's a transmit error,
1937 * so this is really TX error handler.
1938 */
1939 static void xl_txeoc(sc)
1940 struct xl_softc *sc;
1941 {
1942 u_int8_t txstat;
1943
1944 while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1945 if (txstat & XL_TXSTATUS_UNDERRUN ||
1946 txstat & XL_TXSTATUS_JABBER ||
1947 txstat & XL_TXSTATUS_RECLAIM) {
1948 printf("xl%d: transmission error: %x\n",
1949 sc->xl_unit, txstat);
1950 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1951 xl_wait(sc);
1952 if (sc->xl_cdata.xl_tx_head != NULL)
1953 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1954 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
1955 /*
1956 * Remember to set this for the
1957 * first generation 3c90X chips.
1958 */
1959 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1960 if (sc->xl_type == XL_TYPE_905B) {
1961 CSR_WRITE_2(sc, XL_COMMAND,
1962 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1963 }
1964 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1965 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1966 } else {
1967 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1968 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1969 }
1970 /*
1971 * Write an arbitrary byte to the TX_STATUS register
1972 * to clear this interrupt/error and advance to the next.
1973 */
1974 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1975 }
1976
1977 return;
1978 }
1979
/*
 * Interrupt handler: loop reading the status register and dispatching
 * each asserted condition (RX complete, TX complete, TX error, adapter
 * failure, statistics overflow) until no interesting bits remain, then
 * re-enable interrupts and restart transmission if frames are queued.
 */
static void xl_intr(arg)
	void			*arg;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Disable interrupts. */
	/* (INTR_ENB with no interrupt bits set masks everything.) */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);

	for (;;) {

		status = CSR_READ_2(sc, XL_STATUS);

		/* Done once no condition we care about is asserted. */
		if ((status & XL_INTRS) == 0)
			break;

		/* Each condition is acked individually after handling. */
		if (status & XL_STAT_UP_COMPLETE) {
			xl_rxeof(sc);
			CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_INTR_ACK|XL_STAT_UP_COMPLETE);
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			xl_txeof(sc);
			CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_INTR_ACK|XL_STAT_DOWN_COMPLETE);
		}

		if (status & XL_STAT_TX_COMPLETE) {
			/* TX complete only fires on error -- see xl_txeoc(). */
			ifp->if_oerrors++;
			xl_txeoc(sc);
			CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_INTR_ACK|XL_STAT_TX_COMPLETE);
		}

		if (status & XL_STAT_ADFAIL) {
			/* Adapter failure: full reset and reinit. */
			xl_reset(sc);
			xl_init(sc);
			CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_INTR_ACK|XL_STAT_ADFAIL);
		}

		if (status & XL_STAT_STATSOFLOW) {
			/* Suppress the timeout reschedule inside the update. */
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}

		CSR_WRITE_2(sc, XL_STATUS, XL_CMD_INTR_ACK|XL_STAT_INTREQ|
						XL_STAT_INTLATCH);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	XL_SEL_WIN(7);

	/* Kick the transmitter if frames queued up while we were busy. */
	if (ifp->if_snd.ifq_head != NULL) {
		xl_start(ifp);
	}

	return;
}
2047
/*
 * Periodic statistics harvester (also called directly from xl_intr on
 * a stats-overflow interrupt, with xl_stats_no_timeout set so it does
 * not reschedule itself). Reading the window 6 registers clears them,
 * which is what prevents further overflow interrupts.
 */
static void xl_stats_update(xsc)
	void			*xsc;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	struct xl_stats		xl_stats;
	u_int8_t		*p;
	int			i;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	/*
	 * Copied byte-by-byte into xl_stats, so the struct layout must
	 * mirror the window 6 register layout exactly.
	 */
	XL_SEL_WIN(6);

	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ipackets += xl_rx_goodframes(xl_stats);
	ifp->if_opackets += xl_tx_goodframes(xl_stats);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
				xl_stats.xl_tx_single_collision +
				xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	XL_SEL_WIN(7);

	/* Reschedule ourselves once per second unless called from xl_intr. */
	if (!sc->xl_stats_no_timeout)
		timeout(xl_stats_update, sc, hz);

	return;
}
2095
2096 /*
2097 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2098 * pointers to the fragment pointers.
2099 */
2100 static int xl_encap(sc, c, m_head)
2101 struct xl_softc *sc;
2102 struct xl_chain *c;
2103 struct mbuf *m_head;
2104 {
2105 int frag = 0;
2106 struct xl_frag *f = NULL;
2107 int total_len;
2108 struct mbuf *m;
2109
2110 /*
2111 * Start packing the mbufs in this chain into
2112 * the fragment pointers. Stop when we run out
2113 * of fragments or hit the end of the mbuf chain.
2114 */
2115 m = m_head;
2116 total_len = 0;
2117
2118 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
2119 if (m->m_len != 0) {
2120 if (frag == XL_MAXFRAGS)
2121 break;
2122 total_len+= m->m_len;
2123 c->xl_ptr->xl_frag[frag].xl_addr =
2124 vtophys(mtod(m, vm_offset_t));
2125 c->xl_ptr->xl_frag[frag].xl_len = m->m_len;
2126 frag++;
2127 }
2128 }
2129
2130 /*
2131 * Handle special case: we used up all 63 fragments,
2132 * but we have more mbufs left in the chain. Copy the
2133 * data into an mbuf cluster. Note that we don't
2134 * bother clearing the values in the other fragment
2135 * pointers/counters; it wouldn't gain us anything,
2136 * and would waste cycles.
2137 */
2138 if (m != NULL) {
2139 struct mbuf *m_new = NULL;
2140
2141 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2142 if (m_new == NULL) {
2143 printf("xl%d: no memory for tx list", sc->xl_unit);
2144 return(1);
2145 }
2146 if (m_head->m_pkthdr.len > MHLEN) {
2147 MCLGET(m_new, M_DONTWAIT);
2148 if (!(m_new->m_flags & M_EXT)) {
2149 m_freem(m_new);
2150 printf("xl%d: no memory for tx list",
2151 sc->xl_unit);
2152 return(1);
2153 }
2154 }
2155 m_copydata(m_head, 0, m_head->m_pkthdr.len,
2156 mtod(m_new, caddr_t));
2157 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
2158 m_freem(m_head);
2159 m_head = m_new;
2160 f = &c->xl_ptr->xl_frag[0];
2161 f->xl_addr = vtophys(mtod(m_new, caddr_t));
2162 f->xl_len = total_len = m_new->m_len;
2163 frag = 1;
2164 }
2165
2166 c->xl_mbuf = m_head;
2167 c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG;
2168 c->xl_ptr->xl_status = total_len;
2169 c->xl_ptr->xl_next = 0;
2170
2171 return(0);
2172 }
2173
2174 /*
2175 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2176 * to the mbuf data regions directly in the transmit lists. We also save a
2177 * copy of the pointers since the transmit list fragment pointers are
2178 * physical addresses.
2179 */
2180
2181 static void xl_start(ifp)
2182 struct ifnet *ifp;
2183 {
2184 struct xl_softc *sc;
2185 struct mbuf *m_head = NULL;
2186 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
2187
2188 sc = ifp->if_softc;
2189
2190 if (sc->xl_autoneg) {
2191 sc->xl_tx_pend = 1;
2192 return;
2193 }
2194
2195 /*
2196 * Check for an available queue slot. If there are none,
2197 * punt.
2198 */
2199 if (sc->xl_cdata.xl_tx_free == NULL) {
2200 xl_txeoc(sc);
2201 xl_txeof(sc);
2202 if (sc->xl_cdata.xl_tx_free == NULL) {
2203 ifp->if_flags |= IFF_OACTIVE;
2204 return;
2205 }
2206 }
2207
2208 start_tx = sc->xl_cdata.xl_tx_free;
2209
2210 while(sc->xl_cdata.xl_tx_free != NULL) {
2211 IF_DEQUEUE(&ifp->if_snd, m_head);
2212 if (m_head == NULL)
2213 break;
2214
2215 /* Pick a descriptor off the free list. */
2216 cur_tx = sc->xl_cdata.xl_tx_free;
2217 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
2218
2219 cur_tx->xl_next = NULL;
2220
2221 /* Pack the data into the descriptor. */
2222 xl_encap(sc, cur_tx, m_head);
2223
2224 /* Chain it together. */
2225 if (prev != NULL) {
2226 prev->xl_next = cur_tx;
2227 prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr);
2228 }
2229 prev = cur_tx;
2230
2231 #if NBPFILTER > 0
2232 /*
2233 * If there's a BPF listener, bounce a copy of this frame
2234 * to him.
2235 */
2236 if (ifp->if_bpf)
2237 bpf_mtap(ifp, cur_tx->xl_mbuf);
2238 #endif
2239 }
2240
2241 /*
2242 * If there are no packets queued, bail.
2243 */
2244 if (cur_tx == NULL)
2245 return;
2246
2247 /*
2248 * Place the request for the upload interrupt
2249 * in the last descriptor in the chain. This way, if
2250 * we're chaining several packets at once, we'll only
2251 * get an interupt once for the whole chain rather than
2252 * once for each packet.
2253 */
2254 cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;
2255
2256 /*
2257 * Queue the packets. If the TX channel is clear, update
2258 * the downlist pointer register.
2259 */
2260 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2261 xl_wait(sc);
2262
2263 if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
2264 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
2265 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
2266 vtophys(start_tx->xl_ptr);
2267 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
2268 ~XL_TXSTAT_DL_INTR;
2269 sc->xl_cdata.xl_tx_tail = cur_tx;
2270 } else {
2271 sc->xl_cdata.xl_tx_head = start_tx;
2272 sc->xl_cdata.xl_tx_tail = cur_tx;
2273 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr));
2274 }
2275 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2276
2277 XL_SEL_WIN(7);
2278
2279 /*
2280 * Set a timeout in case the chip goes out to lunch.
2281 */
2282 ifp->if_timer = 5;
2283
2284 /*
2285 * XXX Under certain conditions, usually on slower machines
2286 * where interrupts may be dropped, it's possible for the
2287 * adapter to chew up all the buffers in the receive ring
2288 * and stall, without us being able to do anything about it.
2289 * To guard against this, we need to make a pass over the
2290 * RX queue to make sure there aren't any packets pending.
2291 * Doing it here means we can flush the receive ring at the
2292 * same time the chip is DMAing the transmit descriptors we
2293 * just gave it.
2294 *
2295 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
2296 * nature of their chips in all their marketing literature;
2297 * we may as well take advantage of it. :)
2298 */
2299 xl_rxeof(sc);
2300
2301 return;
2302 }
2303
/*
 * Bring the interface up: reset driver state, reload chip registers,
 * re-arm the RX/TX engines and interrupts.  Called from xl_ioctl()
 * (IFF_UP) and xl_watchdog(); also registered as the if_init hook,
 * hence the void * argument.  The register-write ordering below is
 * significant - do not reorder casually.
 */
static void xl_init(xsc)
	void			*xsc;
{
	struct xl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s, i;
	u_int16_t		rxfilt = 0;
	u_int16_t		phy_bmcr = 0;

	/* Don't fight with an autonegotiation already in progress. */
	if (sc->xl_autoneg)
		return;


	s = splimp();

	/*
	 * XXX Hack for the 3c905B: the built-in autoneg logic's state
	 * gets reset by xl_init() when we don't want it to. Try
	 * to preserve it. (For 3c905 cards with real external PHYs,
	 * the BMCR register doesn't change, but this doesn't hurt.)
	 */
	if (sc->xl_pinfo != NULL)
		phy_bmcr = xl_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	xl_wait(sc);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
				sc->arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	/* Three 16-bit writes cover the 6-byte mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);

#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("xl%d: initialization failed: no "
			"memory for rx buffers\n", sc->xl_unit);
		xl_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
			XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxfilt |= XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxfilt |= XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	xl_setmulti(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);


	/* Clear out the stats counters. */
	/* Reading the counters via xl_stats_update() resets them;
	   xl_stats_no_timeout suppresses rescheduling the periodic
	   timeout during this one synchronous call. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);

	/* Restore state of BMCR */
	if (sc->xl_pinfo != NULL)
		xl_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	/* Kick off the periodic statistics updates. */
	timeout(xl_stats_update, sc, hz);

	return;
}
2485
2486 /*
2487 * Set media options.
2488 */
2489 static int xl_ifmedia_upd(ifp)
2490 struct ifnet *ifp;
2491 {
2492 struct xl_softc *sc;
2493 struct ifmedia *ifm;
2494
2495 sc = ifp->if_softc;
2496 ifm = &sc->ifmedia;
2497
2498 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2499 return(EINVAL);
2500
2501 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2502 || sc->xl_media & XL_MEDIAOPT_BT4) {
2503 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
2504 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
2505 else
2506 xl_setmode_mii(sc, ifm->ifm_media);
2507 } else {
2508 xl_setmode(sc, ifm->ifm_media);
2509 }
2510
2511 return(0);
2512 }
2513
/*
 * Report current media status.  The active connector is read back from
 * the chip's internal config register (window 3); for PHY-based media
 * the speed/duplex is derived from the PHY registers.
 */
static void xl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct xl_softc		*sc;
	u_int16_t		advert = 0, ability = 0;
	u_int32_t		icfg;

	sc = ifp->if_softc;

	/* Extract the connector field from the internal config register. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;

	switch(icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		/* Duplex comes from the MAC control register, not the PHY. */
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		/* Autoneg disabled: report the manually-forced mode
		   from BMCR plus the MAC's duplex setting. */
		if (!(xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
			if (xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
				ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
			else
				ifmr->ifm_active = IFM_ETHER|IFM_10_T;
			XL_SEL_WIN(3);
			if (CSR_READ_2(sc, XL_W3_MAC_CTRL) &
						XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
			break;
		}
		/* Autoneg enabled: pick the highest mode both we and the
		   link partner advertise, in descending preference order. */
		ability = xl_phy_readreg(sc, XL_PHY_LPAR);
		advert = xl_phy_readreg(sc, XL_PHY_ANAR);
		if (advert & PHY_ANAR_100BT4 &&
			ability & PHY_ANAR_100BT4) {
			ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
		} else if (advert & PHY_ANAR_100BTXFULL &&
			ability & PHY_ANAR_100BTXFULL) {
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
		} else if (advert & PHY_ANAR_100BTXHALF &&
			ability & PHY_ANAR_100BTXHALF) {
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
		} else if (advert & PHY_ANAR_10BTFULL &&
			ability & PHY_ANAR_10BTFULL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
		} else if (advert & PHY_ANAR_10BTHALF &&
			ability & PHY_ANAR_10BTHALF) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		/* NOTE(review): icfg is u_int32_t but printed with %d;
		   harmless on ILP32/LP64 but %u would be exact. */
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
		break;
	}

	return;
}
2596
2597 static int xl_ioctl(ifp, command, data)
2598 struct ifnet *ifp;
2599 int command;
2600 caddr_t data;
2601 {
2602 struct xl_softc *sc = ifp->if_softc;
2603 struct ifreq *ifr = (struct ifreq *) data;
2604 int s, error = 0;
2605
2606 s = splimp();
2607
2608 switch(command) {
2609 case SIOCSIFADDR:
2610 case SIOCGIFADDR:
2611 case SIOCSIFMTU:
2612 error = ether_ioctl(ifp, command, data);
2613 break;
2614 case SIOCSIFFLAGS:
2615 if (ifp->if_flags & IFF_UP) {
2616 xl_init(sc);
2617 } else {
2618 if (ifp->if_flags & IFF_RUNNING)
2619 xl_stop(sc);
2620 }
2621 error = 0;
2622 break;
2623 case SIOCADDMULTI:
2624 case SIOCDELMULTI:
2625 if (command == SIOCADDMULTI)
2626 error = ether_addmulti(ifr, &sc->arpcom);
2627 else
2628 error = ether_delmulti(ifr, &sc->arpcom);
2629 if (error == ENETRESET) {
2630 xl_setmulti(sc);
2631 error = 0;
2632 }
2633 break;
2634 case SIOCGIFMEDIA:
2635 case SIOCSIFMEDIA:
2636 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2637 break;
2638 default:
2639 error = EINVAL;
2640 break;
2641 }
2642
2643 (void)splx(s);
2644
2645 return(error);
2646 }
2647
2648 static void xl_watchdog(ifp)
2649 struct ifnet *ifp;
2650 {
2651 struct xl_softc *sc;
2652 u_int16_t status = 0;
2653
2654 sc = ifp->if_softc;
2655
2656 if (sc->xl_autoneg) {
2657 xl_autoneg_mii(sc, XL_FLAG_DELAYTIMEO, 1);
2658 return;
2659 }
2660
2661 ifp->if_oerrors++;
2662 XL_SEL_WIN(4);
2663 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2664 printf("xl%d: watchdog timeout\n", sc->xl_unit);
2665
2666 if (status & XL_MEDIASTAT_CARRIER)
2667 printf("xl%d: no carrier - transceiver cable problem?\n",
2668 sc->xl_unit);
2669 xl_txeoc(sc);
2670 xl_txeof(sc);
2671 xl_rxeof(sc);
2672 xl_init(sc);
2673
2674 if (ifp->if_snd.ifq_head != NULL)
2675 xl_start(ifp);
2676
2677 return;
2678 }
2679
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Disables the RX/TX engines and interrupts,
 * cancels the stats timeout and clears the descriptor rings.
 */
static void xl_stop(sc)
	struct xl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	/* Disarm the transmit watchdog. */
	ifp->if_timer = 0;

	/* Quiesce the chip: disable RX, stats and interrupts, discard
	   any partially received frame, then disable TX and the coax
	   DC-DC converter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	/* Let the DC-DC converter settle after COAX_STOP. */
	DELAY(800);
#ifdef notdef
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Acknowledge any latched interrupt so it doesn't fire later. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);

	/* Stop the stats updater. */
	untimeout(xl_stats_update, sc);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->xl_ldata->xl_rx_list,
		sizeof(sc->xl_ldata->xl_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->xl_ldata->xl_tx_list,
		sizeof(sc->xl_ldata->xl_tx_list));

	/* Mark the interface as down and not busy. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
2739
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void xl_shutdown(howto, arg)
	int			howto;
	void			*arg;
{
	/* The howto flags are irrelevant here; just quiesce the chip. */
	xl_stop((struct xl_softc *)arg);

	return;
}
2754
2755
/*
 * PCI driver registration record: device name, probe/attach entry
 * points and the unit counter.  Linked into the kernel's PCI device
 * table via the pcidevice_set linker set.
 */
static struct pci_device xl_device = {
	"xl",		/* device name prefix (xl0, xl1, ...) */
	xl_probe,	/* match routine */
	xl_attach,	/* attach routine */
	&xl_count,	/* unit counter */
	NULL		/* no shutdown hook here; registered elsewhere */
};
DATA_SET(pcidevice_set, xl_device);
Cache object: 2d428500c7402cb2012ef2ca132786c6
|