FreeBSD/Linux Kernel Cross Reference
sys/pci/if_ax.c
1 /*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD$
33 */
34
35 /*
36 * ASIX AX88140A and AX88141 fast ethernet PCI NIC driver.
37 *
38 * Written by Bill Paul <wpaul@ctr.columbia.edu>
39 * Electrical Engineering Department
40 * Columbia University, New York City
41 */
42
43 /*
44 * The ASIX Electronics AX88140A is still another DEC 21x4x clone. It's
45 * a reasonably close copy of the tulip, except for the receiver filter
46 * programming. Where the DEC chip has a special setup frame that
47 * needs to be downloaded into the transmit DMA engine, the ASIX chip
48 * has a less complicated setup frame which is written into one of
49 * the registers.
50 */
51
52 #include "bpfilter.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/sockio.h>
57 #include <sys/mbuf.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61
62 #include <net/if.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67
68 #if NBPFILTER > 0
69 #include <net/bpf.h>
70 #endif
71
72 #include <vm/vm.h> /* for vtophys */
73 #include <vm/pmap.h> /* for vtophys */
74 #include <machine/clock.h> /* for DELAY */
75 #include <machine/bus_pio.h>
76 #include <machine/bus_memio.h>
77 #include <machine/bus.h>
78
79 #include <pci/pcireg.h>
80 #include <pci/pcivar.h>
81
82 #define AX_USEIOSPACE
83
84 /* #define AX_BACKGROUND_AUTONEG */
85
86 #include <pci/if_axreg.h>
87
88 #ifndef lint
89 static const char rcsid[] =
90 "$FreeBSD$";
91 #endif
92
/*
 * Various supported device vendors/types and their names.
 *
 * NOTE: both entries deliberately use AX_DEVICEID_AX88140A.  The
 * AX88141 shares the AX88140A's PCI device ID; ax_probe() tells
 * them apart by PCI revision and advances to the next table entry
 * to pick up the AX88141 name, so the ordering here matters.
 */
static struct ax_type ax_devs[] = {
	{ AX_VENDORID, AX_DEVICEID_AX88140A,
		"ASIX AX88140A 10/100BaseTX" },
	{ AX_VENDORID, AX_DEVICEID_AX88140A,
		"ASIX AX88141 10/100BaseTX" },
	{ 0, 0, NULL }
};
103
/*
 * Various supported PHY vendors/types and their names. Note that
 * this driver will work with pretty much any MII-compliant PHY,
 * so failure to positively identify the chip is not a fatal error.
 *
 * The final catch-all entry (vendor/device 0) supplies a generic
 * name for any PHY not matched above.
 */

static struct ax_type ax_phys[] = {
	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
	{ 0, 0, "<MII-compliant physical interface>" }
};
119
/* Count of attached units (unused beyond bookkeeping in this chunk). */
static unsigned long ax_count = 0;

/* PCI probe/attach entry points. */
static const char *ax_probe	__P((pcici_t, pcidi_t));
static void ax_attach		__P((pcici_t, int));

/* Descriptor ring buffer management. */
static int ax_newbuf		__P((struct ax_softc *,
						struct ax_chain_onefrag *));
static int ax_encap		__P((struct ax_softc *, struct ax_chain *,
						struct mbuf *));

/* Interrupt-time RX/TX completion handlers and ifnet methods. */
static void ax_rxeof		__P((struct ax_softc *));
static void ax_rxeoc		__P((struct ax_softc *));
static void ax_txeof		__P((struct ax_softc *));
static void ax_txeoc		__P((struct ax_softc *));
static void ax_intr		__P((void *));
static void ax_start		__P((struct ifnet *));
static int ax_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void ax_init		__P((void *));
static void ax_stop		__P((struct ax_softc *));
static void ax_watchdog		__P((struct ifnet *));
static void ax_shutdown		__P((int, void *));
static int ax_ifmedia_upd	__P((struct ifnet *));
static void ax_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

/* Bit-banged serial EEPROM access (station address lives there). */
static void ax_delay		__P((struct ax_softc *));
static void ax_eeprom_idle	__P((struct ax_softc *));
static void ax_eeprom_putbyte	__P((struct ax_softc *, int));
static void ax_eeprom_getword	__P((struct ax_softc *, int, u_int16_t *));
static void ax_read_eeprom	__P((struct ax_softc *, caddr_t, int,
							int, int));

/* Bit-banged MII (MDIO) access to the PHY. */
static void ax_mii_writebit	__P((struct ax_softc *, int));
static int ax_mii_readbit	__P((struct ax_softc *));
static void ax_mii_sync		__P((struct ax_softc *));
static void ax_mii_send		__P((struct ax_softc *, u_int32_t, int));
static int ax_mii_readreg	__P((struct ax_softc *, struct ax_mii_frame *));
static int ax_mii_writereg	__P((struct ax_softc *, struct ax_mii_frame *));
static u_int16_t ax_phy_readreg	__P((struct ax_softc *, int));
static void ax_phy_writereg	__P((struct ax_softc *, int, int));

/* Media selection, autonegotiation and chip configuration. */
static void ax_autoneg_xmit	__P((struct ax_softc *));
static void ax_autoneg_mii	__P((struct ax_softc *, int, int));
static void ax_setmode_mii	__P((struct ax_softc *, int));
static void ax_setmode		__P((struct ax_softc *, int, int));
static void ax_getmode_mii	__P((struct ax_softc *));
static void ax_setcfg		__P((struct ax_softc *, int));
static u_int32_t ax_calchash	__P((caddr_t));
static void ax_setmulti		__P((struct ax_softc *));
static void ax_reset		__P((struct ax_softc *));
static int ax_list_rx_init	__P((struct ax_softc *));
static int ax_list_tx_init	__P((struct ax_softc *));
170
/*
 * Read-modify-write helpers: set or clear the bits in 'x' within an
 * arbitrary CSR (AX_SETBIT/AX_CLRBIT) or within the serial I/O
 * register specifically (SIO_SET/SIO_CLR, used by the EEPROM and
 * MII bit-bang routines, which assume 'sc' is in scope).
 */
#define AX_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define AX_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, AX_SIO,				\
		CSR_READ_4(sc, AX_SIO) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, AX_SIO,				\
		CSR_READ_4(sc, AX_SIO) & ~x)
186
187 static void ax_delay(sc)
188 struct ax_softc *sc;
189 {
190 int idx;
191
192 for (idx = (300 / 33) + 1; idx > 0; idx--)
193 CSR_READ_4(sc, AX_BUSCTL);
194 }
195
/*
 * Force the serial EEPROM into its idle state: select the EEPROM in
 * read mode with chip select and clock raised, toggle the clock 25
 * times, then drop clock and chip select and release the SIO
 * register entirely.
 */
static void ax_eeprom_idle(sc)
	struct ax_softc		*sc;
{
	register int		i;

	/* Select EEPROM, read mode, raise CS then CLK -- order matters. */
	CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
	ax_delay(sc);

	/* Clock out enough cycles to flush any in-progress command. */
	for (i = 0; i < 25; i++) {
		AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
		ax_delay(sc);
		AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
		ax_delay(sc);
	}

	/* Deassert clock and chip select, then fully release the SIO. */
	AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
	ax_delay(sc);
	AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CS);
	ax_delay(sc);
	CSR_WRITE_4(sc, AX_SIO, 0x00000000);

	return;
}
225
/*
 * Send a read command and address to the EEPROM, check for ACK.
 * The 11 bits of (AX_EECMD_READ | addr) are shifted out MSB-first
 * on the data-in line, one bit per clock pulse.
 */
static void ax_eeprom_putbyte(sc, addr)
	struct ax_softc		*sc;
	int			addr;
{
	register int		d, i;

	d = addr | AX_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(AX_SIO_EE_DATAIN);
		} else {
			SIO_CLR(AX_SIO_EE_DATAIN);
		}
		ax_delay(sc);
		SIO_SET(AX_SIO_EE_CLK);
		ax_delay(sc);
		SIO_CLR(AX_SIO_EE_CLK);
		ax_delay(sc);
	}

	return;
}
255
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The EEPROM is idled, put into access mode, sent the read command,
 * and then 16 bits are clocked in MSB-first from the data-out line.
 */
static void ax_eeprom_getword(sc, addr, dest)
	struct ax_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/* Force EEPROM to idle state. */
	ax_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
	ax_delay(sc);
	AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
	ax_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	ax_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(AX_SIO_EE_CLK);
		ax_delay(sc);
		if (CSR_READ_4(sc, AX_SIO) & AX_SIO_EE_DATAOUT)
			word |= i;
		ax_delay(sc);
		SIO_CLR(AX_SIO_EE_CLK);
		ax_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	ax_eeprom_idle(sc);

	*dest = word;

	return;
}
305
306 /*
307 * Read a sequence of words from the EEPROM.
308 */
309 static void ax_read_eeprom(sc, dest, off, cnt, swap)
310 struct ax_softc *sc;
311 caddr_t dest;
312 int off;
313 int cnt;
314 int swap;
315 {
316 int i;
317 u_int16_t word = 0, *ptr;
318
319 for (i = 0; i < cnt; i++) {
320 ax_eeprom_getword(sc, off + i, &word);
321 ptr = (u_int16_t *)(dest + (i * 2));
322 if (swap)
323 *ptr = ntohs(word);
324 else
325 *ptr = word;
326 }
327
328 return;
329 }
330
/*
 * Write a bit to the MII bus: drive the data-out line to the bit
 * value (in ROM-control write mode), then pulse the MII clock
 * high and low once.
 */
static void ax_mii_writebit(sc, bit)
	struct ax_softc		*sc;
	int			bit;
{
	if (bit)
		CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE|AX_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);

	AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
	AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);

	return;
}
348
/*
 * Read a bit from the MII bus: switch the data line to input,
 * pulse the clock once, then sample the data-in line.
 * Returns 1 or 0.
 */
static int ax_mii_readbit(sc)
	struct ax_softc		*sc;
{
	CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_READ|AX_SIO_MII_DIR);
	CSR_READ_4(sc, AX_SIO);	/* flush the mode change before clocking */
	AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
	AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);
	if (CSR_READ_4(sc, AX_SIO) & AX_SIO_MII_DATAIN)
		return(1);

	return(0);
}
364
365 /*
366 * Sync the PHYs by setting data bit and strobing the clock 32 times.
367 */
368 static void ax_mii_sync(sc)
369 struct ax_softc *sc;
370 {
371 register int i;
372
373 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);
374
375 for (i = 0; i < 32; i++)
376 ax_mii_writebit(sc, 1);
377
378 return;
379 }
380
381 /*
382 * Clock a series of bits through the MII.
383 */
384 static void ax_mii_send(sc, bits, cnt)
385 struct ax_softc *sc;
386 u_int32_t bits;
387 int cnt;
388 {
389 int i;
390
391 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
392 ax_mii_writebit(sc, bits & i);
393 }
394
/*
 * Read a PHY register through the MII, using the standard clause-22
 * management frame: start delimiter, read opcode, PHY address,
 * register address, then 16 data bits clocked in from the PHY.
 * On entry frame->mii_phyaddr and frame->mii_regaddr must be set;
 * on success the result is left in frame->mii_data.
 * Returns 0 on success, 1 if the PHY failed to ACK.
 */
static int ax_mii_readreg(sc, frame)
	struct ax_softc		*sc;
	struct ax_mii_frame	*frame;

{
	int			i, ack, s;

	/* Block interrupts while bit-banging the bus. */
	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = AX_MII_STARTDELIM;
	frame->mii_opcode = AX_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	ax_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ax_mii_send(sc, frame->mii_stdelim, 2);
	ax_mii_send(sc, frame->mii_opcode, 2);
	ax_mii_send(sc, frame->mii_phyaddr, 5);
	ax_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	ax_mii_writebit(sc, 1);
	ax_mii_writebit(sc, 0);
#endif

	/* Check for ack: the PHY pulls the line low during turnaround. */
	ack = ax_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			ax_mii_readbit(sc);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (ax_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Trailing idle bits to release the bus. */
	ax_mii_writebit(sc, 0);
	ax_mii_writebit(sc, 0);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
466
/*
 * Write to a PHY register through the MII: start delimiter, write
 * opcode, PHY address, register address, turnaround pattern, then
 * the 16 data bits from frame->mii_data.  Always returns 0 (a write
 * frame carries no ACK from the PHY).
 */
static int ax_mii_writereg(sc, frame)
	struct ax_softc		*sc;
	struct ax_mii_frame	*frame;

{
	int			s;

	/* Block interrupts while bit-banging the bus. */
	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = AX_MII_STARTDELIM;
	frame->mii_opcode = AX_MII_WRITEOP;
	frame->mii_turnaround = AX_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	ax_mii_sync(sc);

	ax_mii_send(sc, frame->mii_stdelim, 2);
	ax_mii_send(sc, frame->mii_opcode, 2);
	ax_mii_send(sc, frame->mii_phyaddr, 5);
	ax_mii_send(sc, frame->mii_regaddr, 5);
	ax_mii_send(sc, frame->mii_turnaround, 2);
	ax_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	ax_mii_writebit(sc, 0);
	ax_mii_writebit(sc, 0);

	splx(s);

	return(0);
}
506
507 static u_int16_t ax_phy_readreg(sc, reg)
508 struct ax_softc *sc;
509 int reg;
510 {
511 struct ax_mii_frame frame;
512
513 bzero((char *)&frame, sizeof(frame));
514
515 frame.mii_phyaddr = sc->ax_phy_addr;
516 frame.mii_regaddr = reg;
517 ax_mii_readreg(sc, &frame);
518
519 return(frame.mii_data);
520 }
521
522 static void ax_phy_writereg(sc, reg, data)
523 struct ax_softc *sc;
524 int reg;
525 int data;
526 {
527 struct ax_mii_frame frame;
528
529 bzero((char *)&frame, sizeof(frame));
530
531 frame.mii_phyaddr = sc->ax_phy_addr;
532 frame.mii_regaddr = reg;
533 frame.mii_data = data;
534
535 ax_mii_writereg(sc, &frame);
536
537 return;
538 }
539
540 /*
541 * Calculate CRC of a multicast group address, return the lower 6 bits.
542 */
543 static u_int32_t ax_calchash(addr)
544 caddr_t addr;
545 {
546 u_int32_t crc, carry;
547 int i, j;
548 u_int8_t c;
549
550 /* Compute CRC for the address value. */
551 crc = 0xFFFFFFFF; /* initial value */
552
553 for (i = 0; i < 6; i++) {
554 c = *(addr + i);
555 for (j = 0; j < 8; j++) {
556 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
557 crc <<= 1;
558 c >>= 1;
559 if (carry)
560 crc = (crc ^ 0x04c11db6) | carry;
561 }
562 }
563
564 /* return the filter bit position */
565 return((crc >> 26) & 0x0000003F);
566 }
567
568 static void ax_setmulti(sc)
569 struct ax_softc *sc;
570 {
571 struct ifnet *ifp;
572 int h = 0;
573 u_int32_t hashes[2] = { 0, 0 };
574 struct ifmultiaddr *ifma;
575 u_int32_t rxfilt;
576
577 ifp = &sc->arpcom.ac_if;
578
579 rxfilt = CSR_READ_4(sc, AX_NETCFG);
580
581 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
582 rxfilt |= AX_NETCFG_RX_ALLMULTI;
583 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
584 return;
585 } else
586 rxfilt &= ~AX_NETCFG_RX_ALLMULTI;
587
588 /* first, zot all the existing hash bits */
589 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
590 CSR_WRITE_4(sc, AX_FILTDATA, 0);
591 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
592 CSR_WRITE_4(sc, AX_FILTDATA, 0);
593
594 /* now program new ones */
595 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
596 ifma = ifma->ifma_link.le_next) {
597 if (ifma->ifma_addr->sa_family != AF_LINK)
598 continue;
599 h = ax_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
600 if (h < 32)
601 hashes[0] |= (1 << h);
602 else
603 hashes[1] |= (1 << (h - 32));
604 }
605
606 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
607 CSR_WRITE_4(sc, AX_FILTDATA, hashes[0]);
608 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
609 CSR_WRITE_4(sc, AX_FILTDATA, hashes[1]);
610 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
611
612 return;
613 }
614
615 /*
616 * Initiate an autonegotiation session.
617 */
618 static void ax_autoneg_xmit(sc)
619 struct ax_softc *sc;
620 {
621 u_int16_t phy_sts;
622
623 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
624 DELAY(500);
625 while(ax_phy_readreg(sc, PHY_BMCR)
626 & PHY_BMCR_RESET);
627
628 phy_sts = ax_phy_readreg(sc, PHY_BMCR);
629 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
630 ax_phy_writereg(sc, PHY_BMCR, phy_sts);
631
632 return;
633 }
634
/*
 * Invoke autonegotiation on a PHY.
 *
 * 'flag' selects one of three modes:
 *   AX_FLAG_FORCEDELAY - start autoneg and busy-wait 5 seconds for it
 *                        (probe-time only, see the warning below);
 *   AX_FLAG_SCHEDDELAY - start autoneg and arm the watchdog timer to
 *                        call back in 5 seconds (or defer until the TX
 *                        queue drains);
 *   AX_FLAG_DELAYTIMEO - timer callback: harvest the autoneg result.
 *
 * After the session, if link is up, the best mode advertised by both
 * ends is selected, the PHY and the chip's duplex setting are
 * programmed to match, and the interface is reinitialized.
 */
static void ax_autoneg_mii(sc, flag, verbose)
	struct ax_softc		*sc;
	int			flag;
	int			verbose;
{
	u_int16_t		phy_sts = 0, media, advert, ability;
	struct ifnet		*ifp;
	struct ifmedia		*ifm;

	ifm = &sc->ifmedia;
	ifp = &sc->arpcom.ac_if;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

	/*
	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
	 * bit cleared in the status register, but has the 'autoneg enabled'
	 * bit set in the control register. This is a contradiction, and
	 * I'm not sure how to handle it. If you want to force an attempt
	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
	 * and see what happens.
	 */
#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's
	 * no point in continuing.
	 */
	phy_sts = ax_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			printf("ax%d: autonegotiation not supported\n",
							sc->ax_unit);
		/* Fall back to the safest mode. */
		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
		return;
	}
#endif

	switch (flag) {
	case AX_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks
		 * for three whole seconds after we've gone multi-user
		 * is really bad manners.
		 */
		ax_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case AX_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting
		 * an autoneg session, otherwise ax_start() may clobber
		 * our timeout, and we don't want to allow transmission
		 * during an autoneg session since that can screw it up.
		 */
		if (sc->ax_cdata.ax_tx_head != NULL) {
			sc->ax_want_auto = 1;
			return;
		}
		ax_autoneg_xmit(sc);
		/* Harvest the result from the watchdog in 5 seconds. */
		ifp->if_timer = 5;
		sc->ax_autoneg = 1;
		sc->ax_want_auto = 0;
		return;
		break;
	case AX_FLAG_DELAYTIMEO:
		/* Timer fired: session is over, collect results below. */
		ifp->if_timer = 0;
		sc->ax_autoneg = 0;
		break;
	default:
		printf("ax%d: invalid autoneg flag: %d\n", sc->ax_unit, flag);
		return;
	}

	if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			printf("ax%d: autoneg complete, ", sc->ax_unit);
		phy_sts = ax_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			printf("ax%d: autoneg not complete, ", sc->ax_unit);
	}

	media = ax_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			printf("link status good ");
		/* Intersect what we advertise with what the peer can do. */
		advert = ax_phy_readreg(sc, PHY_ANAR);
		ability = ax_phy_readreg(sc, PHY_LPAR);

		/* Pick the best mode both sides support, best first. */
		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
			ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
			ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
			ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else if (advert & PHY_ANAR_10BTHALF &&
			ability & PHY_ANAR_10BTHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}

		/* The session is over: disable autoneg in the PHY. */
		media &= ~PHY_BMCR_AUTONEGENBL;

		/* Set ASIC's duplex mode to match the PHY. */
		ax_setcfg(sc, media);
		ax_phy_writereg(sc, PHY_BMCR, media);
	} else {
		if (verbose)
			printf("no carrier\n");
	}

	ax_init(sc);

	/* Restart any transmission that was deferred during autoneg. */
	if (sc->ax_tx_pend) {
		sc->ax_autoneg = 0;
		sc->ax_tx_pend = 0;
		ax_start(ifp);
	}

	return;
}
781
782 static void ax_getmode_mii(sc)
783 struct ax_softc *sc;
784 {
785 u_int16_t bmsr;
786 struct ifnet *ifp;
787
788 ifp = &sc->arpcom.ac_if;
789
790 bmsr = ax_phy_readreg(sc, PHY_BMSR);
791 if (bootverbose)
792 printf("ax%d: PHY status word: %x\n", sc->ax_unit, bmsr);
793
794 /* fallback */
795 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
796
797 if (bmsr & PHY_BMSR_10BTHALF) {
798 if (bootverbose)
799 printf("ax%d: 10Mbps half-duplex mode supported\n",
800 sc->ax_unit);
801 ifmedia_add(&sc->ifmedia,
802 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
803 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
804 }
805
806 if (bmsr & PHY_BMSR_10BTFULL) {
807 if (bootverbose)
808 printf("ax%d: 10Mbps full-duplex mode supported\n",
809 sc->ax_unit);
810 ifmedia_add(&sc->ifmedia,
811 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
812 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
813 }
814
815 if (bmsr & PHY_BMSR_100BTXHALF) {
816 if (bootverbose)
817 printf("ax%d: 100Mbps half-duplex mode supported\n",
818 sc->ax_unit);
819 ifp->if_baudrate = 100000000;
820 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
821 ifmedia_add(&sc->ifmedia,
822 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
823 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
824 }
825
826 if (bmsr & PHY_BMSR_100BTXFULL) {
827 if (bootverbose)
828 printf("ax%d: 100Mbps full-duplex mode supported\n",
829 sc->ax_unit);
830 ifp->if_baudrate = 100000000;
831 ifmedia_add(&sc->ifmedia,
832 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
833 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
834 }
835
836 /* Some also support 100BaseT4. */
837 if (bmsr & PHY_BMSR_100BT4) {
838 if (bootverbose)
839 printf("ax%d: 100baseT4 mode supported\n", sc->ax_unit);
840 ifp->if_baudrate = 100000000;
841 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
842 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
843 #ifdef FORCE_AUTONEG_TFOUR
844 if (bootverbose)
845 printf("ax%d: forcing on autoneg support for BT4\n",
846 sc->ax_unit);
847 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
848 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
849 #endif
850 }
851
852 if (bmsr & PHY_BMSR_CANAUTONEG) {
853 if (bootverbose)
854 printf("ax%d: autoneg supported\n", sc->ax_unit);
855 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
856 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
857 }
858
859 return;
860 }
861
862 /*
863 * Set speed and duplex mode.
864 */
865 static void ax_setmode_mii(sc, media)
866 struct ax_softc *sc;
867 int media;
868 {
869 u_int16_t bmcr;
870 struct ifnet *ifp;
871
872 ifp = &sc->arpcom.ac_if;
873
874 /*
875 * If an autoneg session is in progress, stop it.
876 */
877 if (sc->ax_autoneg) {
878 printf("ax%d: canceling autoneg session\n", sc->ax_unit);
879 ifp->if_timer = sc->ax_autoneg = sc->ax_want_auto = 0;
880 bmcr = ax_phy_readreg(sc, PHY_BMCR);
881 bmcr &= ~PHY_BMCR_AUTONEGENBL;
882 ax_phy_writereg(sc, PHY_BMCR, bmcr);
883 }
884
885 printf("ax%d: selecting MII, ", sc->ax_unit);
886
887 bmcr = ax_phy_readreg(sc, PHY_BMCR);
888
889 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
890 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
891
892 if (IFM_SUBTYPE(media) == IFM_100_T4) {
893 printf("100Mbps/T4, half-duplex\n");
894 bmcr |= PHY_BMCR_SPEEDSEL;
895 bmcr &= ~PHY_BMCR_DUPLEX;
896 }
897
898 if (IFM_SUBTYPE(media) == IFM_100_TX) {
899 printf("100Mbps, ");
900 bmcr |= PHY_BMCR_SPEEDSEL;
901 }
902
903 if (IFM_SUBTYPE(media) == IFM_10_T) {
904 printf("10Mbps, ");
905 bmcr &= ~PHY_BMCR_SPEEDSEL;
906 }
907
908 if ((media & IFM_GMASK) == IFM_FDX) {
909 printf("full duplex\n");
910 bmcr |= PHY_BMCR_DUPLEX;
911 } else {
912 printf("half duplex\n");
913 bmcr &= ~PHY_BMCR_DUPLEX;
914 }
915
916 ax_setcfg(sc, bmcr);
917 ax_phy_writereg(sc, PHY_BMCR, bmcr);
918
919 return;
920 }
921
922 /*
923 * Set speed and duplex mode on internal transceiver.
924 */
925 static void ax_setmode(sc, media, verbose)
926 struct ax_softc *sc;
927 int media;
928 int verbose;
929 {
930 struct ifnet *ifp;
931 u_int32_t mode;
932
933 ifp = &sc->arpcom.ac_if;
934
935 if (verbose)
936 printf("ax%d: selecting internal xcvr, ", sc->ax_unit);
937
938 mode = CSR_READ_4(sc, AX_NETCFG);
939
940 mode &= ~(AX_NETCFG_FULLDUPLEX|AX_NETCFG_PORTSEL|
941 AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER|AX_NETCFG_SPEEDSEL);
942
943 if (IFM_SUBTYPE(media) == IFM_100_T4) {
944 if (verbose)
945 printf("100Mbps/T4, half-duplex\n");
946 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
947 }
948
949 if (IFM_SUBTYPE(media) == IFM_100_TX) {
950 if (verbose)
951 printf("100Mbps, ");
952 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
953 }
954
955 if (IFM_SUBTYPE(media) == IFM_10_T) {
956 if (verbose)
957 printf("10Mbps, ");
958 mode &= ~AX_NETCFG_PORTSEL;
959 mode |= AX_NETCFG_SPEEDSEL;
960 }
961
962 if ((media & IFM_GMASK) == IFM_FDX) {
963 if (verbose)
964 printf("full duplex\n");
965 mode |= AX_NETCFG_FULLDUPLEX;
966 } else {
967 if (verbose)
968 printf("half duplex\n");
969 mode &= ~AX_NETCFG_FULLDUPLEX;
970 }
971
972 CSR_WRITE_4(sc, AX_NETCFG, mode);
973
974 return;
975 }
976
/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 *
 * 'bmcr' is interpreted as PHY BMCR-style bits: SPEEDSEL set means
 * 100Mbps (which clears the chip's 10Mbps SPEEDSEL bit -- the two
 * bits have opposite sense), DUPLEX set means full duplex.
 */
static void ax_setcfg(sc, bmcr)
	struct ax_softc		*sc;
	int			bmcr;
{
	int			i, restart = 0;

	if (CSR_READ_4(sc, AX_NETCFG) & (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON)) {
		restart = 1;
		AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON));

		/*
		 * NOTE(review): only the TX_IDLE bit is polled here even
		 * though both TX and RX were stopped -- confirm whether an
		 * RX idle check is also required for this chip.
		 */
		for (i = 0; i < AX_TIMEOUT; i++) {
			DELAY(10);
			if (CSR_READ_4(sc, AX_ISR) & AX_ISR_TX_IDLE)
				break;
		}

		if (i == AX_TIMEOUT)
			printf("ax%d: failed to force tx and "
				"rx to idle state\n", sc->ax_unit);

	}

	if (bmcr & PHY_BMCR_SPEEDSEL)
		AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
	else
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);

	if (bmcr & PHY_BMCR_DUPLEX)
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);
	else
		AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);

	/* Re-enable TX/RX if we stopped them above. */
	if (restart)
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);

	return;
}
1019
1020 static void ax_reset(sc)
1021 struct ax_softc *sc;
1022 {
1023 register int i;
1024
1025 AX_SETBIT(sc, AX_BUSCTL, AX_BUSCTL_RESET);
1026
1027 for (i = 0; i < AX_TIMEOUT; i++) {
1028 DELAY(10);
1029 if (!(CSR_READ_4(sc, AX_BUSCTL) & AX_BUSCTL_RESET))
1030 break;
1031 }
1032 #ifdef notdef
1033 if (i == AX_TIMEOUT)
1034 printf("ax%d: reset never completed!\n", sc->ax_unit);
1035 #endif
1036 CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);
1037
1038 /* Wait a little while for the chip to get its brains in order. */
1039 DELAY(1000);
1040 return;
1041 }
1042
/*
 * Probe for an ASIX chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 *
 * The AX88140A and AX88141 share a PCI device ID, so ax_devs holds
 * two consecutive entries with the same vid/did.  When the PCI
 * revision indicates an AX88141, we advance one table entry to
 * return the AX88141 name -- this relies on the ordering of ax_devs.
 */
static const char *
ax_probe(config_id, device_id)
	pcici_t			config_id;
	pcidi_t			device_id;
{
	struct ax_type		*t;
	u_int32_t		rev;

	t = ax_devs;

	while(t->ax_name != NULL) {
		if ((device_id & 0xFFFF) == t->ax_vid &&
		    ((device_id >> 16) & 0xFFFF) == t->ax_did) {
			/* Check the PCI revision */
			rev = pci_conf_read(config_id, AX_PCI_REVID) & 0xFF;
			if (rev >= AX_REVISION_88141)
				t++;	/* next entry carries the 88141 name */
			return(t->ax_name);
		}
		t++;
	}

	return(NULL);
}
1071
1072 /*
1073 * Attach the interface. Allocate softc structures, do ifmedia
1074 * setup and ethernet/BPF attach.
1075 */
1076 static void
1077 ax_attach(config_id, unit)
1078 pcici_t config_id;
1079 int unit;
1080 {
1081 int s, i;
1082 #ifndef AX_USEIOSPACE
1083 vm_offset_t pbase, vbase;
1084 #endif
1085 u_char eaddr[ETHER_ADDR_LEN];
1086 u_int32_t command;
1087 struct ax_softc *sc;
1088 struct ifnet *ifp;
1089 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1090 unsigned int round;
1091 caddr_t roundptr;
1092 struct ax_type *p;
1093 u_int16_t phy_vid, phy_did, phy_sts;
1094
1095 s = splimp();
1096
1097 sc = malloc(sizeof(struct ax_softc), M_DEVBUF, M_NOWAIT);
1098 if (sc == NULL) {
1099 printf("ax%d: no memory for softc struct!\n", unit);
1100 goto fail;
1101 }
1102 bzero(sc, sizeof(struct ax_softc));
1103
1104 /*
1105 * Handle power management nonsense.
1106 */
1107
1108 command = pci_conf_read(config_id, AX_PCI_CAPID) & 0x000000FF;
1109 if (command == 0x01) {
1110
1111 command = pci_conf_read(config_id, AX_PCI_PWRMGMTCTRL);
1112 if (command & AX_PSTATE_MASK) {
1113 u_int32_t iobase, membase, irq;
1114
1115 /* Save important PCI config data. */
1116 iobase = pci_conf_read(config_id, AX_PCI_LOIO);
1117 membase = pci_conf_read(config_id, AX_PCI_LOMEM);
1118 irq = pci_conf_read(config_id, AX_PCI_INTLINE);
1119
1120 /* Reset the power state. */
1121 printf("ax%d: chip is in D%d power mode "
1122 "-- setting to D0\n", unit, command & AX_PSTATE_MASK);
1123 command &= 0xFFFFFFFC;
1124 pci_conf_write(config_id, AX_PCI_PWRMGMTCTRL, command);
1125
1126 /* Restore PCI config data. */
1127 pci_conf_write(config_id, AX_PCI_LOIO, iobase);
1128 pci_conf_write(config_id, AX_PCI_LOMEM, membase);
1129 pci_conf_write(config_id, AX_PCI_INTLINE, irq);
1130 }
1131 }
1132
1133 /*
1134 * Map control/status registers.
1135 */
1136 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1137 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1138 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1139 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1140
1141 #ifdef AX_USEIOSPACE
1142 if (!(command & PCIM_CMD_PORTEN)) {
1143 printf("ax%d: failed to enable I/O ports!\n", unit);
1144 free(sc, M_DEVBUF);
1145 goto fail;
1146 }
1147
1148 if (!pci_map_port(config_id, AX_PCI_LOIO,
1149 (u_short *)&(sc->ax_bhandle))) {
1150 printf ("ax%d: couldn't map ports\n", unit);
1151 goto fail;
1152 }
1153 #ifdef __i386__
1154 sc->ax_btag = I386_BUS_SPACE_IO;
1155 #endif
1156 #ifdef __alpha__
1157 sc->ax_btag = ALPHA_BUS_SPACE_IO;
1158 #endif
1159 #else
1160 if (!(command & PCIM_CMD_MEMEN)) {
1161 printf("ax%d: failed to enable memory mapping!\n", unit);
1162 goto fail;
1163 }
1164
1165 if (!pci_map_mem(config_id, AX_PCI_LOMEM, &vbase, &pbase)) {
1166 printf ("ax%d: couldn't map memory\n", unit);
1167 goto fail;
1168 }
1169 #ifdef __i386__
1170 sc->ax_btag = I386_BUS_SPACE_MEM;
1171 #endif
1172 #ifdef __alpha__
1173 sc->ax_btag = ALPHA_BUS_SPACE_MEM;
1174 #endif
1175 sc->ax_bhandle = vbase;
1176 #endif
1177
1178 /* Allocate interrupt */
1179 if (!pci_map_int(config_id, ax_intr, sc, &net_imask)) {
1180 printf("ax%d: couldn't map interrupt\n", unit);
1181 goto fail;
1182 }
1183
1184 /* Reset the adapter. */
1185 ax_reset(sc);
1186
1187 /*
1188 * Get station address from the EEPROM.
1189 */
1190 ax_read_eeprom(sc, (caddr_t)&eaddr, AX_EE_NODEADDR, 3, 0);
1191
1192 /*
1193 * An ASIX chip was detected. Inform the world.
1194 */
1195 printf("ax%d: Ethernet address: %6D\n", unit, eaddr, ":");
1196
1197 sc->ax_unit = unit;
1198 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1199
1200 sc->ax_ldata_ptr = malloc(sizeof(struct ax_list_data) + 8,
1201 M_DEVBUF, M_NOWAIT);
1202 if (sc->ax_ldata_ptr == NULL) {
1203 free(sc, M_DEVBUF);
1204 printf("ax%d: no memory for list buffers!\n", unit);
1205 goto fail;
1206 }
1207
1208 sc->ax_ldata = (struct ax_list_data *)sc->ax_ldata_ptr;
1209 round = (unsigned int)sc->ax_ldata_ptr & 0xF;
1210 roundptr = sc->ax_ldata_ptr;
1211 for (i = 0; i < 8; i++) {
1212 if (round % 8) {
1213 round++;
1214 roundptr++;
1215 } else
1216 break;
1217 }
1218 sc->ax_ldata = (struct ax_list_data *)roundptr;
1219 bzero(sc->ax_ldata, sizeof(struct ax_list_data));
1220
1221 ifp = &sc->arpcom.ac_if;
1222 ifp->if_softc = sc;
1223 ifp->if_unit = unit;
1224 ifp->if_name = "ax";
1225 ifp->if_mtu = ETHERMTU;
1226 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1227 ifp->if_ioctl = ax_ioctl;
1228 ifp->if_output = ether_output;
1229 ifp->if_start = ax_start;
1230 ifp->if_watchdog = ax_watchdog;
1231 ifp->if_init = ax_init;
1232 ifp->if_baudrate = 10000000;
1233 ifp->if_snd.ifq_maxlen = AX_TX_LIST_CNT - 1;
1234
1235 if (bootverbose)
1236 printf("ax%d: probing for a PHY\n", sc->ax_unit);
1237 for (i = AX_PHYADDR_MIN; i < AX_PHYADDR_MAX + 1; i++) {
1238 if (bootverbose)
1239 printf("ax%d: checking address: %d\n",
1240 sc->ax_unit, i);
1241 sc->ax_phy_addr = i;
1242 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1243 DELAY(500);
1244 while(ax_phy_readreg(sc, PHY_BMCR)
1245 & PHY_BMCR_RESET);
1246 if ((phy_sts = ax_phy_readreg(sc, PHY_BMSR)))
1247 break;
1248 }
1249 if (phy_sts) {
1250 phy_vid = ax_phy_readreg(sc, PHY_VENID);
1251 phy_did = ax_phy_readreg(sc, PHY_DEVID);
1252 if (bootverbose)
1253 printf("ax%d: found PHY at address %d, ",
1254 sc->ax_unit, sc->ax_phy_addr);
1255 if (bootverbose)
1256 printf("vendor id: %x device id: %x\n",
1257 phy_vid, phy_did);
1258 p = ax_phys;
1259 while(p->ax_vid) {
1260 if (phy_vid == p->ax_vid &&
1261 (phy_did | 0x000F) == p->ax_did) {
1262 sc->ax_pinfo = p;
1263 break;
1264 }
1265 p++;
1266 }
1267 if (sc->ax_pinfo == NULL)
1268 sc->ax_pinfo = &ax_phys[PHY_UNKNOWN];
1269 if (bootverbose)
1270 printf("ax%d: PHY type: %s\n",
1271 sc->ax_unit, sc->ax_pinfo->ax_name);
1272 } else {
1273 #ifdef DIAGNOSTIC
1274 printf("ax%d: MII without any phy!\n", sc->ax_unit);
1275 #endif
1276 }
1277
1278 /*
1279 * Do ifmedia setup.
1280 */
1281 ifmedia_init(&sc->ifmedia, 0, ax_ifmedia_upd, ax_ifmedia_sts);
1282
1283 if (sc->ax_pinfo != NULL) {
1284 ax_getmode_mii(sc);
1285 ax_autoneg_mii(sc, AX_FLAG_FORCEDELAY, 1);
1286 } else {
1287 ifmedia_add(&sc->ifmedia,
1288 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1289 ifmedia_add(&sc->ifmedia,
1290 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1291 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1292 ifmedia_add(&sc->ifmedia,
1293 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1294 ifmedia_add(&sc->ifmedia,
1295 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1296 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1297 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1298 }
1299
1300 media = sc->ifmedia.ifm_media;
1301 ax_stop(sc);
1302
1303 ifmedia_set(&sc->ifmedia, media);
1304
1305 /*
1306 * Call MI attach routines.
1307 */
1308 if_attach(ifp);
1309 ether_ifattach(ifp);
1310
1311 #if NBPFILTER > 0
1312 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1313 #endif
1314 at_shutdown(ax_shutdown, sc, SHUTDOWN_POST_SYNC);
1315
1316 fail:
1317 splx(s);
1318 return;
1319 }
1320
1321 /*
1322 * Initialize the transmit descriptors.
1323 */
1324 static int ax_list_tx_init(sc)
1325 struct ax_softc *sc;
1326 {
1327 struct ax_chain_data *cd;
1328 struct ax_list_data *ld;
1329 int i;
1330
1331 cd = &sc->ax_cdata;
1332 ld = sc->ax_ldata;
1333 for (i = 0; i < AX_TX_LIST_CNT; i++) {
1334 cd->ax_tx_chain[i].ax_ptr = &ld->ax_tx_list[i];
1335 if (i == (AX_TX_LIST_CNT - 1))
1336 cd->ax_tx_chain[i].ax_nextdesc =
1337 &cd->ax_tx_chain[0];
1338 else
1339 cd->ax_tx_chain[i].ax_nextdesc =
1340 &cd->ax_tx_chain[i + 1];
1341 }
1342
1343 cd->ax_tx_free = &cd->ax_tx_chain[0];
1344 cd->ax_tx_tail = cd->ax_tx_head = NULL;
1345
1346 return(0);
1347 }
1348
1349
1350 /*
1351 * Initialize the RX descriptors and allocate mbufs for them. Note that
1352 * we arrange the descriptors in a closed ring, so that the last descriptor
1353 * points back to the first.
1354 */
1355 static int ax_list_rx_init(sc)
1356 struct ax_softc *sc;
1357 {
1358 struct ax_chain_data *cd;
1359 struct ax_list_data *ld;
1360 int i;
1361
1362 cd = &sc->ax_cdata;
1363 ld = sc->ax_ldata;
1364
1365 for (i = 0; i < AX_RX_LIST_CNT; i++) {
1366 cd->ax_rx_chain[i].ax_ptr =
1367 (volatile struct ax_desc *)&ld->ax_rx_list[i];
1368 if (ax_newbuf(sc, &cd->ax_rx_chain[i]) == ENOBUFS)
1369 return(ENOBUFS);
1370 if (i == (AX_RX_LIST_CNT - 1)) {
1371 cd->ax_rx_chain[i].ax_nextdesc =
1372 &cd->ax_rx_chain[0];
1373 ld->ax_rx_list[i].ax_next =
1374 vtophys(&ld->ax_rx_list[0]);
1375 } else {
1376 cd->ax_rx_chain[i].ax_nextdesc =
1377 &cd->ax_rx_chain[i + 1];
1378 ld->ax_rx_list[i].ax_next =
1379 vtophys(&ld->ax_rx_list[i + 1]);
1380 }
1381 }
1382
1383 cd->ax_rx_head = &cd->ax_rx_chain[0];
1384
1385 return(0);
1386 }
1387
1388 /*
1389 * Initialize an RX descriptor and attach an MBUF cluster.
1390 * Note: the length fields are only 11 bits wide, which means the
1391 * largest size we can specify is 2047. This is important because
1392 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1393 * overflow the field and make a mess.
1394 */
1395 static int ax_newbuf(sc, c)
1396 struct ax_softc *sc;
1397 struct ax_chain_onefrag *c;
1398 {
1399 struct mbuf *m_new = NULL;
1400
1401 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1402 if (m_new == NULL) {
1403 printf("ax%d: no memory for rx list -- packet dropped!\n",
1404 sc->ax_unit);
1405 return(ENOBUFS);
1406 }
1407
1408 MCLGET(m_new, M_DONTWAIT);
1409 if (!(m_new->m_flags & M_EXT)) {
1410 printf("ax%d: no memory for rx list -- packet dropped!\n",
1411 sc->ax_unit);
1412 m_freem(m_new);
1413 return(ENOBUFS);
1414 }
1415
1416 c->ax_mbuf = m_new;
1417 c->ax_ptr->ax_status = AX_RXSTAT;
1418 c->ax_ptr->ax_data = vtophys(mtod(m_new, caddr_t));
1419 c->ax_ptr->ax_ctl = MCLBYTES - 1;
1420
1421 return(0);
1422 }
1423
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the RX ring from the current head, handling every descriptor
 * the chip has released (AX_RXSTAT_OWN clear in the status word).
 */
static void ax_rxeof(sc)
	struct ax_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct ax_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Process until we hit a descriptor still owned by the chip. */
	while(!((rxstat = sc->ax_cdata.ax_rx_head->ax_ptr->ax_status) &
							AX_RXSTAT_OWN)) {
#ifdef __alpha__
		struct mbuf		*m0 = NULL;
#endif
		cur_rx = sc->ax_cdata.ax_rx_head;
		sc->ax_cdata.ax_rx_head = cur_rx->ax_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & AX_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			if (rxstat & AX_RXSTAT_COLLSEEN)
				ifp->if_collisions++;
			/* Return the descriptor to the chip unchanged. */
			cur_rx->ax_ptr->ax_status = AX_RXSTAT;
			cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ax_mbuf;
		total_len = AX_RXBYTES(cur_rx->ax_ptr->ax_status);

		/* Trim the trailing FCS, which the chip leaves in place. */
		total_len -= ETHER_CRC_LEN;

#ifdef __alpha__
		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ax_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ax_ptr->ax_status = AX_RXSTAT;
			cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
			continue;
		}

		/*
		 * Sadly, the ASIX chip doesn't decode the last few
		 * bits of the RX DMA buffer address, so we have to
		 * cheat in order to obtain proper payload alignment
		 * on the alpha.
		 */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			cur_rx->ax_ptr->ax_status = AX_RXSTAT;
			cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
			continue;
		}

		/* Offset by 2 so the payload (IP header) ends up aligned. */
		m0->m_data += 2;
		if (total_len <= (MHLEN - 2)) {
			/* Whole frame fits in the header mbuf: copy it. */
			bcopy(mtod(m, caddr_t), mtod(m0, caddr_t), total_len); m_freem(m);
			m = m0;
			m->m_pkthdr.len = m->m_len = total_len;
		} else {
			/*
			 * Copy the leading (MHLEN - 2) bytes into m0 and
			 * chain the original cluster behind it, advanced
			 * past the bytes already copied.
			 */
			bcopy(mtod(m, caddr_t), mtod(m0, caddr_t), (MHLEN - 2));
			m->m_len = total_len - (MHLEN - 2);
			m->m_data += (MHLEN - 2);
			m0->m_next = m;
			m0->m_len = (MHLEN - 2);
			m = m0;
			m->m_pkthdr.len = total_len;
		}
		m->m_pkthdr.rcvif = ifp;
#else
		if (total_len < MINCLSIZE) {
			/*
			 * Small frame: copy it into a fresh mbuf chain and
			 * leave the cluster in the ring for re-use.
			 */
			m = m_devget(mtod(cur_rx->ax_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->ax_ptr->ax_status = AX_RXSTAT;
			cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		} else {
			m = cur_rx->ax_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition and
			 * should leave the buffer in place and continue. This will
			 * result in a lost packet, but there's little else we
			 * can do in this situation.
			 */
			if (ax_newbuf(sc, cur_rx) == ENOBUFS) {
				ifp->if_ierrors++;
				cur_rx->ax_ptr->ax_status = AX_RXSTAT;
				cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
#endif

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN) &&
			    (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	return;
}
1570
/*
 * RX end-of-channel handler: the receiver stalled (no free RX
 * descriptor, or the RX watchdog fired).  Drain whatever frames have
 * completed, then restart the receiver at the current ring head.  The
 * RX engine is toggled off around the address reload; the final write
 * to AX_RXSTART appears to act as a receive poll demand — NOTE(review):
 * confirm the written value is ignored, per the AX88140 datasheet.
 */
void ax_rxeoc(sc)
	struct ax_softc		*sc;
{

	ax_rxeof(sc);
	AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
	CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
	AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
	CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);

	return;
}
1583
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Walks the queued portion of the TX ring from the head, reclaiming
 * every descriptor the chip has finished with (OWN bit clear), tallying
 * error/collision statistics and freeing the associated mbufs.
 */

static void ax_txeof(sc)
	struct ax_softc		*sc;
{
	struct ax_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Nothing queued: nothing to reclaim. */
	if (sc->ax_cdata.ax_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->ax_cdata.ax_tx_head->ax_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->ax_cdata.ax_tx_head;
		txstat = AX_TXSTATUS(cur_tx);

		/* Chip still owns this descriptor: stop here. */
		if (txstat & AX_TXSTAT_OWN)
			break;

		/* Count transmit errors reported in the status word. */
		if (txstat & AX_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & AX_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & AX_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count field, shifted down to its value. */
		ifp->if_collisions += (txstat & AX_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		m_freem(cur_tx->ax_mbuf);
		cur_tx->ax_mbuf = NULL;

		/* Just reclaimed the tail: the queue is now empty. */
		if (sc->ax_cdata.ax_tx_head == sc->ax_cdata.ax_tx_tail) {
			sc->ax_cdata.ax_tx_head = NULL;
			sc->ax_cdata.ax_tx_tail = NULL;
			break;
		}

		sc->ax_cdata.ax_tx_head = cur_tx->ax_nextdesc;
	}

	return;
}
1641
/*
 * TX 'end of channel' interrupt handler.
 *
 * Fires when the transmit engine runs out of queued descriptors.  If
 * the software queue is also empty, clear IFF_OACTIVE so ax_start()
 * may queue again, and resume any autonegotiation that was deferred
 * until TX went idle.
 */
static void ax_txeoc(sc)
	struct ax_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Nothing in flight: stop the watchdog countdown. */
	ifp->if_timer = 0;

	if (sc->ax_cdata.ax_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->ax_cdata.ax_tx_tail = NULL;
		/* Restart a deferred autoneg now that TX is idle. */
		if (sc->ax_want_auto)
			ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
	}

	return;
}
1663
/*
 * Interrupt service routine: acknowledge and dispatch every pending
 * interrupt cause.  Interrupts are masked in AX_IMR for the duration
 * and re-enabled on the way out.
 */
static void ax_intr(arg)
	void			*arg;
{
	struct ax_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Supress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		ax_stop(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AX_IMR, 0x00000000);

	for (;;) {
		/* Read the pending causes and ack them in one pass. */
		status = CSR_READ_4(sc, AX_ISR);
		if (status)
			CSR_WRITE_4(sc, AX_ISR, status);

		if ((status & AX_INTRS) == 0)
			break;

		if ((status & AX_ISR_TX_OK) || (status & AX_ISR_TX_EARLY))
			ax_txeof(sc);

		if (status & AX_ISR_TX_NOBUF)
			ax_txeoc(sc);

		if (status & AX_ISR_TX_IDLE) {
			ax_txeof(sc);
			/*
			 * TX engine went idle with frames still queued:
			 * turn it back on and issue a poll demand.
			 */
			if (sc->ax_cdata.ax_tx_head != NULL) {
				AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON);
				CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & AX_ISR_TX_UNDERRUN) {
			u_int32_t		cfg;
			cfg = CSR_READ_4(sc, AX_NETCFG);
			/*
			 * Already at the highest TX threshold: fall back
			 * to store-and-forward.  Otherwise bump the
			 * threshold one step (the field presumably sits
			 * at bit 14, hence the 0x4000 increment — see
			 * AX_NETCFG_TX_THRESH).
			 */
			if ((cfg & AX_NETCFG_TX_THRESH) == AX_TXTHRESH_160BYTES)
				AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);
			else
				CSR_WRITE_4(sc, AX_NETCFG, cfg + 0x4000);
		}

		if (status & AX_ISR_RX_OK)
			ax_rxeof(sc);

		if ((status & AX_ISR_RX_WATDOGTIMEO)
					|| (status & AX_ISR_RX_NOBUF))
			ax_rxeoc(sc);

		/* Fatal bus error: full reset and reinitialize. */
		if (status & AX_ISR_BUS_ERR) {
			ax_reset(sc);
			ax_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AX_IMR, AX_INTRS);

	/* Kick the transmitter if frames were queued while we worked. */
	if (ifp->if_snd.ifq_head != NULL) {
		ax_start(ifp);
	}

	return;
}
1736
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, or 1 when the chain exceeded AX_MAXFRAGS and
 * no mbuf/cluster could be allocated to coalesce it.
 */
static int ax_encap(sc, c, m_head)
	struct ax_softc		*sc;
	struct ax_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	volatile struct ax_desc	*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == AX_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->ax_ptr->ax_frag[frag];
			f->ax_ctl = m->m_len;
			if (frag == 0) {
				/*
				 * First fragment: leave OWN clear for now;
				 * the caller (ax_start) flips ownership
				 * once the descriptor is fully built.
				 */
				f->ax_status = 0;
				f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
			} else
				f->ax_status = AX_TXSTAT_OWN;
			f->ax_next = vtophys(&c->ax_ptr->ax_frag[frag + 1]);
			f->ax_data = vtophys(mtod(m, vm_offset_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we ran out of fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ax%d: no memory for tx list", sc->ax_unit);
			return(1);
		}
		/* A cluster is needed if the coalesced frame exceeds MHLEN. */
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				printf("ax%d: no memory for tx list",
						sc->ax_unit);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		/* The whole frame now lives in a single fragment. */
		f = &c->ax_ptr->ax_frag[0];
		f->ax_status = 0;
		f->ax_data = vtophys(mtod(m_new, caddr_t));
		f->ax_ctl = total_len = m_new->m_len;
		f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/*
	 * Mark the final fragment, request a TX-done interrupt, and
	 * chain this descriptor to the next slot in the TX ring.
	 */
	c->ax_mbuf = m_head;
	c->ax_lastdesc = frag - 1;
	AX_TXCTL(c) |= AX_TXCTL_LASTFRAG|AX_TXCTL_FINT;
	c->ax_ptr->ax_frag[0].ax_ctl |= AX_TXCTL_FINT;
	AX_TXNEXT(c) = vtophys(&c->ax_nextdesc->ax_ptr->ax_frag[0]);
	return(0);
}
1822
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void ax_start(ifp)
	struct ifnet		*ifp;
{
	struct ax_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct ax_chain		*cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Autonegotiation in progress: note that a transmit is pending
	 * and let the autoneg code restart us when it finishes.
	 */
	if (sc->ax_autoneg) {
		sc->ax_tx_pend = 1;
		return;
	}

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->ax_cdata.ax_tx_free->ax_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->ax_cdata.ax_tx_free;

	/* Queue frames until the send queue or the ring runs dry. */
	while(sc->ax_cdata.ax_tx_free->ax_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->ax_cdata.ax_tx_free;
		sc->ax_cdata.ax_tx_free = cur_tx->ax_nextdesc;

		/* Pack the data into the descriptor. */
		ax_encap(sc, cur_tx, m_head);
		if (cur_tx != start_tx)
			AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->ax_mbuf);
#endif
		/*
		 * Hand the descriptor to the chip and kick the
		 * transmitter (the AX_TXSTART write appears to act as a
		 * TX poll demand — NOTE(review): confirm against the
		 * AX88140 datasheet).
		 */
		AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;
		CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
	}

	/* Record the queue tail (and head, if it was empty) for txeof. */
	sc->ax_cdata.ax_tx_tail = cur_tx;
	if (sc->ax_cdata.ax_tx_head == NULL)
		sc->ax_cdata.ax_tx_head = start_tx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1892
/*
 * Bring the hardware to a running state: reset the chip, program bus,
 * TX-threshold and media settings, load the station address and RX
 * filter, initialize the descriptor rings, and finally enable the
 * receiver, transmitter and interrupts.  A no-op while autonegotiation
 * is in progress.
 */
static void ax_init(xsc)
	void			*xsc;
{
	struct ax_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int16_t		phy_bmcr = 0;
	int			s;

	if (sc->ax_autoneg)
		return;

	s = splimp();

	/*
	 * Save the PHY control register so the current speed/duplex
	 * selection can be restored after the reset below.
	 */
	if (sc->ax_pinfo != NULL)
		phy_bmcr = ax_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	ax_stop(sc);
	ax_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);

	AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_HEARTBEAT);
	AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);

	/*
	 * With a PHY, select the MII port and mirror the PHY's mode
	 * into the chip; otherwise program the media bits directly
	 * from the ifmedia setting.
	 */
	if (sc->ax_pinfo != NULL) {
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_PORTSEL);
		ax_setcfg(sc, ax_phy_readreg(sc, PHY_BMCR));
	} else
		ax_setmode(sc, sc->ifmedia.ifm_media, 0);

	/* Choose the TX FIFO threshold to match the link speed. */
	AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_TX_THRESH);
	AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);

	if (IFM_SUBTYPE(sc->ifmedia.ifm_media) == IFM_10_T)
		AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_160BYTES);
	else
		AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_72BYTES);

	/* Init our MAC address */
	CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (ax_list_rx_init(sc) == ENOBUFS) {
		printf("ax%d: initialization failed: no "
			"memory for rx buffers\n", sc->ax_unit);
		ax_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	ax_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
	} else {
		AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
	} else {
		AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	ax_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
	CSR_WRITE_4(sc, AX_TXADDR, vtophys(&sc->ax_ldata->ax_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, AX_IMR, AX_INTRS);
	CSR_WRITE_4(sc, AX_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);
	CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);

	/* Restore state of BMCR */
	if (sc->ax_pinfo != NULL)
		ax_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	return;
}
2005
2006 /*
2007 * Set media options.
2008 */
2009 static int ax_ifmedia_upd(ifp)
2010 struct ifnet *ifp;
2011 {
2012 struct ax_softc *sc;
2013 struct ifmedia *ifm;
2014
2015 sc = ifp->if_softc;
2016 ifm = &sc->ifmedia;
2017
2018 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2019 return(EINVAL);
2020
2021 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
2022 ax_autoneg_mii(sc, AX_FLAG_SCHEDDELAY, 1);
2023 else {
2024 if (sc->ax_pinfo == NULL)
2025 ax_setmode(sc, ifm->ifm_media, 1);
2026 else
2027 ax_setmode_mii(sc, ifm->ifm_media);
2028 }
2029
2030 return(0);
2031 }
2032
/*
 * Report current media status.
 *
 * Without a PHY, the active media is derived from the chip's NETCFG
 * port-select and duplex bits.  With a PHY, either report the forced
 * BMCR settings, or — when autonegotiation is enabled — report the
 * best mode advertised by both ends of the link.
 */
static void ax_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct ax_softc		*sc;
	u_int16_t		advert = 0, ability = 0;
	u_int32_t		media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

	/* No PHY: read the media state straight from the MAC. */
	if (sc->ax_pinfo == NULL) {
		media = CSR_READ_4(sc, AX_NETCFG);
		if (media & AX_NETCFG_PORTSEL)
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (media & AX_NETCFG_FULLDUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		return;
	}

	/* Autoneg disabled: report the manually-forced PHY settings. */
	if (!(ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
		if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		return;
	}

	/*
	 * Autonegotiated: intersect our advertisement (ANAR) with the
	 * link partner's abilities (LPAR), best mode first.
	 */
	ability = ax_phy_readreg(sc, PHY_LPAR);
	advert = ax_phy_readreg(sc, PHY_ANAR);
	if (advert & PHY_ANAR_100BT4 &&
		ability & PHY_ANAR_100BT4) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
	} else if (advert & PHY_ANAR_100BTXFULL &&
		ability & PHY_ANAR_100BTXFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
	} else if (advert & PHY_ANAR_100BTXHALF &&
		ability & PHY_ANAR_100BTXHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
	} else if (advert & PHY_ANAR_10BTFULL &&
		ability & PHY_ANAR_10BTFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
	} else if (advert & PHY_ANAR_10BTHALF &&
		ability & PHY_ANAR_10BTHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
	}

	return;
}
2094
2095 static int ax_ioctl(ifp, command, data)
2096 struct ifnet *ifp;
2097 u_long command;
2098 caddr_t data;
2099 {
2100 struct ax_softc *sc = ifp->if_softc;
2101 struct ifreq *ifr = (struct ifreq *) data;
2102 int s, error = 0;
2103
2104 s = splimp();
2105
2106 switch(command) {
2107 case SIOCSIFADDR:
2108 case SIOCGIFADDR:
2109 case SIOCSIFMTU:
2110 error = ether_ioctl(ifp, command, data);
2111 break;
2112 case SIOCSIFFLAGS:
2113 if (ifp->if_flags & IFF_UP) {
2114 ax_init(sc);
2115 } else {
2116 if (ifp->if_flags & IFF_RUNNING)
2117 ax_stop(sc);
2118 }
2119 error = 0;
2120 break;
2121 case SIOCADDMULTI:
2122 case SIOCDELMULTI:
2123 ax_setmulti(sc);
2124 error = 0;
2125 break;
2126 case SIOCGIFMEDIA:
2127 case SIOCSIFMEDIA:
2128 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2129 break;
2130 default:
2131 error = EINVAL;
2132 break;
2133 }
2134
2135 (void)splx(s);
2136
2137 return(error);
2138 }
2139
/*
 * Watchdog timeout: a queued transmission failed to complete within
 * the timer set by ax_start().  If autonegotiation is what's stalling
 * TX, just prod it along; otherwise log the timeout, do a full
 * stop/reset/init cycle and restart any pending transmissions.
 */
static void ax_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct ax_softc		*sc;

	sc = ifp->if_softc;

	/* Autoneg in progress is expected to stall TX; resume it. */
	if (sc->ax_autoneg) {
		ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
		return;
	}

	ifp->if_oerrors++;
	printf("ax%d: watchdog timeout\n", sc->ax_unit);

	/* A downed link is the most likely culprit; check it. */
	if (sc->ax_pinfo != NULL) {
		if (!(ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
			printf("ax%d: no carrier - transceiver "
				"cable problem?\n", sc->ax_unit);
	}

	ax_stop(sc);
	ax_reset(sc);
	ax_init(sc);

	/* Restart anything still waiting in the send queue. */
	if (ifp->if_snd.ifq_head != NULL)
		ax_start(ifp);

	return;
}
2170
2171 /*
2172 * Stop the adapter and free any mbufs allocated to the
2173 * RX and TX lists.
2174 */
2175 static void ax_stop(sc)
2176 struct ax_softc *sc;
2177 {
2178 register int i;
2179 struct ifnet *ifp;
2180
2181 ifp = &sc->arpcom.ac_if;
2182 ifp->if_timer = 0;
2183
2184 AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_RX_ON|AX_NETCFG_TX_ON));
2185 CSR_WRITE_4(sc, AX_IMR, 0x00000000);
2186 CSR_WRITE_4(sc, AX_TXADDR, 0x00000000);
2187 CSR_WRITE_4(sc, AX_RXADDR, 0x00000000);
2188
2189 /*
2190 * Free data in the RX lists.
2191 */
2192 for (i = 0; i < AX_RX_LIST_CNT; i++) {
2193 if (sc->ax_cdata.ax_rx_chain[i].ax_mbuf != NULL) {
2194 m_freem(sc->ax_cdata.ax_rx_chain[i].ax_mbuf);
2195 sc->ax_cdata.ax_rx_chain[i].ax_mbuf = NULL;
2196 }
2197 }
2198 bzero((char *)&sc->ax_ldata->ax_rx_list,
2199 sizeof(sc->ax_ldata->ax_rx_list));
2200
2201 /*
2202 * Free the TX list buffers.
2203 */
2204 for (i = 0; i < AX_TX_LIST_CNT; i++) {
2205 if (sc->ax_cdata.ax_tx_chain[i].ax_mbuf != NULL) {
2206 m_freem(sc->ax_cdata.ax_tx_chain[i].ax_mbuf);
2207 sc->ax_cdata.ax_tx_chain[i].ax_mbuf = NULL;
2208 }
2209 }
2210
2211 bzero((char *)&sc->ax_ldata->ax_tx_list,
2212 sizeof(sc->ax_ldata->ax_tx_list));
2213
2214 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2215
2216 return;
2217 }
2218
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void ax_shutdown(howto, arg)
	int			howto;
	void			*arg;
{
	/* The howto flags are irrelevant here; just quiesce the chip. */
	ax_stop((struct ax_softc *)arg);

	return;
}
2233
/*
 * PCI driver glue: probe/attach entry points and unit counter,
 * registered with the kernel's PCI device table via DATA_SET below.
 */
static struct pci_device ax_device = {
	"ax",
	ax_probe,
	ax_attach,
	&ax_count,
	NULL
};
DATA_SET(pcidevice_set, ax_device);
Cache object: b1386e67854597f4ece81b6dd2764724
|