FreeBSD/Linux Kernel Cross Reference
sys/dev/wb/if_wb.c
1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/8.2/sys/dev/wb/if_wb.c 214922 2010-11-07 16:56:29Z marius $");
35
36 /*
37 * Winbond fast ethernet PCI NIC driver
38 *
39 * Supports various cheap network adapters based on the Winbond W89C840F
40 * fast ethernet controller chip. This includes adapters manufactured by
41 * Winbond itself and some made by Linksys.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47 /*
48 * The Winbond W89C840F chip is a bus master; in some ways it resembles
49 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
50 * one major difference which is that while the registers do many of
51 * the same things as a tulip adapter, the offsets are different: where
52 * tulip registers are typically spaced 8 bytes apart, the Winbond
53 * registers are spaced 4 bytes apart. The receiver filter is also
54 * programmed differently.
55 *
56 * Like the tulip, the Winbond chip uses small descriptors containing
57 * a status word, a control word and 32-bit areas that can either be used
58 * to point to two external data blocks, or to point to a single block
59 * and another descriptor in a linked list. Descriptors can be grouped
60 * together in blocks to form fixed length rings or can be chained
61 * together in linked lists. A single packet may be spread out over
62 * several descriptors if necessary.
63 *
64 * For the receive ring, this driver uses a linked list of descriptors,
65 * each pointing to a single mbuf cluster buffer, which is large enough
66 * to hold an entire packet. The linked list is looped back to create a
67 * closed ring.
68 *
69 * For transmission, the driver creates a linked list of 'super descriptors'
70 * which each contain several individual descriptors linked together.
71 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
72 * abuse as fragment pointers. This allows us to use a buffer management
73 * scheme very similar to that used in the ThunderLAN and Etherlink XL
74 * drivers.
75 *
76 * Autonegotiation is performed using the external PHY via the MII bus.
77 * The sample boards I have all use a Davicom PHY.
78 *
79 * Note: the author of the Linux driver for the Winbond chip alludes
80 * to some sort of flaw in the chip's design that seems to mandate some
81 * drastic workaround which significantly impairs transmit performance.
82 * I have no idea what he's on about: transmit performance with all
83 * three of my test boards seems fine.
84 */
85
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/sockio.h>
89 #include <sys/mbuf.h>
90 #include <sys/malloc.h>
91 #include <sys/module.h>
92 #include <sys/kernel.h>
93 #include <sys/socket.h>
94 #include <sys/queue.h>
95
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/ethernet.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_types.h>
102
103 #include <net/bpf.h>
104
105 #include <vm/vm.h> /* for vtophys */
106 #include <vm/pmap.h> /* for vtophys */
107 #include <machine/bus.h>
108 #include <machine/resource.h>
109 #include <sys/bus.h>
110 #include <sys/rman.h>
111
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117
118 /* "device miibus" required. See GENERIC if you get errors here. */
119 #include "miibus_if.h"
120
121 #define WB_USEIOSPACE
122
123 #include <dev/wb/if_wbreg.h>
124
125 MODULE_DEPEND(wb, pci, 1, 1, 1);
126 MODULE_DEPEND(wb, ether, 1, 1, 1);
127 MODULE_DEPEND(wb, miibus, 1, 1, 1);
128
129 /*
130 * Various supported device vendors/types and their names.
131 */
static struct wb_type wb_devs[] = {
        { WB_VENDORID, WB_DEVICEID_840F,
                "Winbond W89C840F 10/100BaseTX" },
        { CP_VENDORID, CP_DEVICEID_RL100,
                "Compex RL100-ATX 10/100baseTX" },
        /* Sentinel entry: wb_probe() stops at the NULL name. */
        { 0, 0, NULL }
};
139
140 static int wb_probe(device_t);
141 static int wb_attach(device_t);
142 static int wb_detach(device_t);
143
144 static void wb_bfree(void *addr, void *args);
145 static int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *,
146 struct mbuf *);
147 static int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);
148
149 static void wb_rxeof(struct wb_softc *);
150 static void wb_rxeoc(struct wb_softc *);
151 static void wb_txeof(struct wb_softc *);
152 static void wb_txeoc(struct wb_softc *);
153 static void wb_intr(void *);
154 static void wb_tick(void *);
155 static void wb_start(struct ifnet *);
156 static void wb_start_locked(struct ifnet *);
157 static int wb_ioctl(struct ifnet *, u_long, caddr_t);
158 static void wb_init(void *);
159 static void wb_init_locked(struct wb_softc *);
160 static void wb_stop(struct wb_softc *);
161 static void wb_watchdog(struct ifnet *);
162 static int wb_shutdown(device_t);
163 static int wb_ifmedia_upd(struct ifnet *);
164 static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
165
166 static void wb_eeprom_putbyte(struct wb_softc *, int);
167 static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
168 static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
169 static void wb_mii_sync(struct wb_softc *);
170 static void wb_mii_send(struct wb_softc *, u_int32_t, int);
171 static int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
172 static int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
173
174 static void wb_setcfg(struct wb_softc *, u_int32_t);
175 static void wb_setmulti(struct wb_softc *);
176 static void wb_reset(struct wb_softc *);
177 static void wb_fixmedia(struct wb_softc *);
178 static int wb_list_rx_init(struct wb_softc *);
179 static int wb_list_tx_init(struct wb_softc *);
180
181 static int wb_miibus_readreg(device_t, int, int);
182 static int wb_miibus_writereg(device_t, int, int, int);
183 static void wb_miibus_statchg(device_t);
184
/* Register access: I/O space when WB_USEIOSPACE is defined, else memory. */
#ifdef WB_USEIOSPACE
#define WB_RES                  SYS_RES_IOPORT
#define WB_RID                  WB_PCI_LOIO
#else
#define WB_RES                  SYS_RES_MEMORY
#define WB_RID                  WB_PCI_LOMEM
#endif
192
static device_method_t wb_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         wb_probe),
        DEVMETHOD(device_attach,        wb_attach),
        DEVMETHOD(device_detach,        wb_detach),
        DEVMETHOD(device_shutdown,      wb_shutdown),

        /* bus interface, for miibus */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       wb_miibus_readreg),
        DEVMETHOD(miibus_writereg,      wb_miibus_writereg),
        DEVMETHOD(miibus_statchg,       wb_miibus_statchg),

        /* Terminator entry. */
        { 0, 0 }
};
210
static driver_t wb_driver = {
        "wb",
        wb_methods,
        sizeof(struct wb_softc)
};

static devclass_t wb_devclass;

/* Attach wb to the PCI bus, and hang a miibus off each wb instance. */
DRIVER_MODULE(wb, pci, wb_driver, wb_devclass, 0, 0);
DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0);
220 DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0);
221
/* Read-modify-write helpers to set/clear bits in a chip register. */
#define WB_SETBIT(sc, reg, x)                           \
        CSR_WRITE_4(sc, reg,                            \
                CSR_READ_4(sc, reg) | (x))

#define WB_CLRBIT(sc, reg, x)                           \
        CSR_WRITE_4(sc, reg,                            \
                CSR_READ_4(sc, reg) & ~(x))

/*
 * Shorthands for the serial I/O (EEPROM/MII bit-bang) register.
 * NB: these expand an implicit reference to a local 'sc' variable.
 */
#define SIO_SET(x)                                      \
        CSR_WRITE_4(sc, WB_SIO,                         \
                CSR_READ_4(sc, WB_SIO) | (x))

#define SIO_CLR(x)                                      \
        CSR_WRITE_4(sc, WB_SIO,                         \
                CSR_READ_4(sc, WB_SIO) & ~(x))
237
238 /*
239 * Send a read command and address to the EEPROM, check for ACK.
240 */
241 static void
242 wb_eeprom_putbyte(sc, addr)
243 struct wb_softc *sc;
244 int addr;
245 {
246 register int d, i;
247
248 d = addr | WB_EECMD_READ;
249
250 /*
251 * Feed in each bit and stobe the clock.
252 */
253 for (i = 0x400; i; i >>= 1) {
254 if (d & i) {
255 SIO_SET(WB_SIO_EE_DATAIN);
256 } else {
257 SIO_CLR(WB_SIO_EE_DATAIN);
258 }
259 DELAY(100);
260 SIO_SET(WB_SIO_EE_CLK);
261 DELAY(150);
262 SIO_CLR(WB_SIO_EE_CLK);
263 DELAY(100);
264 }
265
266 return;
267 }
268
269 /*
270 * Read a word of data stored in the EEPROM at address 'addr.'
271 */
272 static void
273 wb_eeprom_getword(sc, addr, dest)
274 struct wb_softc *sc;
275 int addr;
276 u_int16_t *dest;
277 {
278 register int i;
279 u_int16_t word = 0;
280
281 /* Enter EEPROM access mode. */
282 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
283
284 /*
285 * Send address of word we want to read.
286 */
287 wb_eeprom_putbyte(sc, addr);
288
289 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
290
291 /*
292 * Start reading bits from EEPROM.
293 */
294 for (i = 0x8000; i; i >>= 1) {
295 SIO_SET(WB_SIO_EE_CLK);
296 DELAY(100);
297 if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
298 word |= i;
299 SIO_CLR(WB_SIO_EE_CLK);
300 DELAY(100);
301 }
302
303 /* Turn off EEPROM access mode. */
304 CSR_WRITE_4(sc, WB_SIO, 0);
305
306 *dest = word;
307
308 return;
309 }
310
311 /*
312 * Read a sequence of words from the EEPROM.
313 */
314 static void
315 wb_read_eeprom(sc, dest, off, cnt, swap)
316 struct wb_softc *sc;
317 caddr_t dest;
318 int off;
319 int cnt;
320 int swap;
321 {
322 int i;
323 u_int16_t word = 0, *ptr;
324
325 for (i = 0; i < cnt; i++) {
326 wb_eeprom_getword(sc, off + i, &word);
327 ptr = (u_int16_t *)(dest + (i * 2));
328 if (swap)
329 *ptr = ntohs(word);
330 else
331 *ptr = word;
332 }
333
334 return;
335 }
336
337 /*
338 * Sync the PHYs by setting data bit and strobing the clock 32 times.
339 */
340 static void
341 wb_mii_sync(sc)
342 struct wb_softc *sc;
343 {
344 register int i;
345
346 SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
347
348 for (i = 0; i < 32; i++) {
349 SIO_SET(WB_SIO_MII_CLK);
350 DELAY(1);
351 SIO_CLR(WB_SIO_MII_CLK);
352 DELAY(1);
353 }
354
355 return;
356 }
357
358 /*
359 * Clock a series of bits through the MII.
360 */
361 static void
362 wb_mii_send(sc, bits, cnt)
363 struct wb_softc *sc;
364 u_int32_t bits;
365 int cnt;
366 {
367 int i;
368
369 SIO_CLR(WB_SIO_MII_CLK);
370
371 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
372 if (bits & i) {
373 SIO_SET(WB_SIO_MII_DATAIN);
374 } else {
375 SIO_CLR(WB_SIO_MII_DATAIN);
376 }
377 DELAY(1);
378 SIO_CLR(WB_SIO_MII_CLK);
379 DELAY(1);
380 SIO_SET(WB_SIO_MII_CLK);
381 }
382 }
383
384 /*
385 * Read a PHY register through the MII.
386 */
static int
wb_mii_readreg(sc, frame)
        struct wb_softc *sc;
        struct wb_mii_frame *frame;

{
        int i, ack;

        /*
         * Set up frame for RX: read opcode, no turnaround, and a
         * cleared data field so result bits can be OR'd in below.
         */
        frame->mii_stdelim = WB_MII_STARTDELIM;
        frame->mii_opcode = WB_MII_READOP;
        frame->mii_turnaround = 0;
        frame->mii_data = 0;

        /* Quiesce the serial I/O register before bit-banging. */
        CSR_WRITE_4(sc, WB_SIO, 0);

        /*
         * Turn on data xmit.
         */
        SIO_SET(WB_SIO_MII_DIR);

        /* 32 sync cycles to get the PHY's attention. */
        wb_mii_sync(sc);

        /*
         * Send command/address info.
         */
        wb_mii_send(sc, frame->mii_stdelim, 2);
        wb_mii_send(sc, frame->mii_opcode, 2);
        wb_mii_send(sc, frame->mii_phyaddr, 5);
        wb_mii_send(sc, frame->mii_regaddr, 5);

        /* Idle bit */
        SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
        DELAY(1);
        SIO_SET(WB_SIO_MII_CLK);
        DELAY(1);

        /* Turn off xmit so the PHY can drive the data line. */
        SIO_CLR(WB_SIO_MII_DIR);
        /* Check for ack: a nonzero read here means no PHY responded. */
        SIO_CLR(WB_SIO_MII_CLK);
        DELAY(1);
        ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
        SIO_SET(WB_SIO_MII_CLK);
        DELAY(1);
        SIO_CLR(WB_SIO_MII_CLK);
        DELAY(1);
        SIO_SET(WB_SIO_MII_CLK);
        DELAY(1);

        /*
         * Now try reading data bits. If the ack failed, we still
         * need to clock through 16 cycles to keep the PHY(s) in sync.
         */
        if (ack) {
                for(i = 0; i < 16; i++) {
                        SIO_CLR(WB_SIO_MII_CLK);
                        DELAY(1);
                        SIO_SET(WB_SIO_MII_CLK);
                        DELAY(1);
                }
                goto fail;
        }

        /* Shift in the 16 data bits, MSB first. */
        for (i = 0x8000; i; i >>= 1) {
                SIO_CLR(WB_SIO_MII_CLK);
                DELAY(1);
                if (!ack) {
                        if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
                                frame->mii_data |= i;
                        DELAY(1);
                }
                SIO_SET(WB_SIO_MII_CLK);
                DELAY(1);
        }

fail:

        /* Trailing clock cycle to leave the bus idle. */
        SIO_CLR(WB_SIO_MII_CLK);
        DELAY(1);
        SIO_SET(WB_SIO_MII_CLK);
        DELAY(1);

        /* Returns 1 when the PHY failed to acknowledge, 0 on success. */
        if (ack)
                return(1);
        return(0);
}
476
477 /*
478 * Write to a PHY register through the MII.
479 */
static int
wb_mii_writereg(sc, frame)
        struct wb_softc *sc;
        struct wb_mii_frame *frame;

{

        /*
         * Set up frame for TX.
         */

        frame->mii_stdelim = WB_MII_STARTDELIM;
        frame->mii_opcode = WB_MII_WRITEOP;
        frame->mii_turnaround = WB_MII_TURNAROUND;

        /*
         * Turn on data output.
         */
        SIO_SET(WB_SIO_MII_DIR);

        /* Sync, then clock out the whole write frame, MSB first. */
        wb_mii_sync(sc);

        wb_mii_send(sc, frame->mii_stdelim, 2);
        wb_mii_send(sc, frame->mii_opcode, 2);
        wb_mii_send(sc, frame->mii_phyaddr, 5);
        wb_mii_send(sc, frame->mii_regaddr, 5);
        wb_mii_send(sc, frame->mii_turnaround, 2);
        wb_mii_send(sc, frame->mii_data, 16);

        /* Idle bit. */
        SIO_SET(WB_SIO_MII_CLK);
        DELAY(1);
        SIO_CLR(WB_SIO_MII_CLK);
        DELAY(1);

        /*
         * Turn off xmit.
         */
        SIO_CLR(WB_SIO_MII_DIR);

        /* Writes are fire-and-forget; no ack is checked here. */
        return(0);
}
522
523 static int
524 wb_miibus_readreg(dev, phy, reg)
525 device_t dev;
526 int phy, reg;
527 {
528 struct wb_softc *sc;
529 struct wb_mii_frame frame;
530
531 sc = device_get_softc(dev);
532
533 bzero((char *)&frame, sizeof(frame));
534
535 frame.mii_phyaddr = phy;
536 frame.mii_regaddr = reg;
537 wb_mii_readreg(sc, &frame);
538
539 return(frame.mii_data);
540 }
541
542 static int
543 wb_miibus_writereg(dev, phy, reg, data)
544 device_t dev;
545 int phy, reg, data;
546 {
547 struct wb_softc *sc;
548 struct wb_mii_frame frame;
549
550 sc = device_get_softc(dev);
551
552 bzero((char *)&frame, sizeof(frame));
553
554 frame.mii_phyaddr = phy;
555 frame.mii_regaddr = reg;
556 frame.mii_data = data;
557
558 wb_mii_writereg(sc, &frame);
559
560 return(0);
561 }
562
563 static void
564 wb_miibus_statchg(dev)
565 device_t dev;
566 {
567 struct wb_softc *sc;
568 struct mii_data *mii;
569
570 sc = device_get_softc(dev);
571 mii = device_get_softc(sc->wb_miibus);
572 wb_setcfg(sc, mii->mii_media_active);
573
574 return;
575 }
576
577 /*
578 * Program the 64-bit multicast hash filter.
579 */
static void
wb_setmulti(sc)
        struct wb_softc *sc;
{
        struct ifnet *ifp;
        int h = 0;                      /* 6-bit hash bucket index */
        u_int32_t hashes[2] = { 0, 0 }; /* new MAR0/MAR1 contents */
        struct ifmultiaddr *ifma;
        u_int32_t rxfilt;
        int mcnt = 0;                   /* multicast addresses hashed */

        ifp = sc->wb_ifp;

        rxfilt = CSR_READ_4(sc, WB_NETCFG);

        /*
         * ALLMULTI/PROMISC: open the filter completely by setting
         * every bit in both hash registers.
         */
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                rxfilt |= WB_NETCFG_RX_MULTI;
                CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
                CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
                return;
        }

        /* first, zot all the existing hash bits */
        CSR_WRITE_4(sc, WB_MAR0, 0);
        CSR_WRITE_4(sc, WB_MAR1, 0);

        /* now program new ones (address list walked under its lock) */
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                /* Top 6 bits of the inverted big-endian CRC pick the bit. */
                h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
                    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
                mcnt++;
        }
        if_maddr_runlock(ifp);

        /* Only enable multicast reception if we actually hashed anything. */
        if (mcnt)
                rxfilt |= WB_NETCFG_RX_MULTI;
        else
                rxfilt &= ~WB_NETCFG_RX_MULTI;

        CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
        CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
        CSR_WRITE_4(sc, WB_NETCFG, rxfilt);

        return;
}
633
634 /*
635 * The Winbond manual states that in order to fiddle with the
636 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
637 * first have to put the transmit and/or receive logic in the idle state.
638 */
static void
wb_setcfg(sc, media)
        struct wb_softc *sc;
        u_int32_t media;
{
        int i, restart = 0;

        /*
         * If the transmitter or receiver is running, idle both before
         * touching the speed/duplex bits (see the comment above), and
         * remember to restart them afterwards.
         */
        if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
                restart = 1;
                WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

                /* Poll until both state machines report idle. */
                for (i = 0; i < WB_TIMEOUT; i++) {
                        DELAY(10);
                        if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
                            (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
                                break;
                }

                if (i == WB_TIMEOUT)
                        device_printf(sc->wb_dev,
                            "failed to force tx and rx to idle state\n");
        }

        /* 10baseT clears the 100Mbps bit; all other subtypes set it. */
        if (IFM_SUBTYPE(media) == IFM_10_T)
                WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
        else
                WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

        if ((media & IFM_GMASK) == IFM_FDX)
                WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
        else
                WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

        if (restart)
                WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

        return;
}
677
static void
wb_reset(sc)
        struct wb_softc *sc;
{
        register int i;
        struct mii_data *mii;

        /* Quiesce the chip: clear config, bus control and DMA addresses. */
        CSR_WRITE_4(sc, WB_NETCFG, 0);
        CSR_WRITE_4(sc, WB_BUSCTL, 0);
        CSR_WRITE_4(sc, WB_TXADDR, 0);
        CSR_WRITE_4(sc, WB_RXADDR, 0);

        /*
         * NOTE(review): the reset bit is set twice back to back.
         * This may be a deliberate hardware workaround — confirm
         * against the W89C840F manual before removing one.
         */
        WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
        WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

        /* Wait for the self-clearing reset bit to drop. */
        for (i = 0; i < WB_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
                        break;
        }
        if (i == WB_TIMEOUT)
                device_printf(sc->wb_dev, "reset never completed!\n");

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);

        /* No miibus yet (e.g. called early from wb_attach): done. */
        if (sc->wb_miibus == NULL)
                return;

        mii = device_get_softc(sc->wb_miibus);
        if (mii == NULL)
                return;

        /* Reset every PHY instance hanging off our MII bus. */
        if (mii->mii_instance) {
                struct mii_softc *miisc;
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }

        return;
}
719
720 static void
721 wb_fixmedia(sc)
722 struct wb_softc *sc;
723 {
724 struct mii_data *mii = NULL;
725 struct ifnet *ifp;
726 u_int32_t media;
727
728 if (sc->wb_miibus == NULL)
729 return;
730
731 mii = device_get_softc(sc->wb_miibus);
732 ifp = sc->wb_ifp;
733
734 mii_pollstat(mii);
735 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
736 media = mii->mii_media_active & ~IFM_10_T;
737 media |= IFM_100_TX;
738 } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
739 media = mii->mii_media_active & ~IFM_100_TX;
740 media |= IFM_10_T;
741 } else
742 return;
743
744 ifmedia_set(&mii->mii_media, media);
745
746 return;
747 }
748
749 /*
750 * Probe for a Winbond chip. Check the PCI vendor and device
751 * IDs against our list and return a device name if we find a match.
752 */
753 static int
754 wb_probe(dev)
755 device_t dev;
756 {
757 struct wb_type *t;
758
759 t = wb_devs;
760
761 while(t->wb_name != NULL) {
762 if ((pci_get_vendor(dev) == t->wb_vid) &&
763 (pci_get_device(dev) == t->wb_did)) {
764 device_set_desc(dev, t->wb_name);
765 return (BUS_PROBE_DEFAULT);
766 }
767 t++;
768 }
769
770 return(ENXIO);
771 }
772
773 /*
774 * Attach the interface. Allocate softc structures, do ifmedia
775 * setup and ethernet/BPF attach.
776 */
static int
wb_attach(dev)
        device_t dev;
{
        u_char eaddr[ETHER_ADDR_LEN];
        struct wb_softc *sc;
        struct ifnet *ifp;
        int error = 0, rid;

        sc = device_get_softc(dev);
        sc->wb_dev = dev;

        /* Mutex and callout must exist before any path that can fail,
         * since the fail: path calls wb_detach() which asserts on them. */
        mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->wb_stat_callout, &sc->wb_mtx, 0);

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        rid = WB_RID;
        sc->wb_res = bus_alloc_resource_any(dev, WB_RES, &rid, RF_ACTIVE);

        if (sc->wb_res == NULL) {
                device_printf(dev, "couldn't map ports/memory\n");
                error = ENXIO;
                goto fail;
        }

        sc->wb_btag = rman_get_bustag(sc->wb_res);
        sc->wb_bhandle = rman_get_bushandle(sc->wb_res);

        /* Allocate interrupt */
        rid = 0;
        sc->wb_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->wb_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Save the cache line size. */
        sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF;

        /* Reset the adapter. */
        wb_reset(sc);

        /*
         * Get station address from the EEPROM (3 words = 6 bytes).
         */
        wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0);

        /*
         * Descriptor lists must be physically contiguous because the
         * chip DMAs them via vtophys(). The extra 8 bytes of slack
         * presumably covers the m_adj() offset in wb_newbuf() --
         * TODO confirm.
         */
        sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF,
            M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

        if (sc->wb_ldata == NULL) {
                device_printf(dev, "no memory for list buffers!\n");
                error = ENXIO;
                goto fail;
        }

        bzero(sc->wb_ldata, sizeof(struct wb_list_data));

        ifp = sc->wb_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = wb_ioctl;
        ifp->if_start = wb_start;
        ifp->if_watchdog = wb_watchdog;
        ifp->if_init = wb_init;
        ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1;

        /*
         * Do MII setup.
         */
        error = mii_attach(dev, &sc->wb_miibus, ifp, wb_ifmedia_upd,
            wb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, eaddr);

        /* Hook interrupt last to avoid having to lock softc */
        error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, wb_intr, sc, &sc->wb_intrhand);

        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        /* wb_detach() copes with partially initialized state. */
        if (error)
                wb_detach(dev);

        return(error);
}
890
891 /*
892 * Shutdown hardware and free up resources. This can be called any
893 * time after the mutex has been initialized. It is called in both
894 * the error case in attach and the normal detach case so it needs
895 * to be careful about only freeing resources that have actually been
896 * allocated.
897 */
static int
wb_detach(dev)
        device_t dev;
{
        struct wb_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->wb_mtx), ("wb mutex not initialized"));
        ifp = sc->wb_ifp;

        /*
         * Delete any miibus and phy devices attached to this interface.
         * This should only be done if attach succeeded.
         */
        if (device_is_attached(dev)) {
                WB_LOCK(sc);
                wb_stop(sc);
                WB_UNLOCK(sc);
                /* Drain outside the lock: the callout shares wb_mtx. */
                callout_drain(&sc->wb_stat_callout);
                ether_ifdetach(ifp);
        }
        if (sc->wb_miibus)
                device_delete_child(dev, sc->wb_miibus);
        bus_generic_detach(dev);

        /* Release bus resources; each is guarded since attach may
         * have failed before allocating it. */
        if (sc->wb_intrhand)
                bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand);
        if (sc->wb_irq)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq);
        if (sc->wb_res)
                bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res);

        if (ifp)
                if_free(ifp);

        /* Size must match the contigmalloc() in wb_attach (incl. +8). */
        if (sc->wb_ldata) {
                contigfree(sc->wb_ldata, sizeof(struct wb_list_data) + 8,
                    M_DEVBUF);
        }

        mtx_destroy(&sc->wb_mtx);

        return(0);
}
943
944 /*
945 * Initialize the transmit descriptors.
946 */
947 static int
948 wb_list_tx_init(sc)
949 struct wb_softc *sc;
950 {
951 struct wb_chain_data *cd;
952 struct wb_list_data *ld;
953 int i;
954
955 cd = &sc->wb_cdata;
956 ld = sc->wb_ldata;
957
958 for (i = 0; i < WB_TX_LIST_CNT; i++) {
959 cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
960 if (i == (WB_TX_LIST_CNT - 1)) {
961 cd->wb_tx_chain[i].wb_nextdesc =
962 &cd->wb_tx_chain[0];
963 } else {
964 cd->wb_tx_chain[i].wb_nextdesc =
965 &cd->wb_tx_chain[i + 1];
966 }
967 }
968
969 cd->wb_tx_free = &cd->wb_tx_chain[0];
970 cd->wb_tx_tail = cd->wb_tx_head = NULL;
971
972 return(0);
973 }
974
975
976 /*
977 * Initialize the RX descriptors and allocate mbufs for them. Note that
978 * we arrange the descriptors in a closed ring, so that the last descriptor
979 * points back to the first.
980 */
981 static int
982 wb_list_rx_init(sc)
983 struct wb_softc *sc;
984 {
985 struct wb_chain_data *cd;
986 struct wb_list_data *ld;
987 int i;
988
989 cd = &sc->wb_cdata;
990 ld = sc->wb_ldata;
991
992 for (i = 0; i < WB_RX_LIST_CNT; i++) {
993 cd->wb_rx_chain[i].wb_ptr =
994 (struct wb_desc *)&ld->wb_rx_list[i];
995 cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
996 if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS)
997 return(ENOBUFS);
998 if (i == (WB_RX_LIST_CNT - 1)) {
999 cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
1000 ld->wb_rx_list[i].wb_next =
1001 vtophys(&ld->wb_rx_list[0]);
1002 } else {
1003 cd->wb_rx_chain[i].wb_nextdesc =
1004 &cd->wb_rx_chain[i + 1];
1005 ld->wb_rx_list[i].wb_next =
1006 vtophys(&ld->wb_rx_list[i + 1]);
1007 }
1008 }
1009
1010 cd->wb_rx_head = &cd->wb_rx_chain[0];
1011
1012 return(0);
1013 }
1014
/*
 * External-storage free routine handed to MEXTADD() in wb_newbuf().
 * The RX buffers live inside the contigmalloc'ed wb_ldata area, so
 * there is nothing to release when an mbuf lets go of one.
 */
static void
wb_bfree(void *buf, void *args)
{
}
1022
1023 /*
1024 * Initialize an RX descriptor and attach an MBUF cluster.
1025 */
static int
wb_newbuf(sc, c, m)
        struct wb_softc *sc;
        struct wb_chain_onefrag *c;
        struct mbuf *m;
{
        struct mbuf *m_new = NULL;

        if (m == NULL) {
                /*
                 * No mbuf to recycle: grab a fresh header and attach
                 * the descriptor's preallocated buffer as external
                 * storage. wb_bfree() is a no-op since the buffer
                 * lives in the contigmalloc'ed list data.
                 */
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return(ENOBUFS);
                m_new->m_data = c->wb_buf;
                m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES;
                MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, c->wb_buf,
                    NULL, 0, EXT_NET_DRV);
        } else {
                /* Recycle the caller's mbuf: reset length and data ptr. */
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

        /*
         * Skip the first 8 bytes of the buffer -- presumably an
         * alignment requirement of the chip's RX DMA; TODO confirm
         * against the W89C840F manual.
         */
        m_adj(m_new, sizeof(u_int64_t));

        /* Point the descriptor at the buffer and hand it to the chip. */
        c->wb_mbuf = m_new;
        c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t));
        c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536;
        c->wb_ptr->wb_status = WB_RXSTAT;

        return(0);
}
1057
1058 /*
1059 * A frame has been uploaded: pass the resulting mbuf chain up to
1060 * the higher level protocols.
1061 */
static void
wb_rxeof(sc)
        struct wb_softc *sc;
{
        struct mbuf *m = NULL;
        struct ifnet *ifp;
        struct wb_chain_onefrag *cur_rx;
        int total_len = 0;
        u_int32_t rxstat;

        WB_LOCK_ASSERT(sc);

        ifp = sc->wb_ifp;

        /* Walk the ring until we reach a descriptor the chip still owns. */
        while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
            WB_RXSTAT_OWN)) {
                struct mbuf *m0 = NULL;

                cur_rx = sc->wb_cdata.wb_rx_head;
                sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

                m = cur_rx->wb_mbuf;

                /*
                 * MII error, bogus length (below the minimum or above
                 * the 1536-byte buffer limit), or an incomplete
                 * single-fragment frame means the chip has gone off the
                 * rails -- reset and reinitialize it entirely.
                 */
                if ((rxstat & WB_RXSTAT_MIIERR) ||
                    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
                    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) ||
                    !(rxstat & WB_RXSTAT_LASTFRAG) ||
                    !(rxstat & WB_RXSTAT_RXCMP)) {
                        ifp->if_ierrors++;
                        wb_newbuf(sc, cur_rx, m);
                        device_printf(sc->wb_dev,
                            "receiver babbling: possible chip bug,"
                            " forcing reset\n");
                        wb_fixmedia(sc);
                        wb_reset(sc);
                        wb_init_locked(sc);
                        return;
                }

                /* Ordinary RX error: recycle the buffer and bail out. */
                if (rxstat & WB_RXSTAT_RXERR) {
                        ifp->if_ierrors++;
                        wb_newbuf(sc, cur_rx, m);
                        break;
                }

                /* No errors; receive the packet. */
                total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

                /*
                 * XXX The Winbond chip includes the CRC with every
                 * received frame, and there's no way to turn this
                 * behavior off (at least, I can't find anything in
                 * the manual that explains how to do it) so we have
                 * to trim off the CRC manually.
                 */
                total_len -= ETHER_CRC_LEN;

                /* Copy out of the DMA buffer so it can be re-armed now. */
                m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
                    NULL);
                wb_newbuf(sc, cur_rx, m);
                if (m0 == NULL) {
                        ifp->if_ierrors++;
                        break;
                }
                m = m0;

                ifp->if_ipackets++;
                /* Drop the softc lock across the network-stack upcall. */
                WB_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                WB_LOCK(sc);
        }
}
1134
1135 static void
1136 wb_rxeoc(sc)
1137 struct wb_softc *sc;
1138 {
1139 wb_rxeof(sc);
1140
1141 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1142 CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));
1143 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1144 if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
1145 CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
1146
1147 return;
1148 }
1149
1150 /*
1151 * A frame was downloaded to the chip. It's safe for us to clean up
1152 * the list buffers.
1153 */
static void
wb_txeof(sc)
        struct wb_softc *sc;
{
        struct wb_chain *cur_tx;
        struct ifnet *ifp;

        ifp = sc->wb_ifp;

        /* Clear the timeout timer. */
        ifp->if_timer = 0;

        /* Nothing in flight. */
        if (sc->wb_cdata.wb_tx_head == NULL)
                return;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
                u_int32_t txstat;

                cur_tx = sc->wb_cdata.wb_tx_head;
                txstat = WB_TXSTATUS(cur_tx);

                /* Stop at the first frame still owned by the chip or
                 * not yet handed to it. */
                if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
                        break;

                if (txstat & WB_TXSTAT_TXERR) {
                        ifp->if_oerrors++;
                        if (txstat & WB_TXSTAT_ABORT)
                                ifp->if_collisions++;
                        if (txstat & WB_TXSTAT_LATECOLL)
                                ifp->if_collisions++;
                }

                /* Collision count field starts at bit 3 of the status. */
                ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

                ifp->if_opackets++;
                m_freem(cur_tx->wb_mbuf);
                cur_tx->wb_mbuf = NULL;

                /* Reaped the last outstanding frame: list is now empty. */
                if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
                        sc->wb_cdata.wb_tx_head = NULL;
                        sc->wb_cdata.wb_tx_tail = NULL;
                        break;
                }

                sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
        }

        return;
}
1207
1208 /*
1209 * TX 'end of channel' interrupt handler.
1210 */
static void
wb_txeoc(sc)
        struct wb_softc *sc;
{
        struct ifnet *ifp;

        ifp = sc->wb_ifp;

        ifp->if_timer = 0;

        if (sc->wb_cdata.wb_tx_head == NULL) {
                /* Queue fully drained: allow new transmissions. */
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                sc->wb_cdata.wb_tx_tail = NULL;
        } else {
                /*
                 * Frames are queued but not yet handed to the chip:
                 * flip ownership to the hardware, re-arm the watchdog
                 * and kick the transmitter.
                 */
                if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
                        WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
                        ifp->if_timer = 5;
                        CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
                }
        }

        return;
}
1234
static void
wb_intr(arg)
        void *arg;
{
        struct wb_softc *sc;
        struct ifnet *ifp;
        u_int32_t status;

        sc = arg;
        WB_LOCK(sc);
        ifp = sc->wb_ifp;

        /* Ignore interrupts while the interface is down. */
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                WB_UNLOCK(sc);
                return;
        }

        /* Disable interrupts. */
        CSR_WRITE_4(sc, WB_IMR, 0x00000000);

        for (;;) {

                /* Read and acknowledge all pending events at once. */
                status = CSR_READ_4(sc, WB_ISR);
                if (status)
                        CSR_WRITE_4(sc, WB_ISR, status);

                if ((status & WB_INTRS) == 0)
                        break;

                /* RX overrun or RX error: full reset and reinit. */
                if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
                        ifp->if_ierrors++;
                        wb_reset(sc);
                        if (status & WB_ISR_RX_ERR)
                                wb_fixmedia(sc);
                        wb_init_locked(sc);
                        continue;
                }

                if (status & WB_ISR_RX_OK)
                        wb_rxeof(sc);

                if (status & WB_ISR_RX_IDLE)
                        wb_rxeoc(sc);

                if (status & WB_ISR_TX_OK)
                        wb_txeof(sc);

                if (status & WB_ISR_TX_NOBUF)
                        wb_txeoc(sc);

                if (status & WB_ISR_TX_IDLE) {
                        /* Transmitter stopped: restart it if frames remain. */
                        wb_txeof(sc);
                        if (sc->wb_cdata.wb_tx_head != NULL) {
                                WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
                                CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
                        }
                }

                if (status & WB_ISR_TX_UNDERRUN) {
                        ifp->if_oerrors++;
                        wb_txeof(sc);
                        WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
                        /* Jack up TX threshold */
                        sc->wb_txthresh += WB_TXTHRESH_CHUNK;
                        WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
                        WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
                        WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
                }

                if (status & WB_ISR_BUS_ERR) {
                        wb_reset(sc);
                        wb_init_locked(sc);
                }

        }

        /* Re-enable interrupts. */
        CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

        /* Restart transmission if packets queued up in the meantime. */
        if (ifp->if_snd.ifq_head != NULL) {
                wb_start_locked(ifp);
        }

        WB_UNLOCK(sc);

        return;
}
1322
1323 static void
1324 wb_tick(xsc)
1325 void *xsc;
1326 {
1327 struct wb_softc *sc;
1328 struct mii_data *mii;
1329
1330 sc = xsc;
1331 WB_LOCK_ASSERT(sc);
1332 mii = device_get_softc(sc->wb_miibus);
1333
1334 mii_tick(mii);
1335
1336 callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
1337
1338 return;
1339 }
1340
1341 /*
1342 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1343 * pointers to the fragment pointers.
1344 */
/*
 * Returns 0 on success, 1 on mbuf allocation failure (in which case
 * m_head is left untouched and may be requeued by the caller).
 */
static int
wb_encap(sc, c, m_head)
	struct wb_softc		*sc;
	struct wb_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct wb_desc		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				/*
				 * The first fragment's status is left
				 * clear: ownership of the whole chain is
				 * handed to the chip through the first
				 * descriptor only, later, by the caller.
				 */
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			/* The descriptors hold physical addresses. */
			f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = vtophys(mtod(m, vm_offset_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Coalesce the whole chain into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = vtophys(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/*
	 * Runt frame: add a pad fragment pointing at the static pad
	 * buffer to bring the frame up to the minimum length.
	 */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = vtophys(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	/* Record the mbuf, mark the last fragment, link to next chain. */
	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}
1432
1433 /*
1434 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1435 * to the mbuf data regions directly in the transmit lists. We also save a
1436 * copy of the pointers since the transmit list fragment pointers are
1437 * physical addresses.
1438 */
1439
1440 static void
1441 wb_start(ifp)
1442 struct ifnet *ifp;
1443 {
1444 struct wb_softc *sc;
1445
1446 sc = ifp->if_softc;
1447 WB_LOCK(sc);
1448 wb_start_locked(ifp);
1449 WB_UNLOCK(sc);
1450 }
1451
1452 static void
1453 wb_start_locked(ifp)
1454 struct ifnet *ifp;
1455 {
1456 struct wb_softc *sc;
1457 struct mbuf *m_head = NULL;
1458 struct wb_chain *cur_tx = NULL, *start_tx;
1459
1460 sc = ifp->if_softc;
1461 WB_LOCK_ASSERT(sc);
1462
1463 /*
1464 * Check for an available queue slot. If there are none,
1465 * punt.
1466 */
1467 if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1468 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1469 return;
1470 }
1471
1472 start_tx = sc->wb_cdata.wb_tx_free;
1473
1474 while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1475 IF_DEQUEUE(&ifp->if_snd, m_head);
1476 if (m_head == NULL)
1477 break;
1478
1479 /* Pick a descriptor off the free list. */
1480 cur_tx = sc->wb_cdata.wb_tx_free;
1481 sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1482
1483 /* Pack the data into the descriptor. */
1484 wb_encap(sc, cur_tx, m_head);
1485
1486 if (cur_tx != start_tx)
1487 WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1488
1489 /*
1490 * If there's a BPF listener, bounce a copy of this frame
1491 * to him.
1492 */
1493 BPF_MTAP(ifp, cur_tx->wb_mbuf);
1494 }
1495
1496 /*
1497 * If there are no packets queued, bail.
1498 */
1499 if (cur_tx == NULL)
1500 return;
1501
1502 /*
1503 * Place the request for the upload interrupt
1504 * in the last descriptor in the chain. This way, if
1505 * we're chaining several packets at once, we'll only
1506 * get an interrupt once for the whole chain rather than
1507 * once for each packet.
1508 */
1509 WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1510 cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1511 sc->wb_cdata.wb_tx_tail = cur_tx;
1512
1513 if (sc->wb_cdata.wb_tx_head == NULL) {
1514 sc->wb_cdata.wb_tx_head = start_tx;
1515 WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1516 CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1517 } else {
1518 /*
1519 * We need to distinguish between the case where
1520 * the own bit is clear because the chip cleared it
1521 * and where the own bit is clear because we haven't
1522 * set it yet. The magic value WB_UNSET is just some
1523 * ramdomly chosen number which doesn't have the own
1524 * bit set. When we actually transmit the frame, the
1525 * status word will have _only_ the own bit set, so
1526 * the txeoc handler will be able to tell if it needs
1527 * to initiate another transmission to flush out pending
1528 * frames.
1529 */
1530 WB_TXOWN(start_tx) = WB_UNSENT;
1531 }
1532
1533 /*
1534 * Set a timeout in case the chip goes out to lunch.
1535 */
1536 ifp->if_timer = 5;
1537
1538 return;
1539 }
1540
/* Locked entry point for if_init. */
static void
wb_init(xsc)
	void			*xsc;
{
	struct wb_softc		*sc;

	sc = xsc;
	WB_LOCK(sc);
	wb_init_locked(sc);
	WB_UNLOCK(sc);
}
1551
/*
 * Stop, reset and fully reprogram the chip, then mark the interface
 * running and start the stats/PHY-poll timer.  Must be called with
 * the softc lock held.
 */
static void
wb_init_locked(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp = sc->wb_ifp;
	int			i;
	struct mii_data		*mii;

	WB_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->wb_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	/* Start from the default TX threshold; bumped on underruns. */
	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	/* Program the cache alignment probed at attach time. */
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, IF_LLADDR(sc->wb_ifp)[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->wb_dev,
		    "initialization failed: no memory for rx buffers\n");
		wb_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.  Note: the descriptor
	 * rings are referenced by physical address (vtophys).
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	/* Set media; this also programs the PHY. */
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the once-a-second PHY poll. */
	callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);

	return;
}
1667
1668 /*
1669 * Set media options.
1670 */
1671 static int
1672 wb_ifmedia_upd(ifp)
1673 struct ifnet *ifp;
1674 {
1675 struct wb_softc *sc;
1676
1677 sc = ifp->if_softc;
1678
1679 WB_LOCK(sc);
1680 if (ifp->if_flags & IFF_UP)
1681 wb_init_locked(sc);
1682 WB_UNLOCK(sc);
1683
1684 return(0);
1685 }
1686
1687 /*
1688 * Report current media status.
1689 */
1690 static void
1691 wb_ifmedia_sts(ifp, ifmr)
1692 struct ifnet *ifp;
1693 struct ifmediareq *ifmr;
1694 {
1695 struct wb_softc *sc;
1696 struct mii_data *mii;
1697
1698 sc = ifp->if_softc;
1699
1700 WB_LOCK(sc);
1701 mii = device_get_softc(sc->wb_miibus);
1702
1703 mii_pollstat(mii);
1704 ifmr->ifm_active = mii->mii_media_active;
1705 ifmr->ifm_status = mii->mii_media_status;
1706 WB_UNLOCK(sc);
1707
1708 return;
1709 }
1710
1711 static int
1712 wb_ioctl(ifp, command, data)
1713 struct ifnet *ifp;
1714 u_long command;
1715 caddr_t data;
1716 {
1717 struct wb_softc *sc = ifp->if_softc;
1718 struct mii_data *mii;
1719 struct ifreq *ifr = (struct ifreq *) data;
1720 int error = 0;
1721
1722 switch(command) {
1723 case SIOCSIFFLAGS:
1724 WB_LOCK(sc);
1725 if (ifp->if_flags & IFF_UP) {
1726 wb_init_locked(sc);
1727 } else {
1728 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1729 wb_stop(sc);
1730 }
1731 WB_UNLOCK(sc);
1732 error = 0;
1733 break;
1734 case SIOCADDMULTI:
1735 case SIOCDELMULTI:
1736 WB_LOCK(sc);
1737 wb_setmulti(sc);
1738 WB_UNLOCK(sc);
1739 error = 0;
1740 break;
1741 case SIOCGIFMEDIA:
1742 case SIOCSIFMEDIA:
1743 mii = device_get_softc(sc->wb_miibus);
1744 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1745 break;
1746 default:
1747 error = ether_ioctl(ifp, command, data);
1748 break;
1749 }
1750
1751 return(error);
1752 }
1753
1754 static void
1755 wb_watchdog(ifp)
1756 struct ifnet *ifp;
1757 {
1758 struct wb_softc *sc;
1759
1760 sc = ifp->if_softc;
1761
1762 WB_LOCK(sc);
1763 ifp->if_oerrors++;
1764 if_printf(ifp, "watchdog timeout\n");
1765 #ifdef foo
1766 if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1767 if_printf(ifp, "no carrier - transceiver cable problem?\n");
1768 #endif
1769 wb_stop(sc);
1770 wb_reset(sc);
1771 wb_init_locked(sc);
1772
1773 if (ifp->if_snd.ifq_head != NULL)
1774 wb_start_locked(ifp);
1775 WB_UNLOCK(sc);
1776
1777 return;
1778 }
1779
1780 /*
1781 * Stop the adapter and free any mbufs allocated to the
1782 * RX and TX lists.
1783 */
1784 static void
1785 wb_stop(sc)
1786 struct wb_softc *sc;
1787 {
1788 register int i;
1789 struct ifnet *ifp;
1790
1791 WB_LOCK_ASSERT(sc);
1792 ifp = sc->wb_ifp;
1793 ifp->if_timer = 0;
1794
1795 callout_stop(&sc->wb_stat_callout);
1796
1797 WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1798 CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1799 CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1800 CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1801
1802 /*
1803 * Free data in the RX lists.
1804 */
1805 for (i = 0; i < WB_RX_LIST_CNT; i++) {
1806 if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) {
1807 m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf);
1808 sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL;
1809 }
1810 }
1811 bzero((char *)&sc->wb_ldata->wb_rx_list,
1812 sizeof(sc->wb_ldata->wb_rx_list));
1813
1814 /*
1815 * Free the TX list buffers.
1816 */
1817 for (i = 0; i < WB_TX_LIST_CNT; i++) {
1818 if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1819 m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1820 sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1821 }
1822 }
1823
1824 bzero((char *)&sc->wb_ldata->wb_tx_list,
1825 sizeof(sc->wb_ldata->wb_tx_list));
1826
1827 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1828
1829 return;
1830 }
1831
1832 /*
1833 * Stop all chip I/O so that the kernel's probe routines don't
1834 * get confused by errant DMAs when rebooting.
1835 */
1836 static int
1837 wb_shutdown(dev)
1838 device_t dev;
1839 {
1840 struct wb_softc *sc;
1841
1842 sc = device_get_softc(dev);
1843
1844 WB_LOCK(sc);
1845 wb_stop(sc);
1846 WB_UNLOCK(sc);
1847
1848 return (0);
1849 }
Cache object: 6dbb57abb289130a456314f20c3f8bd7
|