FreeBSD/Linux Kernel Cross Reference
sys/dev/wb/if_wb.c
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/12.0/sys/dev/wb/if_wb.c 339735 2018-10-25 17:00:39Z brooks $");
37
38 /*
39 * Winbond fast ethernet PCI NIC driver
40 *
41 * Supports various cheap network adapters based on the Winbond W89C840F
42 * fast ethernet controller chip. This includes adapters manufactured by
43 * Winbond itself and some made by Linksys.
44 *
45 * Written by Bill Paul <wpaul@ctr.columbia.edu>
46 * Electrical Engineering Department
47 * Columbia University, New York City
48 */
49 /*
50 * The Winbond W89C840F chip is a bus master; in some ways it resembles
51 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
52 * one major difference which is that while the registers do many of
53 * the same things as a tulip adapter, the offsets are different: where
54 * tulip registers are typically spaced 8 bytes apart, the Winbond
55 * registers are spaced 4 bytes apart. The receiver filter is also
56 * programmed differently.
57 *
58 * Like the tulip, the Winbond chip uses small descriptors containing
59 * a status word, a control word and 32-bit areas that can either be used
60 * to point to two external data blocks, or to point to a single block
61 * and another descriptor in a linked list. Descriptors can be grouped
62 * together in blocks to form fixed length rings or can be chained
63 * together in linked lists. A single packet may be spread out over
64 * several descriptors if necessary.
65 *
66 * For the receive ring, this driver uses a linked list of descriptors,
67 * each pointing to a single mbuf cluster buffer, which is large enough
68 * to hold an entire packet. The linked list is looped back to create a
69 * closed ring.
70 *
71 * For transmission, the driver creates a linked list of 'super descriptors'
72 * which each contain several individual descriptors linked together.
73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
74 * abuse as fragment pointers. This allows us to use a buffer management
75 * scheme very similar to that used in the ThunderLAN and Etherlink XL
76 * drivers.
77 *
78 * Autonegotiation is performed using the external PHY via the MII bus.
79 * The sample boards I have all use a Davicom PHY.
80 *
81 * Note: the author of the Linux driver for the Winbond chip alludes
82 * to some sort of flaw in the chip's design that seems to mandate some
83 * drastic workaround which significantly impairs transmit performance.
84 * I have no idea what he's on about: transmit performance with all
85 * three of my test boards seems fine.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97
98 #include <net/if.h>
99 #include <net/if_var.h>
100 #include <net/if_arp.h>
101 #include <net/ethernet.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_types.h>
105
106 #include <net/bpf.h>
107
108 #include <vm/vm.h> /* for vtophys */
109 #include <vm/pmap.h> /* for vtophys */
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/pci/pcireg.h>
116 #include <dev/pci/pcivar.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/mii_bitbang.h>
120 #include <dev/mii/miivar.h>
121
122 /* "device miibus" required. See GENERIC if you get errors here. */
123 #include "miibus_if.h"
124
125 #define WB_USEIOSPACE
126
127 #include <dev/wb/if_wbreg.h>
128
129 MODULE_DEPEND(wb, pci, 1, 1, 1);
130 MODULE_DEPEND(wb, ether, 1, 1, 1);
131 MODULE_DEPEND(wb, miibus, 1, 1, 1);
132
133 /*
134 * Various supported device vendors/types and their names.
135 */
136 static const struct wb_type wb_devs[] = {
137 { WB_VENDORID, WB_DEVICEID_840F,
138 "Winbond W89C840F 10/100BaseTX" },
139 { CP_VENDORID, CP_DEVICEID_RL100,
140 "Compex RL100-ATX 10/100baseTX" },
141 { 0, 0, NULL }
142 };
143
144 static int wb_probe(device_t);
145 static int wb_attach(device_t);
146 static int wb_detach(device_t);
147
148 static void wb_bfree(struct mbuf *);
149 static int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *,
150 struct mbuf *);
151 static int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);
152
153 static void wb_rxeof(struct wb_softc *);
154 static void wb_rxeoc(struct wb_softc *);
155 static void wb_txeof(struct wb_softc *);
156 static void wb_txeoc(struct wb_softc *);
157 static void wb_intr(void *);
158 static void wb_tick(void *);
159 static void wb_start(struct ifnet *);
160 static void wb_start_locked(struct ifnet *);
161 static int wb_ioctl(struct ifnet *, u_long, caddr_t);
162 static void wb_init(void *);
163 static void wb_init_locked(struct wb_softc *);
164 static void wb_stop(struct wb_softc *);
165 static void wb_watchdog(struct wb_softc *);
166 static int wb_shutdown(device_t);
167 static int wb_ifmedia_upd(struct ifnet *);
168 static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
169
170 static void wb_eeprom_putbyte(struct wb_softc *, int);
171 static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
172 static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
173
174 static void wb_setcfg(struct wb_softc *, u_int32_t);
175 static void wb_setmulti(struct wb_softc *);
176 static void wb_reset(struct wb_softc *);
177 static void wb_fixmedia(struct wb_softc *);
178 static int wb_list_rx_init(struct wb_softc *);
179 static int wb_list_tx_init(struct wb_softc *);
180
181 static int wb_miibus_readreg(device_t, int, int);
182 static int wb_miibus_writereg(device_t, int, int, int);
183 static void wb_miibus_statchg(device_t);
184
185 /*
186 * MII bit-bang glue
187 */
188 static uint32_t wb_mii_bitbang_read(device_t);
189 static void wb_mii_bitbang_write(device_t, uint32_t);
190
191 static const struct mii_bitbang_ops wb_mii_bitbang_ops = {
192 wb_mii_bitbang_read,
193 wb_mii_bitbang_write,
194 {
195 WB_SIO_MII_DATAOUT, /* MII_BIT_MDO */
196 WB_SIO_MII_DATAIN, /* MII_BIT_MDI */
197 WB_SIO_MII_CLK, /* MII_BIT_MDC */
198 WB_SIO_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
199 0, /* MII_BIT_DIR_PHY_HOST */
200 }
201 };
202
203 #ifdef WB_USEIOSPACE
204 #define WB_RES SYS_RES_IOPORT
205 #define WB_RID WB_PCI_LOIO
206 #else
207 #define WB_RES SYS_RES_MEMORY
208 #define WB_RID WB_PCI_LOMEM
209 #endif
210
211 static device_method_t wb_methods[] = {
212 /* Device interface */
213 DEVMETHOD(device_probe, wb_probe),
214 DEVMETHOD(device_attach, wb_attach),
215 DEVMETHOD(device_detach, wb_detach),
216 DEVMETHOD(device_shutdown, wb_shutdown),
217
218 /* MII interface */
219 DEVMETHOD(miibus_readreg, wb_miibus_readreg),
220 DEVMETHOD(miibus_writereg, wb_miibus_writereg),
221 DEVMETHOD(miibus_statchg, wb_miibus_statchg),
222
223 DEVMETHOD_END
224 };
225
226 static driver_t wb_driver = {
227 "wb",
228 wb_methods,
229 sizeof(struct wb_softc)
230 };
231
232 static devclass_t wb_devclass;
233
234 DRIVER_MODULE(wb, pci, wb_driver, wb_devclass, 0, 0);
235 DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0);
236
237 #define WB_SETBIT(sc, reg, x) \
238 CSR_WRITE_4(sc, reg, \
239 CSR_READ_4(sc, reg) | (x))
240
241 #define WB_CLRBIT(sc, reg, x) \
242 CSR_WRITE_4(sc, reg, \
243 CSR_READ_4(sc, reg) & ~(x))
244
245 #define SIO_SET(x) \
246 CSR_WRITE_4(sc, WB_SIO, \
247 CSR_READ_4(sc, WB_SIO) | (x))
248
249 #define SIO_CLR(x) \
250 CSR_WRITE_4(sc, WB_SIO, \
251 CSR_READ_4(sc, WB_SIO) & ~(x))
252
/*
 * Send a read command and address to the EEPROM.
 *
 * NOTE(review): despite the original comment's mention of "check for ACK",
 * no acknowledge bit is read back here -- the bits are only clocked out.
 */
static void
wb_eeprom_putbyte(sc, addr)
	struct wb_softc		*sc;
	int			addr;
{
	int			d, i;

	/* Merge the word address with the EEPROM read opcode. */
	d = addr | WB_EECMD_READ;

	/*
	 * Feed in each bit, MSB first, and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}
}
281
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The result is written through 'dest'.
 */
static void
wb_eeprom_getword(sc, addr, dest)
	struct wb_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	int			i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode (select serial EEPROM, assert CS). */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	/* Re-assert chip select before clocking the data back in. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM, MSB first: each clock pulse
	 * shifts one bit out on the DATAOUT line.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;
}
321
322 /*
323 * Read a sequence of words from the EEPROM.
324 */
325 static void
326 wb_read_eeprom(sc, dest, off, cnt, swap)
327 struct wb_softc *sc;
328 caddr_t dest;
329 int off;
330 int cnt;
331 int swap;
332 {
333 int i;
334 u_int16_t word = 0, *ptr;
335
336 for (i = 0; i < cnt; i++) {
337 wb_eeprom_getword(sc, off + i, &word);
338 ptr = (u_int16_t *)(dest + (i * 2));
339 if (swap)
340 *ptr = ntohs(word);
341 else
342 *ptr = word;
343 }
344 }
345
/*
 * Read the MII serial port for the MII bit-bang module.
 * Returns the raw contents of the serial I/O register; the generic
 * mii_bitbang code extracts the data-in bit from it.
 */
static uint32_t
wb_mii_bitbang_read(device_t dev)
{
	struct wb_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, WB_SIO);
	/* Keep the register access ordered with respect to neighbors. */
	CSR_BARRIER(sc, WB_SIO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}
363
/*
 * Write the MII serial port for the MII bit-bang module.
 * 'val' is the full serial I/O register image composed by the generic
 * mii_bitbang code from the wb_mii_bitbang_ops bit definitions.
 */
static void
wb_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct wb_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, WB_SIO, val);
	/* Keep the register access ordered with respect to neighbors. */
	CSR_BARRIER(sc, WB_SIO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
378
379 static int
380 wb_miibus_readreg(dev, phy, reg)
381 device_t dev;
382 int phy, reg;
383 {
384
385 return (mii_bitbang_readreg(dev, &wb_mii_bitbang_ops, phy, reg));
386 }
387
388 static int
389 wb_miibus_writereg(dev, phy, reg, data)
390 device_t dev;
391 int phy, reg, data;
392 {
393
394 mii_bitbang_writereg(dev, &wb_mii_bitbang_ops, phy, reg, data);
395
396 return(0);
397 }
398
399 static void
400 wb_miibus_statchg(dev)
401 device_t dev;
402 {
403 struct wb_softc *sc;
404 struct mii_data *mii;
405
406 sc = device_get_softc(dev);
407 mii = device_get_softc(sc->wb_miibus);
408 wb_setcfg(sc, mii->mii_media_active);
409 }
410
/*
 * Program the 64-bit multicast hash filter.
 *
 * In allmulti/promiscuous mode the MAR0/MAR1 filter registers are simply
 * opened up completely.  Otherwise each multicast group address is hashed
 * (top 6 bits of the inverted big-endian CRC32 of the MAC address) to a
 * bit position in the 64-bit filter.
 */
static void
wb_setmulti(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = sc->wb_ifp;

	rxfilt = CSR_READ_4(sc, WB_NETCFG);

	/* Accept all multicast: set every bit in the hash filter. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= WB_NETCFG_RX_MULTI;
		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, WB_MAR0, 0);
	CSR_WRITE_4(sc, WB_MAR1, 0);

	/* now program new ones, holding the multicast address list lock */
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Upper 6 bits of the inverted CRC select the filter bit. */
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	if_maddr_runlock(ifp);

	/* Only enable multicast reception if at least one group is set. */
	if (mcnt)
		rxfilt |= WB_NETCFG_RX_MULTI;
	else
		rxfilt &= ~WB_NETCFG_RX_MULTI;

	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
}
465
/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 *
 * 'media' is an ifmedia word (e.g. mii_media_active) from which the
 * subtype (10_T vs. other) and duplex flag are decoded.
 */
static void
wb_setcfg(sc, media)
	struct wb_softc		*sc;
	u_int32_t		media;
{
	int			i, restart = 0;

	/* If TX and/or RX are running, stop them and wait for idle. */
	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
			    (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		/* Proceed anyway after a timeout, but warn about it. */
		if (i == WB_TIMEOUT)
			device_printf(sc->wb_dev,
			    "failed to force tx and rx to idle state\n");
	}

	/* Speed: only 10baseT clears the 100Mbps bit. */
	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	/* Duplex. */
	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);
}
507
/*
 * Issue a software reset of the chip, then reset all PHYs on the MII
 * bus (if the bus has been attached yet).
 */
static void
wb_reset(sc)
	struct wb_softc		*sc;
{
	int			i;
	struct mii_data		*mii;
	struct mii_softc	*miisc;

	/* Quiesce the chip: clear config and descriptor base registers. */
	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	/*
	 * Hit the reset bit.  NOTE(review): the write is issued twice in
	 * the original code -- presumably deliberate for this hardware;
	 * confirm against the datasheet before collapsing to one write.
	 */
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	/* Poll until the chip clears the self-resetting bit, or time out. */
	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		device_printf(sc->wb_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* During early attach the MII bus may not exist yet. */
	if (sc->wb_miibus == NULL)
		return;

	mii = device_get_softc(sc->wb_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
}
542
/*
 * Toggle the currently active media between 10baseT and 100baseTX.
 * Called from the RX error/babble recovery paths (wb_rxeof(), wb_intr()),
 * presumably on the theory that the configured link speed is wrong --
 * see the chip-bug comment in wb_rxeof().  If the active media is
 * neither 10baseT nor 100baseTX, nothing is changed.
 */
static void
wb_fixmedia(sc)
	struct wb_softc		*sc;
{
	struct mii_data		*mii = NULL;
	struct ifnet		*ifp;
	u_int32_t		media;

	mii = device_get_softc(sc->wb_miibus);
	ifp = sc->wb_ifp;

	/* Refresh mii_media_active before inspecting it. */
	mii_pollstat(mii);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		media = mii->mii_media_active & ~IFM_10_T;
		media |= IFM_100_TX;
	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		media = mii->mii_media_active & ~IFM_100_TX;
		media |= IFM_10_T;
	} else
		return;

	ifmedia_set(&mii->mii_media, media);
}
566
567 /*
568 * Probe for a Winbond chip. Check the PCI vendor and device
569 * IDs against our list and return a device name if we find a match.
570 */
571 static int
572 wb_probe(dev)
573 device_t dev;
574 {
575 const struct wb_type *t;
576
577 t = wb_devs;
578
579 while(t->wb_name != NULL) {
580 if ((pci_get_vendor(dev) == t->wb_vid) &&
581 (pci_get_device(dev) == t->wb_did)) {
582 device_set_desc(dev, t->wb_name);
583 return (BUS_PROBE_DEFAULT);
584 }
585 t++;
586 }
587
588 return(ENXIO);
589 }
590
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * On any failure we jump to 'fail', which calls wb_detach() to
 * release whatever resources were already acquired.
 */
static int
wb_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;

	sc = device_get_softc(dev);
	sc->wb_dev = dev;

	/* The mutex must exist before any of the cleanup paths run. */
	mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->wb_stat_callout, &sc->wb_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = WB_RID;
	sc->wb_res = bus_alloc_resource_any(dev, WB_RES, &rid, RF_ACTIVE);

	if (sc->wb_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->wb_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->wb_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save the cache line size. */
	sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM: 3 16-bit words make up
	 * the 6-byte Ethernet address.
	 */
	wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0);

	/*
	 * Allocate the descriptor lists and RX buffers in one physically
	 * contiguous chunk below 4GB (the chip uses 32-bit DMA addresses
	 * via vtophys()).  The extra 8 bytes presumably cover the 8-byte
	 * m_adj() offset applied in wb_newbuf() -- TODO confirm.
	 */
	sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->wb_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->wb_ldata, sizeof(struct wb_list_data));

	ifp = sc->wb_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_init = wb_init;
	ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1;

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->wb_miibus, ifp, wb_ifmedia_upd,
	    wb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, wb_intr, sc, &sc->wb_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	gone_by_fcp101_dev(dev);

fail:
	if (error)
		wb_detach(dev);

	return(error);
}
705
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
wb_detach(dev)
	device_t		dev;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	/* wb_attach() initializes the mutex before anything can fail. */
	KASSERT(mtx_initialized(&sc->wb_mtx), ("wb mutex not initialized"));
	ifp = sc->wb_ifp;

	/*
	 * Delete any miibus and phy devices attached to this interface.
	 * This should only be done if attach succeeded.
	 */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		WB_LOCK(sc);
		wb_stop(sc);
		WB_UNLOCK(sc);
		/* Wait for the stat callout to finish before tearing down. */
		callout_drain(&sc->wb_stat_callout);
	}
	if (sc->wb_miibus)
		device_delete_child(dev, sc->wb_miibus);
	bus_generic_detach(dev);

	/* Release interrupt handler and bus resources, if acquired. */
	if (sc->wb_intrhand)
		bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand);
	if (sc->wb_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq);
	if (sc->wb_res)
		bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res);

	if (ifp)
		if_free(ifp);

	/* Free the contiguously allocated descriptor/buffer block. */
	if (sc->wb_ldata) {
		contigfree(sc->wb_ldata, sizeof(struct wb_list_data) + 8,
		    M_DEVBUF);
	}

	mtx_destroy(&sc->wb_mtx);

	return(0);
}
758
759 /*
760 * Initialize the transmit descriptors.
761 */
762 static int
763 wb_list_tx_init(sc)
764 struct wb_softc *sc;
765 {
766 struct wb_chain_data *cd;
767 struct wb_list_data *ld;
768 int i;
769
770 cd = &sc->wb_cdata;
771 ld = sc->wb_ldata;
772
773 for (i = 0; i < WB_TX_LIST_CNT; i++) {
774 cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
775 if (i == (WB_TX_LIST_CNT - 1)) {
776 cd->wb_tx_chain[i].wb_nextdesc =
777 &cd->wb_tx_chain[0];
778 } else {
779 cd->wb_tx_chain[i].wb_nextdesc =
780 &cd->wb_tx_chain[i + 1];
781 }
782 }
783
784 cd->wb_tx_free = &cd->wb_tx_chain[0];
785 cd->wb_tx_tail = cd->wb_tx_head = NULL;
786
787 return(0);
788 }
789
790
791 /*
792 * Initialize the RX descriptors and allocate mbufs for them. Note that
793 * we arrange the descriptors in a closed ring, so that the last descriptor
794 * points back to the first.
795 */
796 static int
797 wb_list_rx_init(sc)
798 struct wb_softc *sc;
799 {
800 struct wb_chain_data *cd;
801 struct wb_list_data *ld;
802 int i;
803
804 cd = &sc->wb_cdata;
805 ld = sc->wb_ldata;
806
807 for (i = 0; i < WB_RX_LIST_CNT; i++) {
808 cd->wb_rx_chain[i].wb_ptr =
809 (struct wb_desc *)&ld->wb_rx_list[i];
810 cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
811 if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS)
812 return(ENOBUFS);
813 if (i == (WB_RX_LIST_CNT - 1)) {
814 cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
815 ld->wb_rx_list[i].wb_next =
816 vtophys(&ld->wb_rx_list[0]);
817 } else {
818 cd->wb_rx_chain[i].wb_nextdesc =
819 &cd->wb_rx_chain[i + 1];
820 ld->wb_rx_list[i].wb_next =
821 vtophys(&ld->wb_rx_list[i + 1]);
822 }
823 }
824
825 cd->wb_rx_head = &cd->wb_rx_chain[0];
826
827 return(0);
828 }
829
/*
 * External-buffer free routine for the RX mbufs.  The receive buffers
 * live inside the contiguously allocated wb_ldata block (see
 * wb_list_rx_init() / wb_attach()), so there is nothing to release per
 * buffer; this empty hook merely satisfies the m_extadd() interface.
 */
static void
wb_bfree(struct mbuf *m)
{
}
834
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * If 'm' is NULL a fresh mbuf header is allocated and the chain entry's
 * driver-owned external buffer is attached to it; otherwise the caller's
 * mbuf is recycled in place.  Returns ENOBUFS on allocation failure.
 */
static int
wb_newbuf(sc, c, m)
	struct wb_softc		*sc;
	struct wb_chain_onefrag	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES;
		/* Attach the preallocated buffer; wb_bfree is a no-op
		 * since the storage belongs to wb_ldata. */
		m_extadd(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, NULL, NULL,
		    0, EXT_NET_DRV);
	} else {
		/* Recycle: reset length and data pointer to buffer start. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Skip the first 8 bytes -- presumably for alignment/chip slack;
	 * matches the +8 in the contigmalloc() in wb_attach(). TODO confirm. */
	m_adj(m_new, sizeof(u_int64_t));

	/* Hand the buffer's physical address back to the chip and mark
	 * the descriptor ready.  1536 is the max DMA length, the same
	 * limit wb_rxeof() enforces on received frames. */
	c->wb_mbuf = m_new;
	c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536;
	c->wb_ptr->wb_status = WB_RXSTAT;

	return(0);
}
868
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Runs with the softc lock held; the lock is dropped only around the
 * call into the network stack via if_input.
 */
static void
wb_rxeof(sc)
	struct wb_softc		*sc;
{
	struct mbuf		*m = NULL;
	struct ifnet		*ifp;
	struct wb_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	WB_LOCK_ASSERT(sc);

	ifp = sc->wb_ifp;

	/* Walk the ring until we reach a descriptor the chip still owns. */
	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
	    WB_RXSTAT_OWN)) {
		struct mbuf		*m0 = NULL;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		m = cur_rx->wb_mbuf;

		/*
		 * Sanity-check the frame: MII error, impossible length,
		 * or an incomplete/multi-fragment frame all indicate the
		 * chip has gone off the rails; recover with a full reset
		 * (which also toggles the media, see wb_fixmedia()).
		 */
		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_newbuf(sc, cur_rx, m);
			device_printf(sc->wb_dev,
			    "receiver babbling: possible chip bug,"
			    " forcing reset\n");
			wb_fixmedia(sc);
			wb_reset(sc);
			wb_init_locked(sc);
			return;
		}

		/* Ordinary receive error: recycle the buffer and stop. */
		if (rxstat & WB_RXSTAT_RXERR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_newbuf(sc, cur_rx, m);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame out so the DMA buffer can be recycled
		 * immediately, with the payload ETHER_ALIGN'ed. */
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
		    NULL);
		wb_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			break;
		}
		m = m0;

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the lock across the call into the stack. */
		WB_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		WB_LOCK(sc);
	}
}
945
/*
 * RX 'end of channel' handler: drain any completed frames, then
 * restart the receiver by re-pointing it at the head of the RX ring.
 */
static void
wb_rxeoc(sc)
	struct wb_softc		*sc;
{
	wb_rxeof(sc);

	/* Stop RX, reload the ring base address, and start RX again. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	/* If the receiver is suspended, poke it to resume polling. */
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
}
958
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers: walk the TX chain from the head, freeing mbufs
 * and updating counters for every descriptor the chip has finished
 * with (or that was never handed to the chip).
 */
static void
wb_txeof(sc)
	struct wb_softc		*sc;
{
	struct wb_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = sc->wb_ifp;

	/* Clear the timeout timer. */
	sc->wb_timer = 0;

	/* Nothing in flight. */
	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		/* Stop at the first descriptor the chip still owns, or
		 * one that was queued but never started (WB_UNSENT). */
		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		if (txstat & WB_TXSTAT_TXERR) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (txstat & WB_TXSTAT_ABORT)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & WB_TXSTAT_LATECOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		}

		/* Add the collision count reported in the status word. */
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & WB_TXSTAT_COLLCNT) >> 3);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		/* Last in-flight descriptor reclaimed: chain is empty. */
		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}
}
1014
/*
 * TX 'end of channel' interrupt handler.
 *
 * If the TX chain is empty, clear OACTIVE so new transmits can be
 * queued.  Otherwise, if the head descriptor was queued but never
 * handed to the chip (WB_UNSENT), give it to the chip now and kick
 * the transmitter.
 */
static void
wb_txeoc(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = sc->wb_ifp;

	sc->wb_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			/* Transfer ownership to the chip and start it,
			 * arming a 5-tick watchdog (see wb_tick()). */
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			sc->wb_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}
}
1039
/*
 * Interrupt handler.  Masks chip interrupts, then loops reading and
 * acknowledging the ISR, dispatching each condition to the appropriate
 * RX/TX handler, until no interesting bits remain.  Serious errors
 * (RX overflow/error, bus error) trigger a full reset + reinit.
 */
static void
wb_intr(arg)
	void			*arg;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	WB_LOCK(sc);
	ifp = sc->wb_ifp;

	/* Ignore interrupts while the interface is down (shared IRQ). */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		WB_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		/* Read and acknowledge (write-1-to-clear) pending bits. */
		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		/* RX out of buffers or RX error: full reset/reinit. */
		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init_locked(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		/* TX went idle: reclaim, and restart if work remains. */
		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR) {
			wb_reset(sc);
			wb_init_locked(sc);
		}

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	/* Restart transmission if packets queued up while we worked. */
	if (ifp->if_snd.ifq_head != NULL) {
		wb_start_locked(ifp);
	}

	WB_UNLOCK(sc);
}
1125
/*
 * Once-per-second callout (scheduled on wb_stat_callout, which is tied
 * to the softc mutex): tick the MII state machine, run the TX watchdog
 * countdown, and reschedule ourselves.
 */
static void
wb_tick(xsc)
	void			*xsc;
{
	struct wb_softc		*sc;
	struct mii_data		*mii;

	sc = xsc;
	WB_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->wb_miibus);

	mii_tick(mii);

	/* wb_timer is armed (e.g. to 5) when a transmit is started;
	 * if it expires, the transmitter is considered hung. */
	if (sc->wb_timer > 0 && --sc->wb_timer == 0)
		wb_watchdog(sc);
	callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
}
1143
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, 1 on mbuf allocation failure.  On success the
 * chain entry 'c' owns the (possibly replaced) mbuf chain.
 */
static int
wb_encap(sc, c, m_head)
	struct wb_softc		*sc;
	struct wb_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct wb_desc		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			/* Only the first fragment's status stays clear;
			 * the rest are immediately owned by the chip. */
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = vtophys(mtod(m, vm_offset_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all WB_MAXFRAGS fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		/* Packet won't fit in a plain header mbuf: add a cluster. */
		if (m_head->m_pkthdr.len > MHLEN) {
			if (!(MCLGET(m_new, M_NOWAIT))) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Coalesce the whole chain into the single new mbuf and
		 * describe it with one fragment. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = vtophys(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/* Pad short frames to the minimum length using the static pad
	 * buffer as an extra fragment. */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = vtophys(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	/* Mark the last fragment and link to the next super descriptor. */
	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}
1234
1235 /*
1236 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1237 * to the mbuf data regions directly in the transmit lists. We also save a
1238 * copy of the pointers since the transmit list fragment pointers are
1239 * physical addresses.
1240 */
1241
1242 static void
1243 wb_start(ifp)
1244 struct ifnet *ifp;
1245 {
1246 struct wb_softc *sc;
1247
1248 sc = ifp->if_softc;
1249 WB_LOCK(sc);
1250 wb_start_locked(ifp);
1251 WB_UNLOCK(sc);
1252 }
1253
1254 static void
1255 wb_start_locked(ifp)
1256 struct ifnet *ifp;
1257 {
1258 struct wb_softc *sc;
1259 struct mbuf *m_head = NULL;
1260 struct wb_chain *cur_tx = NULL, *start_tx;
1261
1262 sc = ifp->if_softc;
1263 WB_LOCK_ASSERT(sc);
1264
1265 /*
1266 * Check for an available queue slot. If there are none,
1267 * punt.
1268 */
1269 if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1270 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1271 return;
1272 }
1273
1274 start_tx = sc->wb_cdata.wb_tx_free;
1275
1276 while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1277 IF_DEQUEUE(&ifp->if_snd, m_head);
1278 if (m_head == NULL)
1279 break;
1280
1281 /* Pick a descriptor off the free list. */
1282 cur_tx = sc->wb_cdata.wb_tx_free;
1283 sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1284
1285 /* Pack the data into the descriptor. */
1286 wb_encap(sc, cur_tx, m_head);
1287
1288 if (cur_tx != start_tx)
1289 WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1290
1291 /*
1292 * If there's a BPF listener, bounce a copy of this frame
1293 * to him.
1294 */
1295 BPF_MTAP(ifp, cur_tx->wb_mbuf);
1296 }
1297
1298 /*
1299 * If there are no packets queued, bail.
1300 */
1301 if (cur_tx == NULL)
1302 return;
1303
1304 /*
1305 * Place the request for the upload interrupt
1306 * in the last descriptor in the chain. This way, if
1307 * we're chaining several packets at once, we'll only
1308 * get an interrupt once for the whole chain rather than
1309 * once for each packet.
1310 */
1311 WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1312 cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1313 sc->wb_cdata.wb_tx_tail = cur_tx;
1314
1315 if (sc->wb_cdata.wb_tx_head == NULL) {
1316 sc->wb_cdata.wb_tx_head = start_tx;
1317 WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1318 CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1319 } else {
1320 /*
1321 * We need to distinguish between the case where
1322 * the own bit is clear because the chip cleared it
1323 * and where the own bit is clear because we haven't
1324 * set it yet. The magic value WB_UNSET is just some
1325 * ramdomly chosen number which doesn't have the own
1326 * bit set. When we actually transmit the frame, the
1327 * status word will have _only_ the own bit set, so
1328 * the txeoc handler will be able to tell if it needs
1329 * to initiate another transmission to flush out pending
1330 * frames.
1331 */
1332 WB_TXOWN(start_tx) = WB_UNSENT;
1333 }
1334
1335 /*
1336 * Set a timeout in case the chip goes out to lunch.
1337 */
1338 sc->wb_timer = 5;
1339 }
1340
/*
 * Locked wrapper for wb_init_locked(); usable as an if_init /
 * callout handler, which is why it takes a void pointer.
 */
static void
wb_init(void *xsc)
{
	struct wb_softc		*sc = xsc;

	WB_LOCK(sc);
	wb_init_locked(sc);
	WB_UNLOCK(sc);
}
1351
/*
 * Bring the hardware up: stop/reset the chip, program the bus/DMA
 * configuration, station address and RX filter, initialize the RX/TX
 * descriptor rings and enable the receiver and transmitter.  Called
 * with the softc lock held.
 */
static void
wb_init_locked(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp = sc->wb_ifp;
	int			i;
	struct mii_data		*mii;

	WB_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->wb_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	/* Start with the lowest TX FIFO threshold; bumped on underruns. */
	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	/* wb_cachesize was probed from PCI config space at attach time. */
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, IF_LLADDR(sc->wb_ifp)[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->wb_dev,
		    "initialization failed: no memory for rx buffers\n");
		wb_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.  The receiver is turned
	 * off while the list base address is written.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.  Writing all-ones to ISR acks any
	 * events that are still pending from before the reset.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	/* Load the TX list base with the transmitter off, then enable it. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	/* Select the configured media (restarts autonegotiation). */
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the once-per-second housekeeping timer. */
	callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
}
1465
1466 /*
1467 * Set media options.
1468 */
1469 static int
1470 wb_ifmedia_upd(ifp)
1471 struct ifnet *ifp;
1472 {
1473 struct wb_softc *sc;
1474
1475 sc = ifp->if_softc;
1476
1477 WB_LOCK(sc);
1478 if (ifp->if_flags & IFF_UP)
1479 wb_init_locked(sc);
1480 WB_UNLOCK(sc);
1481
1482 return(0);
1483 }
1484
1485 /*
1486 * Report current media status.
1487 */
1488 static void
1489 wb_ifmedia_sts(ifp, ifmr)
1490 struct ifnet *ifp;
1491 struct ifmediareq *ifmr;
1492 {
1493 struct wb_softc *sc;
1494 struct mii_data *mii;
1495
1496 sc = ifp->if_softc;
1497
1498 WB_LOCK(sc);
1499 mii = device_get_softc(sc->wb_miibus);
1500
1501 mii_pollstat(mii);
1502 ifmr->ifm_active = mii->mii_media_active;
1503 ifmr->ifm_status = mii->mii_media_status;
1504 WB_UNLOCK(sc);
1505 }
1506
1507 static int
1508 wb_ioctl(ifp, command, data)
1509 struct ifnet *ifp;
1510 u_long command;
1511 caddr_t data;
1512 {
1513 struct wb_softc *sc = ifp->if_softc;
1514 struct mii_data *mii;
1515 struct ifreq *ifr = (struct ifreq *) data;
1516 int error = 0;
1517
1518 switch(command) {
1519 case SIOCSIFFLAGS:
1520 WB_LOCK(sc);
1521 if (ifp->if_flags & IFF_UP) {
1522 wb_init_locked(sc);
1523 } else {
1524 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1525 wb_stop(sc);
1526 }
1527 WB_UNLOCK(sc);
1528 error = 0;
1529 break;
1530 case SIOCADDMULTI:
1531 case SIOCDELMULTI:
1532 WB_LOCK(sc);
1533 wb_setmulti(sc);
1534 WB_UNLOCK(sc);
1535 error = 0;
1536 break;
1537 case SIOCGIFMEDIA:
1538 case SIOCSIFMEDIA:
1539 mii = device_get_softc(sc->wb_miibus);
1540 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1541 break;
1542 default:
1543 error = ether_ioctl(ifp, command, data);
1544 break;
1545 }
1546
1547 return(error);
1548 }
1549
1550 static void
1551 wb_watchdog(sc)
1552 struct wb_softc *sc;
1553 {
1554 struct ifnet *ifp;
1555
1556 WB_LOCK_ASSERT(sc);
1557 ifp = sc->wb_ifp;
1558 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1559 if_printf(ifp, "watchdog timeout\n");
1560 #ifdef foo
1561 if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1562 if_printf(ifp, "no carrier - transceiver cable problem?\n");
1563 #endif
1564 wb_stop(sc);
1565 wb_reset(sc);
1566 wb_init_locked(sc);
1567
1568 if (ifp->if_snd.ifq_head != NULL)
1569 wb_start_locked(ifp);
1570 }
1571
1572 /*
1573 * Stop the adapter and free any mbufs allocated to the
1574 * RX and TX lists.
1575 */
1576 static void
1577 wb_stop(sc)
1578 struct wb_softc *sc;
1579 {
1580 int i;
1581 struct ifnet *ifp;
1582
1583 WB_LOCK_ASSERT(sc);
1584 ifp = sc->wb_ifp;
1585 sc->wb_timer = 0;
1586
1587 callout_stop(&sc->wb_stat_callout);
1588
1589 WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1590 CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1591 CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1592 CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1593
1594 /*
1595 * Free data in the RX lists.
1596 */
1597 for (i = 0; i < WB_RX_LIST_CNT; i++) {
1598 if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) {
1599 m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf);
1600 sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL;
1601 }
1602 }
1603 bzero((char *)&sc->wb_ldata->wb_rx_list,
1604 sizeof(sc->wb_ldata->wb_rx_list));
1605
1606 /*
1607 * Free the TX list buffers.
1608 */
1609 for (i = 0; i < WB_TX_LIST_CNT; i++) {
1610 if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1611 m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1612 sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1613 }
1614 }
1615
1616 bzero((char *)&sc->wb_ldata->wb_tx_list,
1617 sizeof(sc->wb_ldata->wb_tx_list));
1618
1619 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1620 }
1621
1622 /*
1623 * Stop all chip I/O so that the kernel's probe routines don't
1624 * get confused by errant DMAs when rebooting.
1625 */
1626 static int
1627 wb_shutdown(dev)
1628 device_t dev;
1629 {
1630 struct wb_softc *sc;
1631
1632 sc = device_get_softc(dev);
1633
1634 WB_LOCK(sc);
1635 wb_stop(sc);
1636 WB_UNLOCK(sc);
1637
1638 return (0);
1639 }
/* Cache object: ad5d6a9fa58954bba1005e0c7b6c6827 */