/* FreeBSD/Linux Kernel Cross Reference: sys/pci/if_vr.c */
1 /*
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: releng/5.0/sys/pci/if_vr.c 107220 2002-11-25 05:15:27Z silby $
33 */
34
35 /*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47 /*
 * The VIA Rhine controllers are similar in some respects to
 * the DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/sockio.h>
65 #include <sys/mbuf.h>
66 #include <sys/malloc.h>
67 #include <sys/kernel.h>
68 #include <sys/socket.h>
69
70 #include <net/if.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75
76 #include <net/bpf.h>
77
78 #include <vm/vm.h> /* for vtophys */
79 #include <vm/pmap.h> /* for vtophys */
80 #include <machine/bus_pio.h>
81 #include <machine/bus_memio.h>
82 #include <machine/bus.h>
83 #include <machine/resource.h>
84 #include <sys/bus.h>
85 #include <sys/rman.h>
86
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89
90 #include <pci/pcireg.h>
91 #include <pci/pcivar.h>
92
93 #define VR_USEIOSPACE
94
95 #include <pci/if_vrreg.h>
96
97 MODULE_DEPEND(vr, miibus, 1, 1, 1);
98
99 /* "controller miibus0" required. See GENERIC if you get errors here. */
100 #include "miibus_if.h"
101
102 #ifndef lint
103 static const char rcsid[] =
104 "$FreeBSD: releng/5.0/sys/pci/if_vr.c 107220 2002-11-25 05:15:27Z silby $";
105 #endif
106
107 /*
108 * Various supported device vendors/types and their names.
109 */
110 static struct vr_type vr_devs[] = {
111 { VIA_VENDORID, VIA_DEVICEID_RHINE,
112 "VIA VT3043 Rhine I 10/100BaseTX" },
113 { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
114 "VIA VT86C100A Rhine II 10/100BaseTX" },
115 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
116 "VIA VT6102 Rhine II 10/100BaseTX" },
117 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
118 "Delta Electronics Rhine II 10/100BaseTX" },
119 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
120 "Addtron Technology Rhine II 10/100BaseTX" },
121 { 0, 0, NULL }
122 };
123
124 static int vr_probe (device_t);
125 static int vr_attach (device_t);
126 static int vr_detach (device_t);
127
128 static int vr_newbuf (struct vr_softc *,
129 struct vr_chain_onefrag *,
130 struct mbuf *);
131 static int vr_encap (struct vr_softc *, struct vr_chain *,
132 struct mbuf * );
133
134 static void vr_rxeof (struct vr_softc *);
135 static void vr_rxeoc (struct vr_softc *);
136 static void vr_txeof (struct vr_softc *);
137 static void vr_txeoc (struct vr_softc *);
138 static void vr_tick (void *);
139 static void vr_intr (void *);
140 static void vr_start (struct ifnet *);
141 static int vr_ioctl (struct ifnet *, u_long, caddr_t);
142 static void vr_init (void *);
143 static void vr_stop (struct vr_softc *);
144 static void vr_watchdog (struct ifnet *);
145 static void vr_shutdown (device_t);
146 static int vr_ifmedia_upd (struct ifnet *);
147 static void vr_ifmedia_sts (struct ifnet *, struct ifmediareq *);
148
149 static void vr_mii_sync (struct vr_softc *);
150 static void vr_mii_send (struct vr_softc *, u_int32_t, int);
151 static int vr_mii_readreg (struct vr_softc *, struct vr_mii_frame *);
152 static int vr_mii_writereg (struct vr_softc *, struct vr_mii_frame *);
153 static int vr_miibus_readreg (device_t, int, int);
154 static int vr_miibus_writereg (device_t, int, int, int);
155 static void vr_miibus_statchg (device_t);
156
157 static void vr_setcfg (struct vr_softc *, int);
158 static u_int8_t vr_calchash (u_int8_t *);
159 static void vr_setmulti (struct vr_softc *);
160 static void vr_reset (struct vr_softc *);
161 static int vr_list_rx_init (struct vr_softc *);
162 static int vr_list_tx_init (struct vr_softc *);
163
164 #ifdef VR_USEIOSPACE
165 #define VR_RES SYS_RES_IOPORT
166 #define VR_RID VR_PCI_LOIO
167 #else
168 #define VR_RES SYS_RES_MEMORY
169 #define VR_RID VR_PCI_LOMEM
170 #endif
171
172 static device_method_t vr_methods[] = {
173 /* Device interface */
174 DEVMETHOD(device_probe, vr_probe),
175 DEVMETHOD(device_attach, vr_attach),
176 DEVMETHOD(device_detach, vr_detach),
177 DEVMETHOD(device_shutdown, vr_shutdown),
178
179 /* bus interface */
180 DEVMETHOD(bus_print_child, bus_generic_print_child),
181 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
182
183 /* MII interface */
184 DEVMETHOD(miibus_readreg, vr_miibus_readreg),
185 DEVMETHOD(miibus_writereg, vr_miibus_writereg),
186 DEVMETHOD(miibus_statchg, vr_miibus_statchg),
187
188 { 0, 0 }
189 };
190
191 static driver_t vr_driver = {
192 "vr",
193 vr_methods,
194 sizeof(struct vr_softc)
195 };
196
197 static devclass_t vr_devclass;
198
199 DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
200 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
201
202 #define VR_SETBIT(sc, reg, x) \
203 CSR_WRITE_1(sc, reg, \
204 CSR_READ_1(sc, reg) | (x))
205
206 #define VR_CLRBIT(sc, reg, x) \
207 CSR_WRITE_1(sc, reg, \
208 CSR_READ_1(sc, reg) & ~(x))
209
210 #define VR_SETBIT16(sc, reg, x) \
211 CSR_WRITE_2(sc, reg, \
212 CSR_READ_2(sc, reg) | (x))
213
214 #define VR_CLRBIT16(sc, reg, x) \
215 CSR_WRITE_2(sc, reg, \
216 CSR_READ_2(sc, reg) & ~(x))
217
218 #define VR_SETBIT32(sc, reg, x) \
219 CSR_WRITE_4(sc, reg, \
220 CSR_READ_4(sc, reg) | (x))
221
222 #define VR_CLRBIT32(sc, reg, x) \
223 CSR_WRITE_4(sc, reg, \
224 CSR_READ_4(sc, reg) & ~(x))
225
226 #define SIO_SET(x) \
227 CSR_WRITE_1(sc, VR_MIICMD, \
228 CSR_READ_1(sc, VR_MIICMD) | (x))
229
230 #define SIO_CLR(x) \
231 CSR_WRITE_1(sc, VR_MIICMD, \
232 CSR_READ_1(sc, VR_MIICMD) & ~(x))
233
234 /*
235 * Sync the PHYs by setting data bit and strobing the clock 32 times.
236 */
237 static void
238 vr_mii_sync(sc)
239 struct vr_softc *sc;
240 {
241 register int i;
242
243 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
244
245 for (i = 0; i < 32; i++) {
246 SIO_SET(VR_MIICMD_CLK);
247 DELAY(1);
248 SIO_CLR(VR_MIICMD_CLK);
249 DELAY(1);
250 }
251
252 return;
253 }
254
255 /*
256 * Clock a series of bits through the MII.
257 */
258 static void
259 vr_mii_send(sc, bits, cnt)
260 struct vr_softc *sc;
261 u_int32_t bits;
262 int cnt;
263 {
264 int i;
265
266 SIO_CLR(VR_MIICMD_CLK);
267
268 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
269 if (bits & i) {
270 SIO_SET(VR_MIICMD_DATAIN);
271 } else {
272 SIO_CLR(VR_MIICMD_DATAIN);
273 }
274 DELAY(1);
275 SIO_CLR(VR_MIICMD_CLK);
276 DELAY(1);
277 SIO_SET(VR_MIICMD_CLK);
278 }
279 }
280
281 /*
282 * Read an PHY register through the MII.
283 */
284 static int
285 vr_mii_readreg(sc, frame)
286 struct vr_softc *sc;
287 struct vr_mii_frame *frame;
288
289 {
290 int i, ack;
291
292 VR_LOCK(sc);
293
294 /*
295 * Set up frame for RX.
296 */
297 frame->mii_stdelim = VR_MII_STARTDELIM;
298 frame->mii_opcode = VR_MII_READOP;
299 frame->mii_turnaround = 0;
300 frame->mii_data = 0;
301
302 CSR_WRITE_1(sc, VR_MIICMD, 0);
303 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
304
305 /*
306 * Turn on data xmit.
307 */
308 SIO_SET(VR_MIICMD_DIR);
309
310 vr_mii_sync(sc);
311
312 /*
313 * Send command/address info.
314 */
315 vr_mii_send(sc, frame->mii_stdelim, 2);
316 vr_mii_send(sc, frame->mii_opcode, 2);
317 vr_mii_send(sc, frame->mii_phyaddr, 5);
318 vr_mii_send(sc, frame->mii_regaddr, 5);
319
320 /* Idle bit */
321 SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
322 DELAY(1);
323 SIO_SET(VR_MIICMD_CLK);
324 DELAY(1);
325
326 /* Turn off xmit. */
327 SIO_CLR(VR_MIICMD_DIR);
328
329 /* Check for ack */
330 SIO_CLR(VR_MIICMD_CLK);
331 DELAY(1);
332 SIO_SET(VR_MIICMD_CLK);
333 DELAY(1);
334 ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
335
336 /*
337 * Now try reading data bits. If the ack failed, we still
338 * need to clock through 16 cycles to keep the PHY(s) in sync.
339 */
340 if (ack) {
341 for(i = 0; i < 16; i++) {
342 SIO_CLR(VR_MIICMD_CLK);
343 DELAY(1);
344 SIO_SET(VR_MIICMD_CLK);
345 DELAY(1);
346 }
347 goto fail;
348 }
349
350 for (i = 0x8000; i; i >>= 1) {
351 SIO_CLR(VR_MIICMD_CLK);
352 DELAY(1);
353 if (!ack) {
354 if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
355 frame->mii_data |= i;
356 DELAY(1);
357 }
358 SIO_SET(VR_MIICMD_CLK);
359 DELAY(1);
360 }
361
362 fail:
363
364 SIO_CLR(VR_MIICMD_CLK);
365 DELAY(1);
366 SIO_SET(VR_MIICMD_CLK);
367 DELAY(1);
368
369 VR_UNLOCK(sc);
370
371 if (ack)
372 return(1);
373 return(0);
374 }
375
376 /*
377 * Write to a PHY register through the MII.
378 */
379 static int
380 vr_mii_writereg(sc, frame)
381 struct vr_softc *sc;
382 struct vr_mii_frame *frame;
383
384 {
385 VR_LOCK(sc);
386
387 CSR_WRITE_1(sc, VR_MIICMD, 0);
388 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
389
390 /*
391 * Set up frame for TX.
392 */
393
394 frame->mii_stdelim = VR_MII_STARTDELIM;
395 frame->mii_opcode = VR_MII_WRITEOP;
396 frame->mii_turnaround = VR_MII_TURNAROUND;
397
398 /*
399 * Turn on data output.
400 */
401 SIO_SET(VR_MIICMD_DIR);
402
403 vr_mii_sync(sc);
404
405 vr_mii_send(sc, frame->mii_stdelim, 2);
406 vr_mii_send(sc, frame->mii_opcode, 2);
407 vr_mii_send(sc, frame->mii_phyaddr, 5);
408 vr_mii_send(sc, frame->mii_regaddr, 5);
409 vr_mii_send(sc, frame->mii_turnaround, 2);
410 vr_mii_send(sc, frame->mii_data, 16);
411
412 /* Idle bit. */
413 SIO_SET(VR_MIICMD_CLK);
414 DELAY(1);
415 SIO_CLR(VR_MIICMD_CLK);
416 DELAY(1);
417
418 /*
419 * Turn off xmit.
420 */
421 SIO_CLR(VR_MIICMD_DIR);
422
423 VR_UNLOCK(sc);
424
425 return(0);
426 }
427
428 static int
429 vr_miibus_readreg(dev, phy, reg)
430 device_t dev;
431 int phy, reg;
432 {
433 struct vr_softc *sc;
434 struct vr_mii_frame frame;
435
436 sc = device_get_softc(dev);
437 bzero((char *)&frame, sizeof(frame));
438
439 frame.mii_phyaddr = phy;
440 frame.mii_regaddr = reg;
441 vr_mii_readreg(sc, &frame);
442
443 return(frame.mii_data);
444 }
445
446 static int
447 vr_miibus_writereg(dev, phy, reg, data)
448 device_t dev;
449 u_int16_t phy, reg, data;
450 {
451 struct vr_softc *sc;
452 struct vr_mii_frame frame;
453
454 sc = device_get_softc(dev);
455 bzero((char *)&frame, sizeof(frame));
456
457 frame.mii_phyaddr = phy;
458 frame.mii_regaddr = reg;
459 frame.mii_data = data;
460
461 vr_mii_writereg(sc, &frame);
462
463 return(0);
464 }
465
466 static void
467 vr_miibus_statchg(dev)
468 device_t dev;
469 {
470 struct vr_softc *sc;
471 struct mii_data *mii;
472
473 sc = device_get_softc(dev);
474 VR_LOCK(sc);
475 mii = device_get_softc(sc->vr_miibus);
476 vr_setcfg(sc, mii->mii_media_active);
477 VR_UNLOCK(sc);
478
479 return;
480 }
481
482 /*
483 * Calculate CRC of a multicast group address, return the lower 6 bits.
484 */
485 static u_int8_t vr_calchash(addr)
486 u_int8_t *addr;
487 {
488 u_int32_t crc, carry;
489 int i, j;
490 u_int8_t c;
491
492 /* Compute CRC for the address value. */
493 crc = 0xFFFFFFFF; /* initial value */
494
495 for (i = 0; i < 6; i++) {
496 c = *(addr + i);
497 for (j = 0; j < 8; j++) {
498 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
499 crc <<= 1;
500 c >>= 1;
501 if (carry)
502 crc = (crc ^ 0x04c11db6) | carry;
503 }
504 }
505
506 /* return the filter bit position */
507 return((crc >> 26) & 0x0000003F);
508 }
509
510 /*
511 * Program the 64-bit multicast hash filter.
512 */
513 static void
514 vr_setmulti(sc)
515 struct vr_softc *sc;
516 {
517 struct ifnet *ifp;
518 int h = 0;
519 u_int32_t hashes[2] = { 0, 0 };
520 struct ifmultiaddr *ifma;
521 u_int8_t rxfilt;
522 int mcnt = 0;
523
524 ifp = &sc->arpcom.ac_if;
525
526 rxfilt = CSR_READ_1(sc, VR_RXCFG);
527
528 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
529 rxfilt |= VR_RXCFG_RX_MULTI;
530 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
531 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
532 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
533 return;
534 }
535
536 /* first, zot all the existing hash bits */
537 CSR_WRITE_4(sc, VR_MAR0, 0);
538 CSR_WRITE_4(sc, VR_MAR1, 0);
539
540 /* now program new ones */
541 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
542 if (ifma->ifma_addr->sa_family != AF_LINK)
543 continue;
544 h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
545 if (h < 32)
546 hashes[0] |= (1 << h);
547 else
548 hashes[1] |= (1 << (h - 32));
549 mcnt++;
550 }
551
552 if (mcnt)
553 rxfilt |= VR_RXCFG_RX_MULTI;
554 else
555 rxfilt &= ~VR_RXCFG_RX_MULTI;
556
557 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
558 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
559 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
560
561 return;
562 }
563
564 /*
565 * In order to fiddle with the
566 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
567 * first have to put the transmit and/or receive logic in the idle state.
568 */
569 static void
570 vr_setcfg(sc, media)
571 struct vr_softc *sc;
572 int media;
573 {
574 int restart = 0;
575
576 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
577 restart = 1;
578 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
579 }
580
581 if ((media & IFM_GMASK) == IFM_FDX)
582 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
583 else
584 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
585
586 if (restart)
587 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
588
589 return;
590 }
591
592 static void
593 vr_reset(sc)
594 struct vr_softc *sc;
595 {
596 register int i;
597
598 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
599
600 for (i = 0; i < VR_TIMEOUT; i++) {
601 DELAY(10);
602 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
603 break;
604 }
605 if (i == VR_TIMEOUT) {
606 if (sc->vr_revid < REV_ID_VT3065_A)
607 printf("vr%d: reset never completed!\n", sc->vr_unit);
608 else {
609 /* Use newer force reset command */
610 printf("vr%d: Using force reset command.\n", sc->vr_unit);
611 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
612 }
613 }
614
615 /* Wait a little while for the chip to get its brains in order. */
616 DELAY(1000);
617
618 return;
619 }
620
621 /*
622 * Probe for a VIA Rhine chip. Check the PCI vendor and device
623 * IDs against our list and return a device name if we find a match.
624 */
625 static int
626 vr_probe(dev)
627 device_t dev;
628 {
629 struct vr_type *t;
630
631 t = vr_devs;
632
633 while(t->vr_name != NULL) {
634 if ((pci_get_vendor(dev) == t->vr_vid) &&
635 (pci_get_device(dev) == t->vr_did)) {
636 device_set_desc(dev, t->vr_name);
637 return(0);
638 }
639 t++;
640 }
641
642 return(ENXIO);
643 }
644
645 /*
646 * Attach the interface. Allocate softc structures, do ifmedia
647 * setup and ethernet/BPF attach.
648 */
649 static int
650 vr_attach(dev)
651 device_t dev;
652 {
653 int i;
654 u_char eaddr[ETHER_ADDR_LEN];
655 u_int32_t command;
656 struct vr_softc *sc;
657 struct ifnet *ifp;
658 int unit, error = 0, rid;
659
660 sc = device_get_softc(dev);
661 unit = device_get_unit(dev);
662 bzero(sc, sizeof(struct vr_softc *));
663
664 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
665 MTX_DEF | MTX_RECURSE);
666 VR_LOCK(sc);
667
668 /*
669 * Handle power management nonsense.
670 */
671 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
672 u_int32_t iobase, membase, irq;
673
674 /* Save important PCI config data. */
675 iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
676 membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
677 irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
678
679 /* Reset the power state. */
680 printf("vr%d: chip is in D%d power mode "
681 "-- setting to D0\n", unit,
682 pci_get_powerstate(dev));
683 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
684
685 /* Restore PCI config data. */
686 pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
687 pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
688 pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
689 }
690
691 /*
692 * Map control/status registers.
693 */
694 pci_enable_busmaster(dev);
695 pci_enable_io(dev, SYS_RES_IOPORT);
696 pci_enable_io(dev, SYS_RES_MEMORY);
697 command = pci_read_config(dev, PCIR_COMMAND, 4);
698 sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;
699
700 #ifdef VR_USEIOSPACE
701 if (!(command & PCIM_CMD_PORTEN)) {
702 printf("vr%d: failed to enable I/O ports!\n", unit);
703 free(sc, M_DEVBUF);
704 goto fail;
705 }
706 #else
707 if (!(command & PCIM_CMD_MEMEN)) {
708 printf("vr%d: failed to enable memory mapping!\n", unit);
709 goto fail;
710 }
711 #endif
712
713 rid = VR_RID;
714 sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
715 0, ~0, 1, RF_ACTIVE);
716
717 if (sc->vr_res == NULL) {
718 printf("vr%d: couldn't map ports/memory\n", unit);
719 error = ENXIO;
720 goto fail;
721 }
722
723 sc->vr_btag = rman_get_bustag(sc->vr_res);
724 sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
725
726 /* Allocate interrupt */
727 rid = 0;
728 sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
729 RF_SHAREABLE | RF_ACTIVE);
730
731 if (sc->vr_irq == NULL) {
732 printf("vr%d: couldn't map interrupt\n", unit);
733 bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
734 error = ENXIO;
735 goto fail;
736 }
737
738 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
739 vr_intr, sc, &sc->vr_intrhand);
740
741 if (error) {
742 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
743 bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
744 printf("vr%d: couldn't set up irq\n", unit);
745 goto fail;
746 }
747
748 /*
749 * Windows may put the chip in suspend mode when it
750 * shuts down. Be sure to kick it in the head to wake it
751 * up again.
752 */
753 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
754
755 /* Reset the adapter. */
756 vr_reset(sc);
757
758 /*
759 * Get station address. The way the Rhine chips work,
760 * you're not allowed to directly access the EEPROM once
761 * they've been programmed a special way. Consequently,
762 * we need to read the node address from the PAR0 and PAR1
763 * registers.
764 */
765 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
766 DELAY(200);
767 for (i = 0; i < ETHER_ADDR_LEN; i++)
768 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
769
770 /*
771 * A Rhine chip was detected. Inform the world.
772 */
773 printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
774
775 sc->vr_unit = unit;
776 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
777
778 sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
779 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
780
781 if (sc->vr_ldata == NULL) {
782 printf("vr%d: no memory for list buffers!\n", unit);
783 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
784 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
785 bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
786 error = ENXIO;
787 goto fail;
788 }
789
790 bzero(sc->vr_ldata, sizeof(struct vr_list_data));
791
792 ifp = &sc->arpcom.ac_if;
793 ifp->if_softc = sc;
794 ifp->if_unit = unit;
795 ifp->if_name = "vr";
796 ifp->if_mtu = ETHERMTU;
797 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
798 ifp->if_ioctl = vr_ioctl;
799 ifp->if_output = ether_output;
800 ifp->if_start = vr_start;
801 ifp->if_watchdog = vr_watchdog;
802 ifp->if_init = vr_init;
803 ifp->if_baudrate = 10000000;
804 ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
805
806 /*
807 * Do MII setup.
808 */
809 if (mii_phy_probe(dev, &sc->vr_miibus,
810 vr_ifmedia_upd, vr_ifmedia_sts)) {
811 printf("vr%d: MII without any phy!\n", sc->vr_unit);
812 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
813 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
814 bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
815 contigfree(sc->vr_ldata,
816 sizeof(struct vr_list_data), M_DEVBUF);
817 error = ENXIO;
818 goto fail;
819 }
820
821 callout_handle_init(&sc->vr_stat_ch);
822
823 /*
824 * Call MI attach routine.
825 */
826 ether_ifattach(ifp, eaddr);
827 VR_UNLOCK(sc);
828 return(0);
829
830 fail:
831 VR_UNLOCK(sc);
832 mtx_destroy(&sc->vr_mtx);
833
834 return(error);
835 }
836
837 static int
838 vr_detach(dev)
839 device_t dev;
840 {
841 struct vr_softc *sc;
842 struct ifnet *ifp;
843
844 sc = device_get_softc(dev);
845 VR_LOCK(sc);
846 ifp = &sc->arpcom.ac_if;
847
848 vr_stop(sc);
849 ether_ifdetach(ifp);
850
851 bus_generic_detach(dev);
852 device_delete_child(dev, sc->vr_miibus);
853
854 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
855 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
856 bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
857
858 contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
859
860 VR_UNLOCK(sc);
861 mtx_destroy(&sc->vr_mtx);
862
863 return(0);
864 }
865
866 /*
867 * Initialize the transmit descriptors.
868 */
869 static int
870 vr_list_tx_init(sc)
871 struct vr_softc *sc;
872 {
873 struct vr_chain_data *cd;
874 struct vr_list_data *ld;
875 int i;
876
877 cd = &sc->vr_cdata;
878 ld = sc->vr_ldata;
879 for (i = 0; i < VR_TX_LIST_CNT; i++) {
880 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
881 if (i == (VR_TX_LIST_CNT - 1))
882 cd->vr_tx_chain[i].vr_nextdesc =
883 &cd->vr_tx_chain[0];
884 else
885 cd->vr_tx_chain[i].vr_nextdesc =
886 &cd->vr_tx_chain[i + 1];
887 }
888
889 cd->vr_tx_free = &cd->vr_tx_chain[0];
890 cd->vr_tx_tail = cd->vr_tx_head = NULL;
891
892 return(0);
893 }
894
895
896 /*
897 * Initialize the RX descriptors and allocate mbufs for them. Note that
898 * we arrange the descriptors in a closed ring, so that the last descriptor
899 * points back to the first.
900 */
901 static int
902 vr_list_rx_init(sc)
903 struct vr_softc *sc;
904 {
905 struct vr_chain_data *cd;
906 struct vr_list_data *ld;
907 int i;
908
909 cd = &sc->vr_cdata;
910 ld = sc->vr_ldata;
911
912 for (i = 0; i < VR_RX_LIST_CNT; i++) {
913 cd->vr_rx_chain[i].vr_ptr =
914 (struct vr_desc *)&ld->vr_rx_list[i];
915 if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
916 return(ENOBUFS);
917 if (i == (VR_RX_LIST_CNT - 1)) {
918 cd->vr_rx_chain[i].vr_nextdesc =
919 &cd->vr_rx_chain[0];
920 ld->vr_rx_list[i].vr_next =
921 vtophys(&ld->vr_rx_list[0]);
922 } else {
923 cd->vr_rx_chain[i].vr_nextdesc =
924 &cd->vr_rx_chain[i + 1];
925 ld->vr_rx_list[i].vr_next =
926 vtophys(&ld->vr_rx_list[i + 1]);
927 }
928 }
929
930 cd->vr_rx_head = &cd->vr_rx_chain[0];
931
932 return(0);
933 }
934
935 /*
936 * Initialize an RX descriptor and attach an MBUF cluster.
937 * Note: the length fields are only 11 bits wide, which means the
938 * largest size we can specify is 2047. This is important because
939 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
940 * overflow the field and make a mess.
941 */
942 static int
943 vr_newbuf(sc, c, m)
944 struct vr_softc *sc;
945 struct vr_chain_onefrag *c;
946 struct mbuf *m;
947 {
948 struct mbuf *m_new = NULL;
949
950 if (m == NULL) {
951 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
952 if (m_new == NULL)
953 return(ENOBUFS);
954
955 MCLGET(m_new, M_DONTWAIT);
956 if (!(m_new->m_flags & M_EXT)) {
957 m_freem(m_new);
958 return(ENOBUFS);
959 }
960 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
961 } else {
962 m_new = m;
963 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
964 m_new->m_data = m_new->m_ext.ext_buf;
965 }
966
967 m_adj(m_new, sizeof(u_int64_t));
968
969 c->vr_mbuf = m_new;
970 c->vr_ptr->vr_status = VR_RXSTAT;
971 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
972 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
973
974 return(0);
975 }
976
977 /*
978 * A frame has been uploaded: pass the resulting mbuf chain up to
979 * the higher level protocols.
980 */
981 static void
982 vr_rxeof(sc)
983 struct vr_softc *sc;
984 {
985 struct mbuf *m;
986 struct ifnet *ifp;
987 struct vr_chain_onefrag *cur_rx;
988 int total_len = 0;
989 u_int32_t rxstat;
990
991 ifp = &sc->arpcom.ac_if;
992
993 while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
994 VR_RXSTAT_OWN)) {
995 struct mbuf *m0 = NULL;
996
997 cur_rx = sc->vr_cdata.vr_rx_head;
998 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
999 m = cur_rx->vr_mbuf;
1000
1001 /*
1002 * If an error occurs, update stats, clear the
1003 * status word and leave the mbuf cluster in place:
1004 * it should simply get re-used next time this descriptor
1005 * comes up in the ring.
1006 */
1007 if (rxstat & VR_RXSTAT_RXERR) {
1008 ifp->if_ierrors++;
1009 printf("vr%d: rx error: ", sc->vr_unit);
1010 switch(rxstat & 0x000000FF) {
1011 case VR_RXSTAT_CRCERR:
1012 printf("crc error\n");
1013 break;
1014 case VR_RXSTAT_FRAMEALIGNERR:
1015 printf("frame alignment error\n");
1016 break;
1017 case VR_RXSTAT_FIFOOFLOW:
1018 printf("FIFO overflow\n");
1019 break;
1020 case VR_RXSTAT_GIANT:
1021 printf("received giant packet\n");
1022 break;
1023 case VR_RXSTAT_RUNT:
1024 printf("received runt packet\n");
1025 break;
1026 case VR_RXSTAT_BUSERR:
1027 printf("system bus error\n");
1028 break;
1029 case VR_RXSTAT_BUFFERR:
1030 printf("rx buffer error\n");
1031 break;
1032 default:
1033 printf("unknown rx error\n");
1034 break;
1035 }
1036 vr_newbuf(sc, cur_rx, m);
1037 continue;
1038 }
1039
1040 /* No errors; receive the packet. */
1041 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1042
1043 /*
1044 * XXX The VIA Rhine chip includes the CRC with every
1045 * received frame, and there's no way to turn this
1046 * behavior off (at least, I can't find anything in
1047 * the manual that explains how to do it) so we have
1048 * to trim off the CRC manually.
1049 */
1050 total_len -= ETHER_CRC_LEN;
1051
1052 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
1053 NULL);
1054 vr_newbuf(sc, cur_rx, m);
1055 if (m0 == NULL) {
1056 ifp->if_ierrors++;
1057 continue;
1058 }
1059 m = m0;
1060
1061 ifp->if_ipackets++;
1062 (*ifp->if_input)(ifp, m);
1063 }
1064
1065 return;
1066 }
1067
1068 static void
1069 vr_rxeoc(sc)
1070 struct vr_softc *sc;
1071 {
1072
1073 vr_rxeof(sc);
1074 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1075 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1076 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1077 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1078
1079 return;
1080 }
1081
1082 /*
1083 * A frame was downloaded to the chip. It's safe for us to clean up
1084 * the list buffers.
1085 */
1086
1087 static void
1088 vr_txeof(sc)
1089 struct vr_softc *sc;
1090 {
1091 struct vr_chain *cur_tx;
1092 struct ifnet *ifp;
1093
1094 ifp = &sc->arpcom.ac_if;
1095
1096 /* Reset the timeout timer; if_txeoc will clear it. */
1097 ifp->if_timer = 5;
1098
1099 /* Sanity check. */
1100 if (sc->vr_cdata.vr_tx_head == NULL)
1101 return;
1102
1103 /*
1104 * Go through our tx list and free mbufs for those
1105 * frames that have been transmitted.
1106 */
1107 while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1108 u_int32_t txstat;
1109
1110 cur_tx = sc->vr_cdata.vr_tx_head;
1111 txstat = cur_tx->vr_ptr->vr_status;
1112
1113 if ((txstat & VR_TXSTAT_ABRT) ||
1114 (txstat & VR_TXSTAT_UDF)) {
1115 while (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON)
1116 ; /* Wait for chip to shutdown */
1117 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1118 CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
1119 break;
1120 }
1121
1122 if (txstat & VR_TXSTAT_OWN)
1123 break;
1124
1125 if (txstat & VR_TXSTAT_ERRSUM) {
1126 ifp->if_oerrors++;
1127 if (txstat & VR_TXSTAT_DEFER)
1128 ifp->if_collisions++;
1129 if (txstat & VR_TXSTAT_LATECOLL)
1130 ifp->if_collisions++;
1131 }
1132
1133 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1134
1135 ifp->if_opackets++;
1136 if (cur_tx->vr_mbuf != NULL) {
1137 m_freem(cur_tx->vr_mbuf);
1138 cur_tx->vr_mbuf = NULL;
1139 }
1140
1141 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1142 sc->vr_cdata.vr_tx_head = NULL;
1143 sc->vr_cdata.vr_tx_tail = NULL;
1144 break;
1145 }
1146
1147 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1148 }
1149
1150 return;
1151 }
1152
1153 /*
1154 * TX 'end of channel' interrupt handler.
1155 */
1156 static void
1157 vr_txeoc(sc)
1158 struct vr_softc *sc;
1159 {
1160 struct ifnet *ifp;
1161
1162 ifp = &sc->arpcom.ac_if;
1163
1164 if (sc->vr_cdata.vr_tx_head == NULL) {
1165 ifp->if_flags &= ~IFF_OACTIVE;
1166 sc->vr_cdata.vr_tx_tail = NULL;
1167 ifp->if_timer = 0;
1168 }
1169
1170 return;
1171 }
1172
1173 static void
1174 vr_tick(xsc)
1175 void *xsc;
1176 {
1177 struct vr_softc *sc;
1178 struct mii_data *mii;
1179
1180 sc = xsc;
1181 VR_LOCK(sc);
1182 mii = device_get_softc(sc->vr_miibus);
1183 mii_tick(mii);
1184
1185 sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1186
1187 VR_UNLOCK(sc);
1188
1189 return;
1190 }
1191
1192 static void
1193 vr_intr(arg)
1194 void *arg;
1195 {
1196 struct vr_softc *sc;
1197 struct ifnet *ifp;
1198 u_int16_t status;
1199
1200 sc = arg;
1201 VR_LOCK(sc);
1202 ifp = &sc->arpcom.ac_if;
1203
1204 /* Supress unwanted interrupts. */
1205 if (!(ifp->if_flags & IFF_UP)) {
1206 vr_stop(sc);
1207 VR_UNLOCK(sc);
1208 return;
1209 }
1210
1211 /* Disable interrupts. */
1212 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1213
1214 for (;;) {
1215
1216 status = CSR_READ_2(sc, VR_ISR);
1217 if (status)
1218 CSR_WRITE_2(sc, VR_ISR, status);
1219
1220 if ((status & VR_INTRS) == 0)
1221 break;
1222
1223 if (status & VR_ISR_RX_OK)
1224 vr_rxeof(sc);
1225
1226 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1227 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
1228 (status & VR_ISR_RX_DROPPED)) {
1229 vr_rxeof(sc);
1230 vr_rxeoc(sc);
1231 }
1232
1233 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1234 vr_reset(sc);
1235 vr_init(sc);
1236 break;
1237 }
1238
1239 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1240 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1241 vr_txeof(sc);
1242 if ((status & VR_ISR_UDFI) ||
1243 (status & VR_ISR_TX_ABRT2) ||
1244 (status & VR_ISR_TX_ABRT)) {
1245 ifp->if_oerrors++;
1246 if (sc->vr_cdata.vr_tx_head != NULL) {
1247 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1248 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1249 }
1250 } else
1251 vr_txeoc(sc);
1252 }
1253
1254 }
1255
1256 /* Re-enable interrupts. */
1257 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1258
1259 if (ifp->if_snd.ifq_head != NULL) {
1260 vr_start(ifp);
1261 }
1262
1263 VR_UNLOCK(sc);
1264
1265 return;
1266 }
1267
1268 /*
1269 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1270 * pointers to the fragment pointers.
1271 */
1272 static int
1273 vr_encap(sc, c, m_head)
1274 struct vr_softc *sc;
1275 struct vr_chain *c;
1276 struct mbuf *m_head;
1277 {
1278 int frag = 0;
1279 struct vr_desc *f = NULL;
1280 int total_len;
1281 struct mbuf *m;
1282
1283 m = m_head;
1284 total_len = 0;
1285
1286 /*
1287 * The VIA Rhine wants packet buffers to be longword
1288 * aligned, but very often our mbufs aren't. Rather than
1289 * waste time trying to decide when to copy and when not
1290 * to copy, just do it all the time.
1291 */
1292 if (m != NULL) {
1293 struct mbuf *m_new = NULL;
1294
1295 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1296 if (m_new == NULL) {
1297 printf("vr%d: no memory for tx list\n", sc->vr_unit);
1298 return(1);
1299 }
1300 if (m_head->m_pkthdr.len > MHLEN) {
1301 MCLGET(m_new, M_DONTWAIT);
1302 if (!(m_new->m_flags & M_EXT)) {
1303 m_freem(m_new);
1304 printf("vr%d: no memory for tx list\n",
1305 sc->vr_unit);
1306 return(1);
1307 }
1308 }
1309 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1310 mtod(m_new, caddr_t));
1311 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1312 m_freem(m_head);
1313 m_head = m_new;
1314 /*
1315 * The Rhine chip doesn't auto-pad, so we have to make
1316 * sure to pad short frames out to the minimum frame length
1317 * ourselves.
1318 */
1319 if (m_head->m_len < VR_MIN_FRAMELEN) {
1320 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1321 m_new->m_len = m_new->m_pkthdr.len;
1322 }
1323 f = c->vr_ptr;
1324 f->vr_data = vtophys(mtod(m_new, caddr_t));
1325 f->vr_ctl = total_len = m_new->m_len;
1326 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1327 f->vr_status = 0;
1328 frag = 1;
1329 }
1330
1331 c->vr_mbuf = m_head;
1332 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1333 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1334
1335 return(0);
1336 }
1337
1338 /*
1339 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1340 * to the mbuf data regions directly in the transmit lists. We also save a
1341 * copy of the pointers since the transmit list fragment pointers are
1342 * physical addresses.
1343 */
1344
1345 static void
1346 vr_start(ifp)
1347 struct ifnet *ifp;
1348 {
1349 struct vr_softc *sc;
1350 struct mbuf *m_head = NULL;
1351 struct vr_chain *cur_tx = NULL, *start_tx;
1352
1353 sc = ifp->if_softc;
1354
1355 VR_LOCK(sc);
1356 if (ifp->if_flags & IFF_OACTIVE) {
1357 VR_UNLOCK(sc);
1358 return;
1359 }
1360
1361 /*
1362 * Check for an available queue slot. If there are none,
1363 * punt.
1364 */
1365 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1366 ifp->if_flags |= IFF_OACTIVE;
1367 return;
1368 }
1369
1370 start_tx = sc->vr_cdata.vr_tx_free;
1371
1372 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1373 IF_DEQUEUE(&ifp->if_snd, m_head);
1374 if (m_head == NULL)
1375 break;
1376
1377 /* Pick a descriptor off the free list. */
1378 cur_tx = sc->vr_cdata.vr_tx_free;
1379 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1380
1381 /* Pack the data into the descriptor. */
1382 if (vr_encap(sc, cur_tx, m_head)) {
1383 IF_PREPEND(&ifp->if_snd, m_head);
1384 ifp->if_flags |= IFF_OACTIVE;
1385 cur_tx = NULL;
1386 break;
1387 }
1388
1389 if (cur_tx != start_tx)
1390 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1391
1392 /*
1393 * If there's a BPF listener, bounce a copy of this frame
1394 * to him.
1395 */
1396 BPF_MTAP(ifp, cur_tx->vr_mbuf);
1397
1398 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1399 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1400 }
1401
1402 /*
1403 * If there are no frames queued, bail.
1404 */
1405 if (cur_tx == NULL) {
1406 VR_UNLOCK(sc);
1407 return;
1408 }
1409
1410 sc->vr_cdata.vr_tx_tail = cur_tx;
1411
1412 if (sc->vr_cdata.vr_tx_head == NULL)
1413 sc->vr_cdata.vr_tx_head = start_tx;
1414
1415 /*
1416 * Set a timeout in case the chip goes out to lunch.
1417 */
1418 ifp->if_timer = 5;
1419 VR_UNLOCK(sc);
1420
1421 return;
1422 }
1423
/*
 * Initialize the chip: stop any activity, program the station address,
 * DMA/threshold settings and RX filter, load the descriptor rings, and
 * bring the receiver and transmitter online.  Also used to re-init after
 * fatal errors.  Callable from both process and timeout context; takes
 * the softc lock itself.
 */
static void
vr_init(xsc)
	void			*xsc;
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	int			i;

	VR_LOCK(sc);

	mii = device_get_softc(sc->vr_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
			"memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		VR_UNLOCK(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.  Writing all-ones to ISR first clears
	 * any events that are already pending.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Kick the PHY into the currently selected media. */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the once-per-second PHY poll. */
	sc->vr_stat_ch = timeout(vr_tick, sc, hz);

	VR_UNLOCK(sc);

	return;
}
1529
1530 /*
1531 * Set media options.
1532 */
1533 static int
1534 vr_ifmedia_upd(ifp)
1535 struct ifnet *ifp;
1536 {
1537 struct vr_softc *sc;
1538
1539 sc = ifp->if_softc;
1540
1541 if (ifp->if_flags & IFF_UP)
1542 vr_init(sc);
1543
1544 return(0);
1545 }
1546
1547 /*
1548 * Report current media status.
1549 */
1550 static void
1551 vr_ifmedia_sts(ifp, ifmr)
1552 struct ifnet *ifp;
1553 struct ifmediareq *ifmr;
1554 {
1555 struct vr_softc *sc;
1556 struct mii_data *mii;
1557
1558 sc = ifp->if_softc;
1559 mii = device_get_softc(sc->vr_miibus);
1560 mii_pollstat(mii);
1561 ifmr->ifm_active = mii->mii_media_active;
1562 ifmr->ifm_status = mii->mii_media_status;
1563
1564 return;
1565 }
1566
1567 static int
1568 vr_ioctl(ifp, command, data)
1569 struct ifnet *ifp;
1570 u_long command;
1571 caddr_t data;
1572 {
1573 struct vr_softc *sc = ifp->if_softc;
1574 struct ifreq *ifr = (struct ifreq *) data;
1575 struct mii_data *mii;
1576 int error = 0;
1577
1578 VR_LOCK(sc);
1579
1580 switch(command) {
1581 case SIOCSIFFLAGS:
1582 if (ifp->if_flags & IFF_UP) {
1583 vr_init(sc);
1584 } else {
1585 if (ifp->if_flags & IFF_RUNNING)
1586 vr_stop(sc);
1587 }
1588 error = 0;
1589 break;
1590 case SIOCADDMULTI:
1591 case SIOCDELMULTI:
1592 vr_setmulti(sc);
1593 error = 0;
1594 break;
1595 case SIOCGIFMEDIA:
1596 case SIOCSIFMEDIA:
1597 mii = device_get_softc(sc->vr_miibus);
1598 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1599 break;
1600 default:
1601 error = ether_ioctl(ifp, command, data);
1602 break;
1603 }
1604
1605 VR_UNLOCK(sc);
1606
1607 return(error);
1608 }
1609
1610 static void
1611 vr_watchdog(ifp)
1612 struct ifnet *ifp;
1613 {
1614 struct vr_softc *sc;
1615
1616 sc = ifp->if_softc;
1617
1618 VR_LOCK(sc);
1619 ifp->if_oerrors++;
1620 printf("vr%d: watchdog timeout\n", sc->vr_unit);
1621
1622 vr_stop(sc);
1623 vr_reset(sc);
1624 vr_init(sc);
1625
1626 if (ifp->if_snd.ifq_head != NULL)
1627 vr_start(ifp);
1628
1629 VR_UNLOCK(sc);
1630
1631 return;
1632 }
1633
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Clears IFF_RUNNING/IFF_OACTIVE; takes the
 * softc lock itself.
 */
static void
vr_stop(sc)
	struct vr_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	VR_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Cancel the periodic PHY-poll callout. */
	untimeout(vr_tick, sc, sc->vr_stat_ch);

	/*
	 * Halt the chip: issue STOP, turn off RX/TX, mask all
	 * interrupts, and clear the descriptor-ring base pointers.
	 */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_rx_list,
		sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}

	bzero((char *)&sc->vr_ldata->vr_tx_list,
		sizeof(sc->vr_ldata->vr_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	VR_UNLOCK(sc);

	return;
}
1688
1689 /*
1690 * Stop all chip I/O so that the kernel's probe routines don't
1691 * get confused by errant DMAs when rebooting.
1692 */
1693 static void
1694 vr_shutdown(dev)
1695 device_t dev;
1696 {
1697 struct vr_softc *sc;
1698
1699 sc = device_get_softc(dev);
1700
1701 vr_stop(sc);
1702
1703 return;
1704 }
Cache object: 18196ccda559d1621920e3dd66752428
|