FreeBSD/Linux Kernel Cross Reference
sys/pci/if_vr.c
1 /*
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD$
33 */
34
35 /*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47 /*
48 * The VIA Rhine controllers are similar in some respects to the
49 * the DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
61
62 #include "bpfilter.h"
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71
72 #include <net/if.h>
73 #include <net/if_arp.h>
74 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81
82 #include "opt_bdg.h"
83 #ifdef BRIDGE
84 #include <net/bridge.h>
85 #endif /* BRIDGE */
86
87 #include <vm/vm.h> /* for vtophys */
88 #include <vm/pmap.h> /* for vtophys */
89 #include <machine/clock.h> /* for DELAY */
90 #include <machine/bus_pio.h>
91 #include <machine/bus_memio.h>
92 #include <machine/bus.h>
93
94 #include <pci/pcireg.h>
95 #include <pci/pcivar.h>
96
97 #define VR_USEIOSPACE
98
99 /* #define VR_BACKGROUND_AUTONEG */
100
101 #include <pci/if_vrreg.h>
102
103 #ifndef lint
104 static const char rcsid[] =
105 "$FreeBSD$";
106 #endif
107
108 /*
109 * Various supported device vendors/types and their names.
110 */
111 static struct vr_type vr_devs[] = {
112 { VIA_VENDORID, VIA_DEVICEID_RHINE,
113 "VIA VT3043 Rhine I 10/100BaseTX" },
114 { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
115 "VIA VT86C100A Rhine II 10/100BaseTX" },
116 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
117 "Delta Electronics Rhine II 10/100BaseTX" },
118 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
119 "Addtron Technology Rhine II 10/100BaseTX" },
120 { 0, 0, NULL }
121 };
122
123 /*
124 * Various supported PHY vendors/types and their names. Note that
125 * this driver will work with pretty much any MII-compliant PHY,
126 * so failure to positively identify the chip is not a fatal error.
127 */
128
129 static struct vr_type vr_phys[] = {
130 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
131 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
132 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
133 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
134 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
135 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
136 { 0, 0, "<MII-compliant physical interface>" }
137 };
138
139 static unsigned long vr_count = 0;
140 static const char *vr_probe __P((pcici_t, pcidi_t));
141 static void vr_attach __P((pcici_t, int));
142
143 static int vr_newbuf __P((struct vr_softc *,
144 struct vr_chain_onefrag *));
145 static int vr_encap __P((struct vr_softc *, struct vr_chain *,
146 struct mbuf * ));
147
148 static void vr_rxeof __P((struct vr_softc *));
149 static void vr_rxeoc __P((struct vr_softc *));
150 static void vr_txeof __P((struct vr_softc *));
151 static void vr_txeoc __P((struct vr_softc *));
152 static void vr_intr __P((void *));
153 static void vr_start __P((struct ifnet *));
154 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
155 static void vr_init __P((void *));
156 static void vr_stop __P((struct vr_softc *));
157 static void vr_watchdog __P((struct ifnet *));
158 static void vr_shutdown __P((int, void *));
159 static int vr_ifmedia_upd __P((struct ifnet *));
160 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
161
162 static void vr_mii_sync __P((struct vr_softc *));
163 static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
164 static int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
165 static int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
166 static u_int16_t vr_phy_readreg __P((struct vr_softc *, int));
167 static void vr_phy_writereg __P((struct vr_softc *, u_int16_t, u_int16_t));
168
169 static void vr_autoneg_xmit __P((struct vr_softc *));
170 static void vr_autoneg_mii __P((struct vr_softc *, int, int));
171 static void vr_setmode_mii __P((struct vr_softc *, int));
172 static void vr_getmode_mii __P((struct vr_softc *));
173 static void vr_setcfg __P((struct vr_softc *, u_int16_t));
174 static u_int8_t vr_calchash __P((u_int8_t *));
175 static void vr_setmulti __P((struct vr_softc *));
176 static void vr_reset __P((struct vr_softc *));
177 static int vr_list_rx_init __P((struct vr_softc *));
178 static int vr_list_tx_init __P((struct vr_softc *));
179
180 #define VR_SETBIT(sc, reg, x) \
181 CSR_WRITE_1(sc, reg, \
182 CSR_READ_1(sc, reg) | x)
183
184 #define VR_CLRBIT(sc, reg, x) \
185 CSR_WRITE_1(sc, reg, \
186 CSR_READ_1(sc, reg) & ~x)
187
188 #define VR_SETBIT16(sc, reg, x) \
189 CSR_WRITE_2(sc, reg, \
190 CSR_READ_2(sc, reg) | x)
191
192 #define VR_CLRBIT16(sc, reg, x) \
193 CSR_WRITE_2(sc, reg, \
194 CSR_READ_2(sc, reg) & ~x)
195
196 #define VR_SETBIT32(sc, reg, x) \
197 CSR_WRITE_4(sc, reg, \
198 CSR_READ_4(sc, reg) | x)
199
200 #define VR_CLRBIT32(sc, reg, x) \
201 CSR_WRITE_4(sc, reg, \
202 CSR_READ_4(sc, reg) & ~x)
203
204 #define SIO_SET(x) \
205 CSR_WRITE_1(sc, VR_MIICMD, \
206 CSR_READ_1(sc, VR_MIICMD) | x)
207
208 #define SIO_CLR(x) \
209 CSR_WRITE_1(sc, VR_MIICMD, \
210 CSR_READ_1(sc, VR_MIICMD) & ~x)
211
212 /*
213 * Sync the PHYs by setting data bit and strobing the clock 32 times.
214 */
215 static void vr_mii_sync(sc)
216 struct vr_softc *sc;
217 {
218 register int i;
219
220 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
221
222 for (i = 0; i < 32; i++) {
223 SIO_SET(VR_MIICMD_CLK);
224 DELAY(1);
225 SIO_CLR(VR_MIICMD_CLK);
226 DELAY(1);
227 }
228
229 return;
230 }
231
232 /*
233 * Clock a series of bits through the MII.
234 */
235 static void vr_mii_send(sc, bits, cnt)
236 struct vr_softc *sc;
237 u_int32_t bits;
238 int cnt;
239 {
240 int i;
241
242 SIO_CLR(VR_MIICMD_CLK);
243
244 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
245 if (bits & i) {
246 SIO_SET(VR_MIICMD_DATAIN);
247 } else {
248 SIO_CLR(VR_MIICMD_DATAIN);
249 }
250 DELAY(1);
251 SIO_CLR(VR_MIICMD_CLK);
252 DELAY(1);
253 SIO_SET(VR_MIICMD_CLK);
254 }
255 }
256
257 /*
258 * Read an PHY register through the MII.
259 */
/*
 * Read a PHY register through the MII, bit-banged via the VR_MIICMD
 * register in "direct program" mode.  On success returns 0 with the
 * register contents in frame->mii_data; returns 1 if the PHY did not
 * acknowledge the request.  Runs at splimp() so the bit-banging is
 * not interleaved with the interrupt path.
 */
static int vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Take software control of the MII pins. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info: <start><read-op><phy addr><reg addr>.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack: the PHY pulls DATAOUT low to acknowledge. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	/*
	 * NOTE(review): 4-byte read of the narrow VR_MIICMD register;
	 * matches the historical code but looks wider than necessary --
	 * confirm against the Rhine datasheet before changing.
	 */
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One final clock pulse to leave the management bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
350
351 /*
352 * Write to a PHY register through the MII.
353 */
/*
 * Write to a PHY register through the MII (bit-banged, see
 * vr_mii_readreg()).  The write is fire-and-forget -- no ack is
 * checked -- so this always returns 0.  Runs at splimp().
 */
static int vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int s;

	s = splimp();

	/* Take software control of the MII pins. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Clock out the frame: <start><op><phy><reg><turnaround><data>. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}
403
404 static u_int16_t vr_phy_readreg(sc, reg)
405 struct vr_softc *sc;
406 int reg;
407 {
408 struct vr_mii_frame frame;
409
410 bzero((char *)&frame, sizeof(frame));
411
412 frame.mii_phyaddr = sc->vr_phy_addr;
413 frame.mii_regaddr = reg;
414 vr_mii_readreg(sc, &frame);
415
416 return(frame.mii_data);
417 }
418
419 static void vr_phy_writereg(sc, reg, data)
420 struct vr_softc *sc;
421 u_int16_t reg;
422 u_int16_t data;
423 {
424 struct vr_mii_frame frame;
425
426 bzero((char *)&frame, sizeof(frame));
427
428 frame.mii_phyaddr = sc->vr_phy_addr;
429 frame.mii_regaddr = reg;
430 frame.mii_data = data;
431
432 vr_mii_writereg(sc, &frame);
433
434 return;
435 }
436
437 /*
438 * Calculate CRC of a multicast group address, return the lower 6 bits.
439 */
/*
 * Compute the Rhine's multicast hash for a 6-byte Ethernet address:
 * a bitwise CRC over the address, of which the upper 6 bits select
 * one of the 64 hash-filter bit positions.
 */
static u_int8_t
vr_calchash(u_int8_t *addr)
{
	u_int32_t crc = 0xFFFFFFFF;	/* initial value */
	u_int8_t cur;
	int byte, bit;

	for (byte = 0; byte < 6; byte++) {
		cur = addr[byte];
		/* Feed each byte in LSB first. */
		for (bit = 0; bit < 8; bit++) {
			u_int32_t carry = ((crc >> 31) & 1) ^ (cur & 0x01);

			crc <<= 1;
			cur >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}
464
465 /*
466 * Program the 64-bit multicast hash filter.
467 */
468 static void vr_setmulti(sc)
469 struct vr_softc *sc;
470 {
471 struct ifnet *ifp;
472 int h = 0;
473 u_int32_t hashes[2] = { 0, 0 };
474 struct ifmultiaddr *ifma;
475 u_int8_t rxfilt;
476 int mcnt = 0;
477
478 ifp = &sc->arpcom.ac_if;
479
480 rxfilt = CSR_READ_1(sc, VR_RXCFG);
481
482 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
483 rxfilt |= VR_RXCFG_RX_MULTI;
484 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
485 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
486 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
487 return;
488 }
489
490 /* first, zot all the existing hash bits */
491 CSR_WRITE_4(sc, VR_MAR0, 0);
492 CSR_WRITE_4(sc, VR_MAR1, 0);
493
494 /* now program new ones */
495 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
496 ifma = ifma->ifma_link.le_next) {
497 if (ifma->ifma_addr->sa_family != AF_LINK)
498 continue;
499 h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
500 if (h < 32)
501 hashes[0] |= (1 << h);
502 else
503 hashes[1] |= (1 << (h - 32));
504 mcnt++;
505 }
506
507 if (mcnt)
508 rxfilt |= VR_RXCFG_RX_MULTI;
509 else
510 rxfilt &= ~VR_RXCFG_RX_MULTI;
511
512 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
513 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
514 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
515
516 return;
517 }
518
519 /*
520 * Initiate an autonegotiation session.
521 */
522 static void vr_autoneg_xmit(sc)
523 struct vr_softc *sc;
524 {
525 u_int16_t phy_sts;
526
527 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
528 DELAY(500);
529 while(vr_phy_readreg(sc, PHY_BMCR)
530 & PHY_BMCR_RESET);
531
532 phy_sts = vr_phy_readreg(sc, PHY_BMCR);
533 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
534 vr_phy_writereg(sc, PHY_BMCR, phy_sts);
535
536 return;
537 }
538
539 /*
540 * Invoke autonegotiation on a PHY.
541 */
/*
 * Invoke autonegotiation on a PHY.  'flag' selects how the session
 * is driven: VR_FLAG_FORCEDELAY busy-waits 5 seconds (probe time
 * only), VR_FLAG_SCHEDDELAY arms the watchdog to call back later,
 * and VR_FLAG_DELAYTIMEO is that callback, which harvests the
 * result.  On completion the negotiated mode is programmed into
 * both the PHY and the chip, and the interface is re-initialized.
 */
static void vr_autoneg_mii(sc, flag, verbose)
	struct vr_softc *sc;
	int flag;
	int verbose;
{
	u_int16_t phy_sts = 0, media, advert, ability;
	struct ifnet *ifp;
	struct ifmedia *ifm;

	ifm = &sc->ifmedia;
	ifp = &sc->arpcom.ac_if;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

	/*
	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
	 * bit cleared in the status register, but has the 'autoneg enabled'
	 * bit set in the control register. This is a contradiction, and
	 * I'm not sure how to handle it. If you want to force an attempt
	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
	 * and see what happens.
	 */
#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's
	 * no point in continuing.
	 */
	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			printf("vr%d: autonegotiation not supported\n",
			    sc->vr_unit);
		/* Fall back to the lowest common denominator. */
		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
		return;
	}
#endif

	switch (flag) {
	case VR_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks
		 * for three whole seconds after we've gone multi-user
		 * is really bad manners.
		 */
		vr_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case VR_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting
		 * an autoneg session, otherwise vr_start() may clobber
		 * our timeout, and we don't want to allow transmission
		 * during an autoneg session since that can screw it up.
		 */
		if (sc->vr_cdata.vr_tx_head != NULL) {
			sc->vr_want_auto = 1;
			return;
		}
		vr_autoneg_xmit(sc);
		/* Harvest the result via the watchdog in 5 seconds. */
		ifp->if_timer = 5;
		sc->vr_autoneg = 1;
		sc->vr_want_auto = 0;
		return;
		break;
	case VR_FLAG_DELAYTIMEO:
		/* Watchdog fired: the session is over, collect results. */
		ifp->if_timer = 0;
		sc->vr_autoneg = 0;
		break;
	default:
		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
		return;
	}

	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			printf("vr%d: autoneg complete, ", sc->vr_unit);
		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			printf("vr%d: autoneg not complete, ", sc->vr_unit);
	}

	media = vr_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			printf("link status good ");
		/* Best mode = highest common bit of what we advertised
		   and what the link partner can do. */
		advert = vr_phy_readreg(sc, PHY_ANAR);
		ability = vr_phy_readreg(sc, PHY_LPAR);

		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
		    ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
		    ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
		    ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}

		/* Lock in the negotiated mode by disabling autoneg. */
		media &= ~PHY_BMCR_AUTONEGENBL;

		/* Set ASIC's duplex mode to match the PHY. */
		vr_setcfg(sc, media);
		vr_phy_writereg(sc, PHY_BMCR, media);
	} else {
		if (verbose)
			printf("no carrier\n");
	}

	vr_init(sc);

	/* Restart any transmission that was deferred for the session. */
	if (sc->vr_tx_pend) {
		sc->vr_autoneg = 0;
		sc->vr_tx_pend = 0;
		vr_start(ifp);
	}

	return;
}
684
685 static void vr_getmode_mii(sc)
686 struct vr_softc *sc;
687 {
688 u_int16_t bmsr;
689 struct ifnet *ifp;
690
691 ifp = &sc->arpcom.ac_if;
692
693 bmsr = vr_phy_readreg(sc, PHY_BMSR);
694 if (bootverbose)
695 printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
696
697 /* fallback */
698 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
699
700 if (bmsr & PHY_BMSR_10BTHALF) {
701 if (bootverbose)
702 printf("vr%d: 10Mbps half-duplex mode supported\n",
703 sc->vr_unit);
704 ifmedia_add(&sc->ifmedia,
705 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
706 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
707 }
708
709 if (bmsr & PHY_BMSR_10BTFULL) {
710 if (bootverbose)
711 printf("vr%d: 10Mbps full-duplex mode supported\n",
712 sc->vr_unit);
713 ifmedia_add(&sc->ifmedia,
714 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
715 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
716 }
717
718 if (bmsr & PHY_BMSR_100BTXHALF) {
719 if (bootverbose)
720 printf("vr%d: 100Mbps half-duplex mode supported\n",
721 sc->vr_unit);
722 ifp->if_baudrate = 100000000;
723 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
724 ifmedia_add(&sc->ifmedia,
725 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
726 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
727 }
728
729 if (bmsr & PHY_BMSR_100BTXFULL) {
730 if (bootverbose)
731 printf("vr%d: 100Mbps full-duplex mode supported\n",
732 sc->vr_unit);
733 ifp->if_baudrate = 100000000;
734 ifmedia_add(&sc->ifmedia,
735 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
736 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
737 }
738
739 /* Some also support 100BaseT4. */
740 if (bmsr & PHY_BMSR_100BT4) {
741 if (bootverbose)
742 printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
743 ifp->if_baudrate = 100000000;
744 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
745 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
746 #ifdef FORCE_AUTONEG_TFOUR
747 if (bootverbose)
748 printf("vr%d: forcing on autoneg support for BT4\n",
749 sc->vr_unit);
750 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
751 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
752 #endif
753 }
754
755 if (bmsr & PHY_BMSR_CANAUTONEG) {
756 if (bootverbose)
757 printf("vr%d: autoneg supported\n", sc->vr_unit);
758 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
759 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
760 }
761
762 return;
763 }
764
765 /*
766 * Set speed and duplex mode.
767 */
768 static void vr_setmode_mii(sc, media)
769 struct vr_softc *sc;
770 int media;
771 {
772 u_int16_t bmcr;
773 struct ifnet *ifp;
774
775 ifp = &sc->arpcom.ac_if;
776
777 /*
778 * If an autoneg session is in progress, stop it.
779 */
780 if (sc->vr_autoneg) {
781 printf("vr%d: canceling autoneg session\n", sc->vr_unit);
782 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
783 bmcr = vr_phy_readreg(sc, PHY_BMCR);
784 bmcr &= ~PHY_BMCR_AUTONEGENBL;
785 vr_phy_writereg(sc, PHY_BMCR, bmcr);
786 }
787
788 printf("vr%d: selecting MII, ", sc->vr_unit);
789
790 bmcr = vr_phy_readreg(sc, PHY_BMCR);
791
792 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
793 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
794
795 if (IFM_SUBTYPE(media) == IFM_100_T4) {
796 printf("100Mbps/T4, half-duplex\n");
797 bmcr |= PHY_BMCR_SPEEDSEL;
798 bmcr &= ~PHY_BMCR_DUPLEX;
799 }
800
801 if (IFM_SUBTYPE(media) == IFM_100_TX) {
802 printf("100Mbps, ");
803 bmcr |= PHY_BMCR_SPEEDSEL;
804 }
805
806 if (IFM_SUBTYPE(media) == IFM_10_T) {
807 printf("10Mbps, ");
808 bmcr &= ~PHY_BMCR_SPEEDSEL;
809 }
810
811 if ((media & IFM_GMASK) == IFM_FDX) {
812 printf("full duplex\n");
813 bmcr |= PHY_BMCR_DUPLEX;
814 } else {
815 printf("half duplex\n");
816 bmcr &= ~PHY_BMCR_DUPLEX;
817 }
818
819 vr_setcfg(sc, bmcr);
820 vr_phy_writereg(sc, PHY_BMCR, bmcr);
821
822 return;
823 }
824
825 /*
826 * In order to fiddle with the
827 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
828 * first have to put the transmit and/or receive logic in the idle state.
829 */
830 static void vr_setcfg(sc, bmcr)
831 struct vr_softc *sc;
832 u_int16_t bmcr;
833 {
834 int restart = 0;
835
836 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
837 restart = 1;
838 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
839 }
840
841 if (bmcr & PHY_BMCR_DUPLEX)
842 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
843 else
844 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
845
846 if (restart)
847 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
848
849 return;
850 }
851
852 static void vr_reset(sc)
853 struct vr_softc *sc;
854 {
855 register int i;
856
857 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
858
859 for (i = 0; i < VR_TIMEOUT; i++) {
860 DELAY(10);
861 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
862 break;
863 }
864 if (i == VR_TIMEOUT)
865 printf("vr%d: reset never completed!\n", sc->vr_unit);
866
867 /* Wait a little while for the chip to get its brains in order. */
868 DELAY(1000);
869
870 return;
871 }
872
873 /*
874 * Probe for a VIA Rhine chip. Check the PCI vendor and device
875 * IDs against our list and return a device name if we find a match.
876 */
877 static const char *
878 vr_probe(config_id, device_id)
879 pcici_t config_id;
880 pcidi_t device_id;
881 {
882 struct vr_type *t;
883
884 t = vr_devs;
885
886 while(t->vr_name != NULL) {
887 if ((device_id & 0xFFFF) == t->vr_vid &&
888 ((device_id >> 16) & 0xFFFF) == t->vr_did) {
889 return(t->vr_name);
890 }
891 t++;
892 }
893
894 return(NULL);
895 }
896
897 /*
898 * Attach the interface. Allocate softc structures, do ifmedia
899 * setup and ethernet/BPF attach.
900 */
901 static void
902 vr_attach(config_id, unit)
903 pcici_t config_id;
904 int unit;
905 {
906 int s, i;
907 #ifndef VR_USEIOSPACE
908 vm_offset_t pbase, vbase;
909 #endif
910 u_char eaddr[ETHER_ADDR_LEN];
911 u_int32_t command;
912 struct vr_softc *sc;
913 struct ifnet *ifp;
914 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
915 unsigned int round;
916 caddr_t roundptr;
917 struct vr_type *p;
918 u_int16_t phy_vid, phy_did, phy_sts;
919
920 s = splimp();
921
922 sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT);
923 if (sc == NULL) {
924 printf("vr%d: no memory for softc struct!\n", unit);
925 return;
926 }
927 bzero(sc, sizeof(struct vr_softc));
928
929 /*
930 * Handle power management nonsense.
931 */
932
933 command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF;
934 if (command == 0x01) {
935
936 command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL);
937 if (command & VR_PSTATE_MASK) {
938 u_int32_t iobase, membase, irq;
939
940 /* Save important PCI config data. */
941 iobase = pci_conf_read(config_id, VR_PCI_LOIO);
942 membase = pci_conf_read(config_id, VR_PCI_LOMEM);
943 irq = pci_conf_read(config_id, VR_PCI_INTLINE);
944
945 /* Reset the power state. */
946 printf("vr%d: chip is in D%d power mode "
947 "-- setting to D0\n", unit, command & VR_PSTATE_MASK);
948 command &= 0xFFFFFFFC;
949 pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command);
950
951 /* Restore PCI config data. */
952 pci_conf_write(config_id, VR_PCI_LOIO, iobase);
953 pci_conf_write(config_id, VR_PCI_LOMEM, membase);
954 pci_conf_write(config_id, VR_PCI_INTLINE, irq);
955 }
956 }
957
958 /*
959 * Map control/status registers.
960 */
961 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
962 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
963 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
964 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
965
966 #ifdef VR_USEIOSPACE
967 if (!(command & PCIM_CMD_PORTEN)) {
968 printf("vr%d: failed to enable I/O ports!\n", unit);
969 free(sc, M_DEVBUF);
970 goto fail;
971 }
972
973 if (!pci_map_port(config_id, VR_PCI_LOIO,
974 (u_int16_t *)(&sc->vr_bhandle))) {
975 printf ("vr%d: couldn't map ports\n", unit);
976 goto fail;
977 }
978 sc->vr_btag = I386_BUS_SPACE_IO;
979 #else
980 if (!(command & PCIM_CMD_MEMEN)) {
981 printf("vr%d: failed to enable memory mapping!\n", unit);
982 goto fail;
983 }
984
985 if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) {
986 printf ("vr%d: couldn't map memory\n", unit);
987 goto fail;
988 }
989
990 sc->vr_bhandle = vbase;
991 sc->vr_btag = I386_BUS_SPACE_MEM;
992 #endif
993
994 /* Allocate interrupt */
995 if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) {
996 printf("vr%d: couldn't map interrupt\n", unit);
997 goto fail;
998 }
999
1000 /* Reset the adapter. */
1001 vr_reset(sc);
1002
1003 /*
1004 * Get station address. The way the Rhine chips work,
1005 * you're not allowed to directly access the EEPROM once
1006 * they've been programmed a special way. Consequently,
1007 * we need to read the node address from the PAR0 and PAR1
1008 * registers.
1009 */
1010 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1011 DELAY(200);
1012 for (i = 0; i < ETHER_ADDR_LEN; i++)
1013 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1014
1015 /*
1016 * A Rhine chip was detected. Inform the world.
1017 */
1018 printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1019
1020 sc->vr_unit = unit;
1021 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1022
1023 sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1024 M_DEVBUF, M_NOWAIT);
1025 if (sc->vr_ldata_ptr == NULL) {
1026 free(sc, M_DEVBUF);
1027 printf("vr%d: no memory for list buffers!\n", unit);
1028 return;
1029 }
1030
1031 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1032 round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1033 roundptr = sc->vr_ldata_ptr;
1034 for (i = 0; i < 8; i++) {
1035 if (round % 8) {
1036 round++;
1037 roundptr++;
1038 } else
1039 break;
1040 }
1041 sc->vr_ldata = (struct vr_list_data *)roundptr;
1042 bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1043
1044 ifp = &sc->arpcom.ac_if;
1045 ifp->if_softc = sc;
1046 ifp->if_unit = unit;
1047 ifp->if_name = "vr";
1048 ifp->if_mtu = ETHERMTU;
1049 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1050 ifp->if_ioctl = vr_ioctl;
1051 ifp->if_output = ether_output;
1052 ifp->if_start = vr_start;
1053 ifp->if_watchdog = vr_watchdog;
1054 ifp->if_init = vr_init;
1055 ifp->if_baudrate = 10000000;
1056 ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
1057
1058 if (bootverbose)
1059 printf("vr%d: probing for a PHY\n", sc->vr_unit);
1060 for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1061 if (bootverbose)
1062 printf("vr%d: checking address: %d\n",
1063 sc->vr_unit, i);
1064 sc->vr_phy_addr = i;
1065 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1066 DELAY(500);
1067 while(vr_phy_readreg(sc, PHY_BMCR)
1068 & PHY_BMCR_RESET);
1069 if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1070 break;
1071 }
1072 if (phy_sts) {
1073 phy_vid = vr_phy_readreg(sc, PHY_VENID);
1074 phy_did = vr_phy_readreg(sc, PHY_DEVID);
1075 if (bootverbose)
1076 printf("vr%d: found PHY at address %d, ",
1077 sc->vr_unit, sc->vr_phy_addr);
1078 if (bootverbose)
1079 printf("vendor id: %x device id: %x\n",
1080 phy_vid, phy_did);
1081 p = vr_phys;
1082 while(p->vr_vid) {
1083 if (phy_vid == p->vr_vid &&
1084 (phy_did | 0x000F) == p->vr_did) {
1085 sc->vr_pinfo = p;
1086 break;
1087 }
1088 p++;
1089 }
1090 if (sc->vr_pinfo == NULL)
1091 sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1092 if (bootverbose)
1093 printf("vr%d: PHY type: %s\n",
1094 sc->vr_unit, sc->vr_pinfo->vr_name);
1095 } else {
1096 printf("vr%d: MII without any phy!\n", sc->vr_unit);
1097 goto fail;
1098 }
1099
1100 /*
1101 * Do ifmedia setup.
1102 */
1103 ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1104
1105 vr_getmode_mii(sc);
1106 vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1107 media = sc->ifmedia.ifm_media;
1108 vr_stop(sc);
1109
1110 ifmedia_set(&sc->ifmedia, media);
1111
1112 /*
1113 * Call MI attach routines.
1114 */
1115 if_attach(ifp);
1116 ether_ifattach(ifp);
1117
1118 #if NBPFILTER > 0
1119 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1120 #endif
1121
1122 at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC);
1123
1124 fail:
1125 splx(s);
1126 return;
1127 }
1128
1129 /*
1130 * Initialize the transmit descriptors.
1131 */
1132 static int vr_list_tx_init(sc)
1133 struct vr_softc *sc;
1134 {
1135 struct vr_chain_data *cd;
1136 struct vr_list_data *ld;
1137 int i;
1138
1139 cd = &sc->vr_cdata;
1140 ld = sc->vr_ldata;
1141 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1142 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1143 if (i == (VR_TX_LIST_CNT - 1))
1144 cd->vr_tx_chain[i].vr_nextdesc =
1145 &cd->vr_tx_chain[0];
1146 else
1147 cd->vr_tx_chain[i].vr_nextdesc =
1148 &cd->vr_tx_chain[i + 1];
1149 }
1150
1151 cd->vr_tx_free = &cd->vr_tx_chain[0];
1152 cd->vr_tx_tail = cd->vr_tx_head = NULL;
1153
1154 return(0);
1155 }
1156
1157
1158 /*
1159 * Initialize the RX descriptors and allocate mbufs for them. Note that
1160 * we arrange the descriptors in a closed ring, so that the last descriptor
1161 * points back to the first.
1162 */
1163 static int vr_list_rx_init(sc)
1164 struct vr_softc *sc;
1165 {
1166 struct vr_chain_data *cd;
1167 struct vr_list_data *ld;
1168 int i;
1169
1170 cd = &sc->vr_cdata;
1171 ld = sc->vr_ldata;
1172
1173 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1174 cd->vr_rx_chain[i].vr_ptr =
1175 (struct vr_desc *)&ld->vr_rx_list[i];
1176 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
1177 return(ENOBUFS);
1178 if (i == (VR_RX_LIST_CNT - 1)) {
1179 cd->vr_rx_chain[i].vr_nextdesc =
1180 &cd->vr_rx_chain[0];
1181 ld->vr_rx_list[i].vr_next =
1182 vtophys(&ld->vr_rx_list[0]);
1183 } else {
1184 cd->vr_rx_chain[i].vr_nextdesc =
1185 &cd->vr_rx_chain[i + 1];
1186 ld->vr_rx_list[i].vr_next =
1187 vtophys(&ld->vr_rx_list[i + 1]);
1188 }
1189 }
1190
1191 cd->vr_rx_head = &cd->vr_rx_chain[0];
1192
1193 return(0);
1194 }
1195
1196 /*
1197 * Initialize an RX descriptor and attach an MBUF cluster.
1198 * Note: the length fields are only 11 bits wide, which means the
1199 * largest size we can specify is 2047. This is important because
1200 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1201 * overflow the field and make a mess.
1202 */
1203 static int vr_newbuf(sc, c)
1204 struct vr_softc *sc;
1205 struct vr_chain_onefrag *c;
1206 {
1207 struct mbuf *m_new = NULL;
1208
1209 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1210 if (m_new == NULL) {
1211 printf("vr%d: no memory for rx list -- packet dropped!\n",
1212 sc->vr_unit);
1213 return(ENOBUFS);
1214 }
1215
1216 MCLGET(m_new, M_DONTWAIT);
1217 if (!(m_new->m_flags & M_EXT)) {
1218 printf("vr%d: no memory for rx list -- packet dropped!\n",
1219 sc->vr_unit);
1220 m_freem(m_new);
1221 return(ENOBUFS);
1222 }
1223
1224 c->vr_mbuf = m_new;
1225 c->vr_ptr->vr_status = VR_RXSTAT;
1226 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1227 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1228
1229 return(0);
1230 }
1231
1232 /*
1233 * A frame has been uploaded: pass the resulting mbuf chain up to
1234 * the higher level protocols.
1235 */
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the RX ring from vr_rx_head, handling every descriptor whose
 * OWN bit the chip has cleared.  Good frames get a fresh cluster
 * attached to the descriptor (vr_newbuf) and are passed to BPF, the
 * optional bridge code and ether_input().  Errored frames only have
 * their descriptor recycled in place, reusing the existing cluster.
 */
static void vr_rxeof(sc)
	struct vr_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Stop at the first descriptor still owned by the chip. */
	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("vr%d: rx error: ", sc->vr_unit);
			/* Decode the low status byte for the console log. */
			switch(rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			/* Re-arm the descriptor with its old cluster. */
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->vr_mbuf;
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			/* In promiscuous mode, drop unicast frames that are
			 * neither ours nor multicast after BPF has seen them. */
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN) &&
			    (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif /* NBPF>0 */
#ifdef BRIDGE
		/* Give the bridging code first crack at the frame; it may
		 * consume it (forwarded/dropped) instead of the local stack. */
		if (do_bridge) {
			struct ifnet *bdg_ifp;
			bdg_ifp = bridge_in(m);
			if (bdg_ifp != BDG_LOCAL && bdg_ifp != BDG_DROP)
				bdg_forward(&m, bdg_ifp);
			if (((bdg_ifp != BDG_LOCAL) && (bdg_ifp != BDG_BCAST) &&
			    (bdg_ifp != BDG_MCAST)) || bdg_ifp == BDG_DROP) {
				m_freem(m);
				continue;
			}
		}
#endif /* BRIDGE */

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	return;
}
1363
/*
 * RX 'end of channel' handler: the receiver has stopped.  Drain any
 * frames already completed, then turn the receiver off, reload the
 * RX descriptor pointer with the current ring head, and turn the
 * receiver back on.
 */
void vr_rxeoc(sc)
	struct vr_softc		*sc;
{

	vr_rxeof(sc);
	/* Receiver is switched off before VR_RXADDR is rewritten,
	 * then re-enabled and told to go. */
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}
1376
1377 /*
1378 * A frame was downloaded to the chip. It's safe for us to clean up
1379 * the list buffers.
1380 */
1381
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Walks the active TX list from vr_tx_head, freeing the mbuf of every
 * descriptor the chip has released (OWN bit clear) and accumulating
 * error/collision statistics from the descriptor status word.  Stops
 * at the first descriptor still owned by the chip or when the list
 * drains (head catches up with tail).
 */
static void vr_txeof(sc)
	struct vr_softc		*sc;
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp;
	register struct mbuf	*n;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Chip still owns this one: transmission not complete. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* The collision count field starts at bit 3. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/* vr_encap() coalesces into a single mbuf, so MFREE of
		 * that one mbuf releases the whole frame. */
		MFREE(cur_tx->vr_mbuf, n);
		cur_tx->vr_mbuf = NULL;

		/* List drained: head met tail, reset both. */
		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}
1436
1437 /*
1438 * TX 'end of channel' interrupt handler.
1439 */
1440 static void vr_txeoc(sc)
1441 struct vr_softc *sc;
1442 {
1443 struct ifnet *ifp;
1444
1445 ifp = &sc->arpcom.ac_if;
1446
1447 ifp->if_timer = 0;
1448
1449 if (sc->vr_cdata.vr_tx_head == NULL) {
1450 ifp->if_flags &= ~IFF_OACTIVE;
1451 sc->vr_cdata.vr_tx_tail = NULL;
1452 if (sc->vr_want_auto)
1453 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1454 }
1455
1456 return;
1457 }
1458
1459 static void vr_intr(arg)
1460 void *arg;
1461 {
1462 struct vr_softc *sc;
1463 struct ifnet *ifp;
1464 u_int16_t status;
1465
1466 sc = arg;
1467 ifp = &sc->arpcom.ac_if;
1468
1469 /* Supress unwanted interrupts. */
1470 if (!(ifp->if_flags & IFF_UP)) {
1471 vr_stop(sc);
1472 return;
1473 }
1474
1475 /* Disable interrupts. */
1476 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1477
1478 for (;;) {
1479
1480 status = CSR_READ_2(sc, VR_ISR);
1481 if (status)
1482 CSR_WRITE_2(sc, VR_ISR, status);
1483
1484 if ((status & VR_INTRS) == 0)
1485 break;
1486
1487 if (status & VR_ISR_RX_OK)
1488 vr_rxeof(sc);
1489
1490 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1491 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
1492 (status & VR_ISR_RX_DROPPED)) {
1493 vr_rxeof(sc);
1494 vr_rxeoc(sc);
1495 }
1496
1497 if (status & VR_ISR_TX_OK) {
1498 vr_txeof(sc);
1499 vr_txeoc(sc);
1500 }
1501
1502 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){
1503 ifp->if_oerrors++;
1504 vr_txeof(sc);
1505 if (sc->vr_cdata.vr_tx_head != NULL) {
1506 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1507 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1508 }
1509 }
1510
1511 if (status & VR_ISR_BUSERR) {
1512 vr_reset(sc);
1513 vr_init(sc);
1514 }
1515 }
1516
1517 /* Re-enable interrupts. */
1518 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1519
1520 if (ifp->if_snd.ifq_head != NULL) {
1521 vr_start(ifp);
1522 }
1523
1524 return;
1525 }
1526
1527 /*
1528 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1529 * pointers to the fragment pointers.
1530 */
1531 static int vr_encap(sc, c, m_head)
1532 struct vr_softc *sc;
1533 struct vr_chain *c;
1534 struct mbuf *m_head;
1535 {
1536 int frag = 0;
1537 struct vr_desc *f = NULL;
1538 int total_len;
1539 struct mbuf *m;
1540
1541 m = m_head;
1542 total_len = 0;
1543
1544 /*
1545 * The VIA Rhine wants packet buffers to be longword
1546 * aligned, but very often our mbufs aren't. Rather than
1547 * waste time trying to decide when to copy and when not
1548 * to copy, just do it all the time.
1549 */
1550 if (m != NULL) {
1551 struct mbuf *m_new = NULL;
1552
1553 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1554 if (m_new == NULL) {
1555 printf("vr%d: no memory for tx list", sc->vr_unit);
1556 return(1);
1557 }
1558 if (m_head->m_pkthdr.len > MHLEN) {
1559 MCLGET(m_new, M_DONTWAIT);
1560 if (!(m_new->m_flags & M_EXT)) {
1561 m_freem(m_new);
1562 printf("vr%d: no memory for tx list",
1563 sc->vr_unit);
1564 return(1);
1565 }
1566 }
1567 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1568 mtod(m_new, caddr_t));
1569 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1570 m_freem(m_head);
1571 m_head = m_new;
1572 /*
1573 * The Rhine chip doesn't auto-pad, so we have to make
1574 * sure to pad short frames out to the minimum frame length
1575 * ourselves.
1576 */
1577 if (m_head->m_len < VR_MIN_FRAMELEN) {
1578 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1579 m_new->m_len = m_new->m_pkthdr.len;
1580 }
1581 f = c->vr_ptr;
1582 f->vr_data = vtophys(mtod(m_new, caddr_t));
1583 f->vr_ctl = total_len = m_new->m_len;
1584 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1585 f->vr_status = 0;
1586 frag = 1;
1587 }
1588
1589 c->vr_mbuf = m_head;
1590 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1591 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1592
1593 return(0);
1594 }
1595
1596 /*
1597 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1598 * to the mbuf data regions directly in the transmit lists. We also save a
1599 * copy of the pointers since the transmit list fragment pointers are
1600 * physical addresses.
1601 */
1602
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void vr_start(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct vr_chain		*cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/* Defer transmission while autonegotiation is in progress. */
	if (sc->vr_autoneg) {
		sc->vr_tx_pend = 1;
		return;
	}

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->vr_cdata.vr_tx_free;

	/* Fill free descriptors until the ring or send queue runs out. */
	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/* Pack the data into the descriptor. */
		/* NOTE(review): vr_encap() can fail (returns 1) and its
		 * return value is ignored here -- confirm intended. */
		vr_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->vr_mbuf);
#endif
		/* NOTE(review): OWN is stored again unconditionally here
		 * (including for start_tx) and the transmitter is kicked
		 * once per packet -- confirm the double store is intended. */
		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL)
		sc->vr_cdata.vr_tx_head = start_tx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1673
/*
 * Initialize the chip for operation: reset it, rebuild the RX/TX
 * descriptor rings, program the receive filter, and restart the
 * receiver and transmitter.  A no-op while autonegotiation is in
 * progress.  Runs at splimp().
 */
static void vr_init(xsc)
	void			*xsc;
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int16_t		phy_bmcr = 0;
	int			s;

	if (sc->vr_autoneg)
		return;

	s = splimp();

	/* Save BMCR so the media setting survives the reset below. */
	if (sc->vr_pinfo != NULL)
		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Select store-and-forward thresholds for both directions. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
			"memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Match chip duplex setting to the current PHY mode. */
	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	if (sc->vr_pinfo != NULL)
		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	return;
}
1764
1765 /*
1766 * Set media options.
1767 */
1768 static int vr_ifmedia_upd(ifp)
1769 struct ifnet *ifp;
1770 {
1771 struct vr_softc *sc;
1772 struct ifmedia *ifm;
1773
1774 sc = ifp->if_softc;
1775 ifm = &sc->ifmedia;
1776
1777 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1778 return(EINVAL);
1779
1780 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1781 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1782 else
1783 vr_setmode_mii(sc, ifm->ifm_media);
1784
1785 return(0);
1786 }
1787
1788 /*
1789 * Report current media status.
1790 */
1791 static void vr_ifmedia_sts(ifp, ifmr)
1792 struct ifnet *ifp;
1793 struct ifmediareq *ifmr;
1794 {
1795 struct vr_softc *sc;
1796 u_int16_t advert = 0, ability = 0;
1797
1798 sc = ifp->if_softc;
1799
1800 ifmr->ifm_active = IFM_ETHER;
1801
1802 if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1803 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1804 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1805 else
1806 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1807 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1808 ifmr->ifm_active |= IFM_FDX;
1809 else
1810 ifmr->ifm_active |= IFM_HDX;
1811 return;
1812 }
1813
1814 ability = vr_phy_readreg(sc, PHY_LPAR);
1815 advert = vr_phy_readreg(sc, PHY_ANAR);
1816 if (advert & PHY_ANAR_100BT4 &&
1817 ability & PHY_ANAR_100BT4) {
1818 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1819 } else if (advert & PHY_ANAR_100BTXFULL &&
1820 ability & PHY_ANAR_100BTXFULL) {
1821 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1822 } else if (advert & PHY_ANAR_100BTXHALF &&
1823 ability & PHY_ANAR_100BTXHALF) {
1824 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1825 } else if (advert & PHY_ANAR_10BTFULL &&
1826 ability & PHY_ANAR_10BTFULL) {
1827 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1828 } else if (advert & PHY_ANAR_10BTHALF &&
1829 ability & PHY_ANAR_10BTHALF) {
1830 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1831 }
1832
1833 return;
1834 }
1835
1836 static int vr_ioctl(ifp, command, data)
1837 struct ifnet *ifp;
1838 u_long command;
1839 caddr_t data;
1840 {
1841 struct vr_softc *sc = ifp->if_softc;
1842 struct ifreq *ifr = (struct ifreq *) data;
1843 int s, error = 0;
1844
1845 s = splimp();
1846
1847 switch(command) {
1848 case SIOCSIFADDR:
1849 case SIOCGIFADDR:
1850 case SIOCSIFMTU:
1851 error = ether_ioctl(ifp, command, data);
1852 break;
1853 case SIOCSIFFLAGS:
1854 if (ifp->if_flags & IFF_UP) {
1855 vr_init(sc);
1856 } else {
1857 if (ifp->if_flags & IFF_RUNNING)
1858 vr_stop(sc);
1859 }
1860 error = 0;
1861 break;
1862 case SIOCADDMULTI:
1863 case SIOCDELMULTI:
1864 vr_setmulti(sc);
1865 error = 0;
1866 break;
1867 case SIOCGIFMEDIA:
1868 case SIOCSIFMEDIA:
1869 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1870 break;
1871 default:
1872 error = EINVAL;
1873 break;
1874 }
1875
1876 (void)splx(s);
1877
1878 return(error);
1879 }
1880
/*
 * Watchdog timer handler: fires when a transmission exceeds the
 * timeout armed in vr_start().  Logs the event (and a link hint if
 * the PHY reports no carrier), then resets and reinitializes the
 * chip and restarts any pending output.
 */
static void vr_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;

	sc = ifp->if_softc;

	/* During autoneg the watchdog doubles as the autoneg delay timer. */
	if (sc->vr_autoneg) {
		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
		return;
	}

	ifp->if_oerrors++;
	printf("vr%d: watchdog timeout\n", sc->vr_unit);

	/* A dead link is the most common cause -- give a hint. */
	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("vr%d: no carrier - transceiver cable problem?\n",
			sc->vr_unit);

	vr_stop(sc);
	vr_reset(sc);
	vr_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		vr_start(ifp);

	return;
}
1909
1910 /*
1911 * Stop the adapter and free any mbufs allocated to the
1912 * RX and TX lists.
1913 */
1914 static void vr_stop(sc)
1915 struct vr_softc *sc;
1916 {
1917 register int i;
1918 struct ifnet *ifp;
1919
1920 ifp = &sc->arpcom.ac_if;
1921 ifp->if_timer = 0;
1922
1923 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1924 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1925 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1926 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1927 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1928
1929 /*
1930 * Free data in the RX lists.
1931 */
1932 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1933 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1934 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1935 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1936 }
1937 }
1938 bzero((char *)&sc->vr_ldata->vr_rx_list,
1939 sizeof(sc->vr_ldata->vr_rx_list));
1940
1941 /*
1942 * Free the TX list buffers.
1943 */
1944 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1945 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1946 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1947 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1948 }
1949 }
1950
1951 bzero((char *)&sc->vr_ldata->vr_tx_list,
1952 sizeof(sc->vr_ldata->vr_tx_list));
1953
1954 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1955
1956 return;
1957 }
1958
1959 /*
1960 * Stop all chip I/O so that the kernel's probe routines don't
1961 * get confused by errant DMAs when rebooting.
1962 */
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void vr_shutdown(howto, arg)
	int			howto;
	void			*arg;
{
	/* 'howto' is unused; just quiesce the chip. */
	vr_stop((struct vr_softc *)arg);

	return;
}
1973
/*
 * PCI driver glue: registers the probe/attach entry points and unit
 * counter with the kernel's PCI device table.
 */
static struct pci_device vr_device = {
	"vr",
	vr_probe,
	vr_attach,
	&vr_count,
	NULL
};
DATA_SET(pcidevice_set, vr_device);
Cache object: 7aa430ab7747e1bdf80df915d46b1a0d
|