FreeBSD/Linux Kernel Cross Reference
sys/pci/if_vr.c
1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/6.0/sys/pci/if_vr.c 151140 2005-10-09 04:11:20Z delphij $");
35
36 /*
37 * VIA Rhine fast ethernet PCI NIC driver
38 *
39 * Supports various network adapters based on the VIA Rhine
40 * and Rhine II PCI controllers, including the D-Link DFE530TX.
41 * Datasheets are available at http://www.via.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47
48 /*
49 * The VIA Rhine controllers are similar in some respects to the
50 * the DEC tulip chips, except less complicated. The controller
51 * uses an MII bus and an external physical layer interface. The
52 * receiver has a one entry perfect filter and a 64-bit hash table
53 * multicast filter. Transmit and receive descriptors are similar
54 * to the tulip.
55 *
56 * The Rhine has a serious flaw in its transmit DMA mechanism:
57 * transmit buffers must be longword aligned. Unfortunately,
58 * FreeBSD doesn't guarantee that mbufs will be filled in starting
59 * at longword boundaries, so we have to do a buffer copy before
60 * transmission.
61 */
62
63 #ifdef HAVE_KERNEL_OPTION_HEADERS
64 #include "opt_device_polling.h"
65 #endif
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/sockio.h>
70 #include <sys/mbuf.h>
71 #include <sys/malloc.h>
72 #include <sys/kernel.h>
73 #include <sys/module.h>
74 #include <sys/socket.h>
75
76 #include <net/if.h>
77 #include <net/if_arp.h>
78 #include <net/ethernet.h>
79 #include <net/if_dl.h>
80 #include <net/if_media.h>
81 #include <net/if_types.h>
82
83 #include <net/bpf.h>
84
85 #include <vm/vm.h> /* for vtophys */
86 #include <vm/pmap.h> /* for vtophys */
87 #include <machine/bus.h>
88 #include <machine/resource.h>
89 #include <sys/bus.h>
90 #include <sys/rman.h>
91
92 #include <dev/mii/mii.h>
93 #include <dev/mii/miivar.h>
94
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97
98 #define VR_USEIOSPACE
99
100 #include <pci/if_vrreg.h>
101
102 MODULE_DEPEND(vr, pci, 1, 1, 1);
103 MODULE_DEPEND(vr, ether, 1, 1, 1);
104 MODULE_DEPEND(vr, miibus, 1, 1, 1);
105
106 /* "controller miibus0" required. See GENERIC if you get errors here. */
107 #include "miibus_if.h"
108
109 #undef VR_USESWSHIFT
110
111 /*
112 * Various supported device vendors/types and their names.
113 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
		"VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
		"VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
		"VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
		"Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
		"Addtron Technology Rhine II 10/100BaseTX" },
	/* Sentinel entry: vr_probe() stops at the NULL name. */
	{ 0, 0, NULL }
};
131
132 static int vr_probe(device_t);
133 static int vr_attach(device_t);
134 static int vr_detach(device_t);
135
136 static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
137 struct mbuf *);
138 static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf * );
139
140 static void vr_rxeof(struct vr_softc *);
141 static void vr_rxeoc(struct vr_softc *);
142 static void vr_txeof(struct vr_softc *);
143 static void vr_tick(void *);
144 static void vr_intr(void *);
145 static void vr_start(struct ifnet *);
146 static void vr_start_locked(struct ifnet *);
147 static int vr_ioctl(struct ifnet *, u_long, caddr_t);
148 static void vr_init(void *);
149 static void vr_init_locked(struct vr_softc *);
150 static void vr_stop(struct vr_softc *);
151 static void vr_watchdog(struct ifnet *);
152 static void vr_shutdown(device_t);
153 static int vr_ifmedia_upd(struct ifnet *);
154 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155
156 #ifdef VR_USESWSHIFT
157 static void vr_mii_sync(struct vr_softc *);
158 static void vr_mii_send(struct vr_softc *, uint32_t, int);
159 #endif
160 static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
161 static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
162 static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
163 static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
164 static void vr_miibus_statchg(device_t);
165
166 static void vr_setcfg(struct vr_softc *, int);
167 static void vr_setmulti(struct vr_softc *);
168 static void vr_reset(struct vr_softc *);
169 static int vr_list_rx_init(struct vr_softc *);
170 static int vr_list_tx_init(struct vr_softc *);
171
172 #ifdef VR_USEIOSPACE
173 #define VR_RES SYS_RES_IOPORT
174 #define VR_RID VR_PCI_LOIO
175 #else
176 #define VR_RES SYS_RES_MEMORY
177 #define VR_RID VR_PCI_LOMEM
178 #endif
179
180 static device_method_t vr_methods[] = {
181 /* Device interface */
182 DEVMETHOD(device_probe, vr_probe),
183 DEVMETHOD(device_attach, vr_attach),
184 DEVMETHOD(device_detach, vr_detach),
185 DEVMETHOD(device_shutdown, vr_shutdown),
186
187 /* bus interface */
188 DEVMETHOD(bus_print_child, bus_generic_print_child),
189 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
190
191 /* MII interface */
192 DEVMETHOD(miibus_readreg, vr_miibus_readreg),
193 DEVMETHOD(miibus_writereg, vr_miibus_writereg),
194 DEVMETHOD(miibus_statchg, vr_miibus_statchg),
195
196 { 0, 0 }
197 };
198
199 static driver_t vr_driver = {
200 "vr",
201 vr_methods,
202 sizeof(struct vr_softc)
203 };
204
205 static devclass_t vr_devclass;
206
207 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
208 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
209
210 #define VR_SETBIT(sc, reg, x) \
211 CSR_WRITE_1(sc, reg, \
212 CSR_READ_1(sc, reg) | (x))
213
214 #define VR_CLRBIT(sc, reg, x) \
215 CSR_WRITE_1(sc, reg, \
216 CSR_READ_1(sc, reg) & ~(x))
217
218 #define VR_SETBIT16(sc, reg, x) \
219 CSR_WRITE_2(sc, reg, \
220 CSR_READ_2(sc, reg) | (x))
221
222 #define VR_CLRBIT16(sc, reg, x) \
223 CSR_WRITE_2(sc, reg, \
224 CSR_READ_2(sc, reg) & ~(x))
225
226 #define VR_SETBIT32(sc, reg, x) \
227 CSR_WRITE_4(sc, reg, \
228 CSR_READ_4(sc, reg) | (x))
229
230 #define VR_CLRBIT32(sc, reg, x) \
231 CSR_WRITE_4(sc, reg, \
232 CSR_READ_4(sc, reg) & ~(x))
233
234 #define SIO_SET(x) \
235 CSR_WRITE_1(sc, VR_MIICMD, \
236 CSR_READ_1(sc, VR_MIICMD) | (x))
237
238 #define SIO_CLR(x) \
239 CSR_WRITE_1(sc, VR_MIICMD, \
240 CSR_READ_1(sc, VR_MIICMD) & ~(x))
241
242 #ifdef VR_USESWSHIFT
243 /*
244 * Sync the PHYs by setting data bit and strobing the clock 32 times.
245 */
246 static void
247 vr_mii_sync(struct vr_softc *sc)
248 {
249 register int i;
250
251 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
252
253 for (i = 0; i < 32; i++) {
254 SIO_SET(VR_MIICMD_CLK);
255 DELAY(1);
256 SIO_CLR(VR_MIICMD_CLK);
257 DELAY(1);
258 }
259 }
260
261 /*
262 * Clock a series of bits through the MII.
263 */
264 static void
265 vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
266 {
267 int i;
268
269 SIO_CLR(VR_MIICMD_CLK);
270
271 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
272 if (bits & i) {
273 SIO_SET(VR_MIICMD_DATAIN);
274 } else {
275 SIO_CLR(VR_MIICMD_DATAIN);
276 }
277 DELAY(1);
278 SIO_CLR(VR_MIICMD_CLK);
279 DELAY(1);
280 SIO_SET(VR_MIICMD_CLK);
281 }
282 }
283 #endif
284
/*
 * Read a PHY register through the MII: bit-banged in software when
 * VR_USESWSHIFT is defined, otherwise via the chip's built-in MII
 * read engine.  Result is left in frame->mii_data.
 */
static int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select direct-programming (software bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Turn on data xmit. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Send command/address info. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack: a responding PHY drives DATAOUT low here. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift the 16 data bits in, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:
	/* One final clock pulse to leave the bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Non-zero ack means no PHY answered the address. */
	if (ack)
		return (1);
	return (0);
}
#else
{
	int i;

	/* Set the PHY address (low 5 bits of VR_PHYADDR). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address and start the hardware read. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Bounded busy-wait for the chip to clear READ_ENB. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	return (0);
}
#endif
388
389
/*
 * Write to a PHY register through the MII: bit-banged in software when
 * VR_USESWSHIFT is defined, otherwise via the chip's MII write engine.
 */
static int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	/* Select direct-programming (software bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Set up frame for TX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/* Turn on data output. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Clock out the complete write frame, MSB first. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	return (0);
}
#else
{
	int i;

	/* Set the PHY address (low 5 bits of VR_PHYADDR). */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	/* Start the write, then bounded busy-wait for completion. */
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	return (0);
}
#endif
451
452 static int
453 vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
454 {
455 struct vr_mii_frame frame;
456 struct vr_softc *sc = device_get_softc(dev);
457
458 switch (sc->vr_revid) {
459 case REV_ID_VT6102_APOLLO:
460 if (phy != 1) {
461 frame.mii_data = 0;
462 goto out;
463 }
464 default:
465 break;
466 }
467
468 bzero((char *)&frame, sizeof(frame));
469 frame.mii_phyaddr = phy;
470 frame.mii_regaddr = reg;
471 vr_mii_readreg(sc, &frame);
472
473 out:
474 return (frame.mii_data);
475 }
476
477 static int
478 vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
479 {
480 struct vr_mii_frame frame;
481 struct vr_softc *sc = device_get_softc(dev);
482
483 switch (sc->vr_revid) {
484 case REV_ID_VT6102_APOLLO:
485 if (phy != 1)
486 return (0);
487 default:
488 break;
489 }
490
491 bzero((char *)&frame, sizeof(frame));
492 frame.mii_phyaddr = phy;
493 frame.mii_regaddr = reg;
494 frame.mii_data = data;
495 vr_mii_writereg(sc, &frame);
496
497 return (0);
498 }
499
500 static void
501 vr_miibus_statchg(device_t dev)
502 {
503 struct mii_data *mii;
504 struct vr_softc *sc = device_get_softc(dev);
505
506 mii = device_get_softc(sc->vr_miibus);
507 vr_setcfg(sc, mii->mii_media_active);
508 }
509
/*
 * Program the 64-bit multicast hash filter (VR_MAR0/VR_MAR1).
 * Called with the softc lock held.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet *ifp = sc->vr_ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	uint8_t rxfilt;
	int mcnt = 0;

	VR_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	/* Promiscuous/allmulti: accept everything by saturating the hash. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zero out all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program new ones: top 6 bits of the big-endian CRC index. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Only enable multicast reception when the list is non-empty. */
	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
563
564 /*
565 * In order to fiddle with the
566 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
567 * first have to put the transmit and/or receive logic in the idle state.
568 */
569 static void
570 vr_setcfg(struct vr_softc *sc, int media)
571 {
572 int restart = 0;
573
574 VR_LOCK_ASSERT(sc);
575
576 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
577 restart = 1;
578 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
579 }
580
581 if ((media & IFM_GMASK) == IFM_FDX)
582 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
583 else
584 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
585
586 if (restart)
587 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
588 }
589
590 static void
591 vr_reset(struct vr_softc *sc)
592 {
593 register int i;
594
595 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during detach w/o lock. */
596
597 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
598
599 for (i = 0; i < VR_TIMEOUT; i++) {
600 DELAY(10);
601 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
602 break;
603 }
604 if (i == VR_TIMEOUT) {
605 if (sc->vr_revid < REV_ID_VT3065_A)
606 printf("vr%d: reset never completed!\n", sc->vr_unit);
607 else {
608 /* Use newer force reset command */
609 printf("vr%d: Using force reset command.\n",
610 sc->vr_unit);
611 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
612 }
613 }
614
615 /* Wait a little while for the chip to get its brains in order. */
616 DELAY(1000);
617 }
618
619 /*
620 * Probe for a VIA Rhine chip. Check the PCI vendor and device
621 * IDs against our list and return a device name if we find a match.
622 */
623 static int
624 vr_probe(device_t dev)
625 {
626 struct vr_type *t = vr_devs;
627
628 while (t->vr_name != NULL) {
629 if ((pci_get_vendor(dev) == t->vr_vid) &&
630 (pci_get_device(dev) == t->vr_did)) {
631 device_set_desc(dev, t->vr_name);
632 return (BUS_PROBE_DEFAULT);
633 }
634 t++;
635 }
636
637 return (ENXIO);
638 }
639
640 /*
641 * Attach the interface. Allocate softc structures, do ifmedia
642 * setup and ethernet/BPF attach.
643 */
644 static int
645 vr_attach(dev)
646 device_t dev;
647 {
648 int i;
649 u_char eaddr[ETHER_ADDR_LEN];
650 struct vr_softc *sc;
651 struct ifnet *ifp;
652 int unit, error = 0, rid;
653
654 sc = device_get_softc(dev);
655 unit = device_get_unit(dev);
656
657 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
658 MTX_DEF);
659 /*
660 * Map control/status registers.
661 */
662 pci_enable_busmaster(dev);
663 sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;
664
665 rid = VR_RID;
666 sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);
667
668 if (sc->vr_res == NULL) {
669 printf("vr%d: couldn't map ports/memory\n", unit);
670 error = ENXIO;
671 goto fail;
672 }
673
674 sc->vr_btag = rman_get_bustag(sc->vr_res);
675 sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
676
677 /* Allocate interrupt */
678 rid = 0;
679 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
680 RF_SHAREABLE | RF_ACTIVE);
681
682 if (sc->vr_irq == NULL) {
683 printf("vr%d: couldn't map interrupt\n", unit);
684 error = ENXIO;
685 goto fail;
686 }
687
688 /*
689 * Windows may put the chip in suspend mode when it
690 * shuts down. Be sure to kick it in the head to wake it
691 * up again.
692 */
693 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
694
695 /* Reset the adapter. */
696 vr_reset(sc);
697
698 /*
699 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
700 * initialization and disable AUTOPOLL.
701 */
702 pci_write_config(dev, VR_PCI_MODE,
703 pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
704 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
705
706 /*
707 * Get station address. The way the Rhine chips work,
708 * you're not allowed to directly access the EEPROM once
709 * they've been programmed a special way. Consequently,
710 * we need to read the node address from the PAR0 and PAR1
711 * registers.
712 */
713 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
714 DELAY(200);
715 for (i = 0; i < ETHER_ADDR_LEN; i++)
716 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
717
718 sc->vr_unit = unit;
719
720 sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
721 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
722
723 if (sc->vr_ldata == NULL) {
724 printf("vr%d: no memory for list buffers!\n", unit);
725 error = ENXIO;
726 goto fail;
727 }
728
729 bzero(sc->vr_ldata, sizeof(struct vr_list_data));
730
731 ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
732 if (ifp == NULL) {
733 printf("vr%d: can not if_alloc()\n", unit);
734 error = ENOSPC;
735 goto fail;
736 }
737 ifp->if_softc = sc;
738 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
739 ifp->if_mtu = ETHERMTU;
740 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
741 ifp->if_ioctl = vr_ioctl;
742 ifp->if_start = vr_start;
743 ifp->if_watchdog = vr_watchdog;
744 ifp->if_init = vr_init;
745 ifp->if_baudrate = 10000000;
746 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
747 ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
748 IFQ_SET_READY(&ifp->if_snd);
749 ifp->if_capenable = ifp->if_capabilities;
750 #ifdef DEVICE_POLLING
751 ifp->if_capabilities |= IFCAP_POLLING;
752 #endif
753
754 /* Do MII setup. */
755 if (mii_phy_probe(dev, &sc->vr_miibus,
756 vr_ifmedia_upd, vr_ifmedia_sts)) {
757 printf("vr%d: MII without any phy!\n", sc->vr_unit);
758 error = ENXIO;
759 goto fail;
760 }
761
762 callout_handle_init(&sc->vr_stat_ch);
763
764 /* Call MI attach routine. */
765 ether_ifattach(ifp, eaddr);
766
767 sc->suspended = 0;
768
769 /* Hook interrupt last to avoid having to lock softc */
770 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
771 vr_intr, sc, &sc->vr_intrhand);
772
773 if (error) {
774 printf("vr%d: couldn't set up irq\n", unit);
775 ether_ifdetach(ifp);
776 goto fail;
777 }
778
779 fail:
780 if (error)
781 vr_detach(dev);
782
783 return (error);
784 }
785
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	/* Polling must be deregistered before the interface goes away. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	VR_LOCK(sc);

	/* Tell vr_intr() to ignore any further interrupts. */
	sc->suspended = 1;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vr_stop(sc);
		VR_UNLOCK(sc);		/* XXX: Avoid recursive acquire. */
		ether_ifdetach(ifp);
		VR_LOCK(sc);
	}
	if (ifp)
		if_free(ifp);
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	/* Release interrupt handler, IRQ, and I/O-port/memory resources. */
	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	/* Free the contiguous descriptor-list memory, if allocated. */
	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	VR_UNLOCK(sc);
	mtx_destroy(&sc->vr_mtx);

	return (0);
}
838
839 /*
840 * Initialize the transmit descriptors.
841 */
842 static int
843 vr_list_tx_init(struct vr_softc *sc)
844 {
845 struct vr_chain_data *cd;
846 struct vr_list_data *ld;
847 int i;
848
849 cd = &sc->vr_cdata;
850 ld = sc->vr_ldata;
851 for (i = 0; i < VR_TX_LIST_CNT; i++) {
852 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
853 if (i == (VR_TX_LIST_CNT - 1))
854 cd->vr_tx_chain[i].vr_nextdesc =
855 &cd->vr_tx_chain[0];
856 else
857 cd->vr_tx_chain[i].vr_nextdesc =
858 &cd->vr_tx_chain[i + 1];
859 }
860 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];
861
862 return (0);
863 }
864
865
866 /*
867 * Initialize the RX descriptors and allocate mbufs for them. Note that
868 * we arrange the descriptors in a closed ring, so that the last descriptor
869 * points back to the first.
870 */
871 static int
872 vr_list_rx_init(struct vr_softc *sc)
873 {
874 struct vr_chain_data *cd;
875 struct vr_list_data *ld;
876 int i;
877
878 VR_LOCK_ASSERT(sc);
879
880 cd = &sc->vr_cdata;
881 ld = sc->vr_ldata;
882
883 for (i = 0; i < VR_RX_LIST_CNT; i++) {
884 cd->vr_rx_chain[i].vr_ptr =
885 (struct vr_desc *)&ld->vr_rx_list[i];
886 if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
887 return (ENOBUFS);
888 if (i == (VR_RX_LIST_CNT - 1)) {
889 cd->vr_rx_chain[i].vr_nextdesc =
890 &cd->vr_rx_chain[0];
891 ld->vr_rx_list[i].vr_next =
892 vtophys(&ld->vr_rx_list[0]);
893 } else {
894 cd->vr_rx_chain[i].vr_nextdesc =
895 &cd->vr_rx_chain[i + 1];
896 ld->vr_rx_list[i].vr_next =
897 vtophys(&ld->vr_rx_list[i + 1]);
898 }
899 }
900
901 cd->vr_rx_head = &cd->vr_rx_chain[0];
902
903 return (0);
904 }
905
906 /*
907 * Initialize an RX descriptor and attach an MBUF cluster.
908 * Note: the length fields are only 11 bits wide, which means the
909 * largest size we can specify is 2047. This is important because
910 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
911 * overflow the field and make a mess.
912 */
913 static int
914 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m)
915 {
916 struct mbuf *m_new = NULL;
917
918 if (m == NULL) {
919 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
920 if (m_new == NULL)
921 return (ENOBUFS);
922
923 MCLGET(m_new, M_DONTWAIT);
924 if (!(m_new->m_flags & M_EXT)) {
925 m_freem(m_new);
926 return (ENOBUFS);
927 }
928 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
929 } else {
930 m_new = m;
931 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
932 m_new->m_data = m_new->m_ext.ext_buf;
933 }
934
935 m_adj(m_new, sizeof(uint64_t));
936
937 c->vr_mbuf = m_new;
938 c->vr_ptr->vr_status = VR_RXSTAT;
939 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
940 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
941
942 return (0);
943 }
944
945 /*
946 * A frame has been uploaded: pass the resulting mbuf chain up to
947 * the higher level protocols.
948 */
949 static void
950 vr_rxeof(struct vr_softc *sc)
951 {
952 struct mbuf *m, *m0;
953 struct ifnet *ifp;
954 struct vr_chain_onefrag *cur_rx;
955 int total_len = 0;
956 uint32_t rxstat;
957
958 VR_LOCK_ASSERT(sc);
959 ifp = sc->vr_ifp;
960
961 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
962 VR_RXSTAT_OWN)) {
963 #ifdef DEVICE_POLLING
964 if (ifp->if_capenable & IFCAP_POLLING) {
965 if (sc->rxcycles <= 0)
966 break;
967 sc->rxcycles--;
968 }
969 #endif
970 m0 = NULL;
971 cur_rx = sc->vr_cdata.vr_rx_head;
972 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
973 m = cur_rx->vr_mbuf;
974
975 /*
976 * If an error occurs, update stats, clear the
977 * status word and leave the mbuf cluster in place:
978 * it should simply get re-used next time this descriptor
979 * comes up in the ring.
980 */
981 if (rxstat & VR_RXSTAT_RXERR) {
982 ifp->if_ierrors++;
983 printf("vr%d: rx error (%02x):", sc->vr_unit,
984 rxstat & 0x000000ff);
985 if (rxstat & VR_RXSTAT_CRCERR)
986 printf(" crc error");
987 if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
988 printf(" frame alignment error\n");
989 if (rxstat & VR_RXSTAT_FIFOOFLOW)
990 printf(" FIFO overflow");
991 if (rxstat & VR_RXSTAT_GIANT)
992 printf(" received giant packet");
993 if (rxstat & VR_RXSTAT_RUNT)
994 printf(" received runt packet");
995 if (rxstat & VR_RXSTAT_BUSERR)
996 printf(" system bus error");
997 if (rxstat & VR_RXSTAT_BUFFERR)
998 printf("rx buffer error");
999 printf("\n");
1000 vr_newbuf(sc, cur_rx, m);
1001 continue;
1002 }
1003
1004 /* No errors; receive the packet. */
1005 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1006
1007 /*
1008 * XXX The VIA Rhine chip includes the CRC with every
1009 * received frame, and there's no way to turn this
1010 * behavior off (at least, I can't find anything in
1011 * the manual that explains how to do it) so we have
1012 * to trim off the CRC manually.
1013 */
1014 total_len -= ETHER_CRC_LEN;
1015
1016 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
1017 NULL);
1018 vr_newbuf(sc, cur_rx, m);
1019 if (m0 == NULL) {
1020 ifp->if_ierrors++;
1021 continue;
1022 }
1023 m = m0;
1024
1025 ifp->if_ipackets++;
1026 VR_UNLOCK(sc);
1027 (*ifp->if_input)(ifp, m);
1028 VR_LOCK(sc);
1029 }
1030 }
1031
1032 static void
1033 vr_rxeoc(struct vr_softc *sc)
1034 {
1035 struct ifnet *ifp = sc->vr_ifp;
1036 int i;
1037
1038 VR_LOCK_ASSERT(sc);
1039
1040 ifp->if_ierrors++;
1041
1042 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1043 DELAY(10000);
1044
1045 /* Wait for receiver to stop */
1046 for (i = 0x400;
1047 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1048 i--) {
1049 ;
1050 }
1051
1052 if (!i) {
1053 printf("vr%d: rx shutdown error!\n", sc->vr_unit);
1054 sc->vr_flags |= VR_F_RESTART;
1055 return;
1056 }
1057
1058 vr_rxeof(sc);
1059
1060 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1061 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1062 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1063 }
1064
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp = sc->vr_ifp;

	VR_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while (cur_tx->vr_mbuf != NULL) {
		uint32_t txstat;
		int i;

		txstat = cur_tx->vr_ptr->vr_status;

		/*
		 * On abort/underflow, wait for the TX engine to idle,
		 * hand the descriptor back to the chip, and restart DMA
		 * at it.  Flag a full restart if it never idles.
		 */
		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("vr%d: tx shutdown timeout\n",
				    sc->vr_unit);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
			break;
		}

		/* Descriptor still owned by the chip: stop here. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		/* Account for transmit errors and collisions. */
		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}
	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Cancel the watchdog once the ring is fully drained. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}
1129
1130 static void
1131 vr_tick(void *xsc)
1132 {
1133 struct vr_softc *sc = xsc;
1134 struct mii_data *mii;
1135
1136 VR_LOCK(sc);
1137
1138 if (sc->vr_flags & VR_F_RESTART) {
1139 printf("vr%d: restarting\n", sc->vr_unit);
1140 vr_stop(sc);
1141 vr_reset(sc);
1142 vr_init_locked(sc);
1143 sc->vr_flags &= ~VR_F_RESTART;
1144 }
1145
1146 mii = device_get_softc(sc->vr_miibus);
1147 mii_tick(mii);
1148 sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1149
1150 VR_UNLOCK(sc);
1151 }
1152
1153 #ifdef DEVICE_POLLING
1154 static poll_handler_t vr_poll;
1155 static poll_handler_t vr_poll_locked;
1156
1157 static void
1158 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1159 {
1160 struct vr_softc *sc = ifp->if_softc;
1161
1162 VR_LOCK(sc);
1163 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1164 vr_poll_locked(ifp, cmd, count);
1165 VR_UNLOCK(sc);
1166 }
1167
1168 static void
1169 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1170 {
1171 struct vr_softc *sc = ifp->if_softc;
1172
1173 VR_LOCK_ASSERT(sc);
1174
1175 sc->rxcycles = count;
1176 vr_rxeof(sc);
1177 vr_txeof(sc);
1178 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1179 vr_start_locked(ifp);
1180
1181 if (cmd == POLL_AND_CHECK_STATUS) {
1182 uint16_t status;
1183
1184 /* Also check status register. */
1185 status = CSR_READ_2(sc, VR_ISR);
1186 if (status)
1187 CSR_WRITE_2(sc, VR_ISR, status);
1188
1189 if ((status & VR_INTRS) == 0)
1190 return;
1191
1192 if (status & VR_ISR_RX_DROPPED) {
1193 printf("vr%d: rx packet lost\n", sc->vr_unit);
1194 ifp->if_ierrors++;
1195 }
1196
1197 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1198 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1199 printf("vr%d: receive error (%04x)",
1200 sc->vr_unit, status);
1201 if (status & VR_ISR_RX_NOBUF)
1202 printf(" no buffers");
1203 if (status & VR_ISR_RX_OFLOW)
1204 printf(" overflow");
1205 if (status & VR_ISR_RX_DROPPED)
1206 printf(" packet lost");
1207 printf("\n");
1208 vr_rxeoc(sc);
1209 }
1210
1211 if ((status & VR_ISR_BUSERR) ||
1212 (status & VR_ISR_TX_UNDERRUN)) {
1213 vr_reset(sc);
1214 vr_init_locked(sc);
1215 return;
1216 }
1217
1218 if ((status & VR_ISR_UDFI) ||
1219 (status & VR_ISR_TX_ABRT2) ||
1220 (status & VR_ISR_TX_ABRT)) {
1221 ifp->if_oerrors++;
1222 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
1223 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1224 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1225 }
1226 }
1227 }
1228 }
1229 #endif /* DEVICE_POLLING */
1230
1231 static void
1232 vr_intr(void *arg)
1233 {
1234 struct vr_softc *sc = arg;
1235 struct ifnet *ifp = sc->vr_ifp;
1236 uint16_t status;
1237
1238 VR_LOCK(sc);
1239
1240 if (sc->suspended) {
1241 /*
1242 * Forcibly disable interrupts.
1243 * XXX: Mobile VIA based platforms may need
1244 * interrupt re-enable on resume.
1245 */
1246 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1247 goto done_locked;
1248 }
1249
1250 #ifdef DEVICE_POLLING
1251 if (ifp->if_capenable & IFCAP_POLLING)
1252 goto done_locked;
1253 #endif
1254
1255 /* Suppress unwanted interrupts. */
1256 if (!(ifp->if_flags & IFF_UP)) {
1257 vr_stop(sc);
1258 goto done_locked;
1259 }
1260
1261 /* Disable interrupts. */
1262 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1263
1264 for (;;) {
1265 status = CSR_READ_2(sc, VR_ISR);
1266 if (status)
1267 CSR_WRITE_2(sc, VR_ISR, status);
1268
1269 if ((status & VR_INTRS) == 0)
1270 break;
1271
1272 if (status & VR_ISR_RX_OK)
1273 vr_rxeof(sc);
1274
1275 if (status & VR_ISR_RX_DROPPED) {
1276 printf("vr%d: rx packet lost\n", sc->vr_unit);
1277 ifp->if_ierrors++;
1278 }
1279
1280 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1281 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1282 printf("vr%d: receive error (%04x)",
1283 sc->vr_unit, status);
1284 if (status & VR_ISR_RX_NOBUF)
1285 printf(" no buffers");
1286 if (status & VR_ISR_RX_OFLOW)
1287 printf(" overflow");
1288 if (status & VR_ISR_RX_DROPPED)
1289 printf(" packet lost");
1290 printf("\n");
1291 vr_rxeoc(sc);
1292 }
1293
1294 if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1295 vr_reset(sc);
1296 vr_init_locked(sc);
1297 break;
1298 }
1299
1300 if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1301 (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1302 vr_txeof(sc);
1303 if ((status & VR_ISR_UDFI) ||
1304 (status & VR_ISR_TX_ABRT2) ||
1305 (status & VR_ISR_TX_ABRT)) {
1306 ifp->if_oerrors++;
1307 if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
1308 VR_SETBIT16(sc, VR_COMMAND,
1309 VR_CMD_TX_ON);
1310 VR_SETBIT16(sc, VR_COMMAND,
1311 VR_CMD_TX_GO);
1312 }
1313 }
1314 }
1315 }
1316
1317 /* Re-enable interrupts. */
1318 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1319
1320 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1321 vr_start_locked(ifp);
1322
1323 done_locked:
1324 VR_UNLOCK(sc);
1325 }
1326
1327 /*
1328 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1329 * pointers to the fragment pointers.
1330 */
1331 static int
1332 vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
1333 {
1334 struct vr_desc *f = NULL;
1335 struct mbuf *m;
1336
1337 VR_LOCK_ASSERT(sc);
1338 /*
1339 * The VIA Rhine wants packet buffers to be longword
1340 * aligned, but very often our mbufs aren't. Rather than
1341 * waste time trying to decide when to copy and when not
1342 * to copy, just do it all the time.
1343 */
1344 m = m_defrag(m_head, M_DONTWAIT);
1345 if (m == NULL)
1346 return (1);
1347
1348 /*
1349 * The Rhine chip doesn't auto-pad, so we have to make
1350 * sure to pad short frames out to the minimum frame length
1351 * ourselves.
1352 */
1353 if (m->m_len < VR_MIN_FRAMELEN) {
1354 m->m_pkthdr.len += VR_MIN_FRAMELEN - m->m_len;
1355 m->m_len = m->m_pkthdr.len;
1356 }
1357
1358 c->vr_mbuf = m;
1359 f = c->vr_ptr;
1360 f->vr_data = vtophys(mtod(m, caddr_t));
1361 f->vr_ctl = m->m_len;
1362 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1363 f->vr_status = 0;
1364 f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1365 f->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1366
1367 return (0);
1368 }
1369
1370 /*
1371 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1372 * to the mbuf data regions directly in the transmit lists. We also save a
1373 * copy of the pointers since the transmit list fragment pointers are
1374 * physical addresses.
1375 */
1376
1377 static void
1378 vr_start(struct ifnet *ifp)
1379 {
1380 struct vr_softc *sc = ifp->if_softc;
1381
1382 VR_LOCK(sc);
1383 vr_start_locked(ifp);
1384 VR_UNLOCK(sc);
1385 }
1386
/*
 * Drain the interface send queue into the TX descriptor ring and kick
 * the transmitter.  Caller must hold the softc lock.
 */
static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	struct vr_chain *cur_tx;

	/* Output is already stalled waiting for TX completions. */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	/* Fill free descriptors (vr_mbuf == NULL) starting at the producer. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor over to the chip. */
		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->vr_mbuf);

		cur_tx = cur_tx->vr_nextdesc;
	}
	/* Only kick the chip if something was queued (or the ring is full). */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring full: stall output until vr_txeof() frees a slot. */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}
}
1433
/*
 * Unlocked wrapper around vr_init_locked() for use as the ifnet
 * if_init callback.
 */
static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}
1443
/*
 * Bring the chip up: reset, program the station address, DMA and FIFO
 * thresholds, RX filter, descriptor ring addresses, then enable the
 * receiver/transmitter and start the periodic tick.
 * Caller must hold the softc lock.
 */
static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet *ifp = sc->vr_ifp;
	struct mii_data *mii;
	int i;

	VR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vr_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IFP2ENADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf(
	"vr%d: initialization failed: no memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/* Ack any stale interrupt status before unmasking. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the once-per-second housekeeping tick. */
	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
}
1544
1545 /*
1546 * Set media options.
1547 */
1548 static int
1549 vr_ifmedia_upd(struct ifnet *ifp)
1550 {
1551 struct vr_softc *sc = ifp->if_softc;
1552
1553 if (ifp->if_flags & IFF_UP)
1554 vr_init(sc);
1555
1556 return (0);
1557 }
1558
1559 /*
1560 * Report current media status.
1561 */
1562 static void
1563 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1564 {
1565 struct vr_softc *sc = ifp->if_softc;
1566 struct mii_data *mii;
1567
1568 mii = device_get_softc(sc->vr_miibus);
1569 VR_LOCK(sc);
1570 mii_pollstat(mii);
1571 VR_UNLOCK(sc);
1572 ifmr->ifm_active = mii->mii_media_active;
1573 ifmr->ifm_status = mii->mii_media_status;
1574 }
1575
/*
 * ioctl handler: interface flags, multicast filter, media, and
 * (optionally) DEVICE_POLLING capability changes.  Unhandled commands
 * fall through to ether_ioctl().
 */
static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		/* Up: (re)initialize; down: stop if currently running. */
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			vr_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the hardware multicast filter. */
		VR_LOCK(sc);
		vr_setmulti(sc);
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media handling is delegated to the MII layer. */
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		/*
		 * Turning polling on: register with the polling subsystem
		 * first (outside the lock), then mask chip interrupts.
		 */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(vr_poll, ifp);
			if (error)
				return(error);
			VR_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, VR_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);

		}
		/*
		 * Turning polling off: deregister, then unmask chip
		 * interrupts again.
		 */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			VR_LOCK(sc);
			CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1642
1643 static void
1644 vr_watchdog(struct ifnet *ifp)
1645 {
1646 struct vr_softc *sc = ifp->if_softc;
1647
1648 VR_LOCK(sc);
1649
1650 ifp->if_oerrors++;
1651 printf("vr%d: watchdog timeout\n", sc->vr_unit);
1652
1653 vr_stop(sc);
1654 vr_reset(sc);
1655 vr_init_locked(sc);
1656
1657 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1658 vr_start_locked(ifp);
1659
1660 VR_UNLOCK(sc);
1661 }
1662
1663 /*
1664 * Stop the adapter and free any mbufs allocated to the
1665 * RX and TX lists.
1666 */
1667 static void
1668 vr_stop(struct vr_softc *sc)
1669 {
1670 register int i;
1671 struct ifnet *ifp;
1672
1673 VR_LOCK_ASSERT(sc);
1674
1675 ifp = sc->vr_ifp;
1676 ifp->if_timer = 0;
1677
1678 untimeout(vr_tick, sc, sc->vr_stat_ch);
1679 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1680
1681 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1682 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1683 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1684 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1685 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1686
1687 /*
1688 * Free data in the RX lists.
1689 */
1690 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1691 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1692 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1693 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1694 }
1695 }
1696 bzero((char *)&sc->vr_ldata->vr_rx_list,
1697 sizeof(sc->vr_ldata->vr_rx_list));
1698
1699 /*
1700 * Free the TX list buffers.
1701 */
1702 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1703 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1704 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1705 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1706 }
1707 }
1708 bzero((char *)&sc->vr_ldata->vr_tx_list,
1709 sizeof(sc->vr_ldata->vr_tx_list));
1710 }
1711
1712 /*
1713 * Stop all chip I/O so that the kernel's probe routines don't
1714 * get confused by errant DMAs when rebooting.
1715 */
/*
 * Device shutdown method: detach does all the work (stops the chip,
 * which halts any in-flight DMA).
 */
static void
vr_shutdown(device_t dev)
{

	vr_detach(dev);
}
Cache object: c253c5413d7135c6f70cf23298503ea7
|