FreeBSD/Linux Kernel Cross Reference
sys/pci/if_rl.c
1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * RealTek 8129/8139 PCI NIC driver
38 *
39 * Supports several extremely cheap PCI 10/100 adapters based on
40 * the RealTek chipset. Datasheets can be obtained from
41 * www.realtek.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47 /*
48 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49 * probably the worst PCI ethernet controller ever made, with the possible
50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51 * DMA, but it has a terrible interface that nullifies any performance
52 * gains that bus-master DMA usually offers.
53 *
54 * For transmission, the chip offers a series of four TX descriptor
55 * registers. Each transmit frame must be in a contiguous buffer, aligned
56 * on a longword (32-bit) boundary. This means we almost always have to
57 * do mbuf copies in order to transmit a frame, except in the unlikely
58 * case where a) the packet fits into a single mbuf, and b) the packet
59 * is 32-bit aligned within the mbuf's data area. The presence of only
60 * four descriptor registers means that we can never have more than four
61 * packets queued for transmission at any one time.
62 *
63 * Reception is not much better. The driver has to allocate a single large
64 * buffer area (up to 64K in size) into which the chip will DMA received
65 * frames. Because we don't know where within this region received packets
66 * will begin or end, we have no choice but to copy data from the buffer
67 * area into mbufs in order to pass the packets up to the higher protocol
68 * levels.
69 *
70 * It's impossible given this rotten design to really achieve decent
71 * performance at 100Mbps, unless you happen to have a 400Mhz PII or
72 * some equally overmuscled CPU to drive it.
73 *
74 * On the bright side, the 8139 does have a built-in PHY, although
75 * rather than using an MDIO serial interface like most other NICs, the
76 * PHY registers are directly accessible through the 8139's register
77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
78 * filter.
79 *
80 * The 8129 chip is an older version of the 8139 that uses an external PHY
81 * chip. The 8129 has a serial MDIO interface for accessing the MII where
82 * the 8139 lets you directly access the on-board PHY registers. We need
83 * to select which interface to use depending on the chip type.
84 */
85
86 #ifdef HAVE_KERNEL_OPTION_HEADERS
87 #include "opt_device_polling.h"
88 #endif
89
90 #include <sys/param.h>
91 #include <sys/endian.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/module.h>
98 #include <sys/socket.h>
99 #include <sys/sysctl.h>
100
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_types.h>
107
108 #include <net/bpf.h>
109
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120
121 MODULE_DEPEND(rl, pci, 1, 1, 1);
122 MODULE_DEPEND(rl, ether, 1, 1, 1);
123 MODULE_DEPEND(rl, miibus, 1, 1, 1);
124
125 /* "device miibus" required. See GENERIC if you get errors here. */
126 #include "miibus_if.h"
127
128 #include <pci/if_rlreg.h>
129
130 /*
131 * Various supported device vendors/types and their names.
132 */
/*
 * Every entry maps a PCI vendor/device ID pair to a base chip type
 * (RL_8129 or RL_8139) and a probe description string.  Scanned
 * linearly by rl_probe() and again by rl_attach() (keyed on the
 * EEPROM device ID in the latter case).
 */
static struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
		"RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
		"Planex FNW-3603-TX" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" }
};
173
/*
 * Forward declarations for all driver entry points and internal helpers.
 * All are file-static; the only externally visible interface is the
 * device_method_t table below.
 */
static int rl_attach(device_t);
static int rl_detach(device_t);
static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int rl_dma_alloc(struct rl_softc *);
static void rl_dma_free(struct rl_softc *);
static void rl_eeprom_putbyte(struct rl_softc *, int);
static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
static int rl_encap(struct rl_softc *, struct mbuf **);
static int rl_list_tx_init(struct rl_softc *);
static int rl_list_rx_init(struct rl_softc *);
static int rl_ifmedia_upd(struct ifnet *);
static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int rl_ioctl(struct ifnet *, u_long, caddr_t);
static void rl_intr(void *);
static void rl_init(void *);
static void rl_init_locked(struct rl_softc *sc);
static void rl_mii_send(struct rl_softc *, uint32_t, int);
static void rl_mii_sync(struct rl_softc *);
static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);
static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING
static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
static void rl_reset(struct rl_softc *);
static int rl_resume(device_t);
static void rl_rxeof(struct rl_softc *);
static void rl_rxfilter(struct rl_softc *);
static int rl_shutdown(device_t);
static void rl_start(struct ifnet *);
static void rl_start_locked(struct ifnet *);
static void rl_stop(struct rl_softc *);
static int rl_suspend(device_t);
static void rl_tick(void *);
static void rl_txeof(struct rl_softc *);
static void rl_watchdog(struct rl_softc *);
static void rl_setwol(struct rl_softc *);
static void rl_clrwol(struct rl_softc *);
217
/*
 * newbus glue: method table, driver description and module registration.
 * The driver attaches on both pci and cardbus buses and provides the
 * miibus interface for its child PHY driver.
 */
static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, rl_probe),
	DEVMETHOD(device_attach, rl_attach),
	DEVMETHOD(device_detach, rl_detach),
	DEVMETHOD(device_suspend, rl_suspend),
	DEVMETHOD(device_resume, rl_resume),
	DEVMETHOD(device_shutdown, rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, rl_miibus_readreg),
	DEVMETHOD(miibus_writereg, rl_miibus_writereg),
	DEVMETHOD(miibus_statchg, rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
250
/*
 * Read-modify-write helpers for the EEPROM command register: set or
 * clear individual control bits in RL_EECMD without disturbing the
 * others.  Both expect a local 'sc' in scope.
 */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)
258
259 /*
260 * Send a read command and address to the EEPROM, check for ACK.
261 */
static void
rl_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	register int d, i;

	/*
	 * Combine the read opcode (set in sc->rl_eecmd_read at attach
	 * time, 6- or 8-bit addressing) with the target word address.
	 */
	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.  Starting the mask at
	 * 0x400 shifts out 11 bits, MSB first.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		/* Rising clock edge latches the data bit. */
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
285
286 /*
287 * Read a word of data stored in the EEPROM at address 'addr.'
288 */
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	register int i;
	uint16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	/* Re-assert program mode/chip select before clocking data out. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM: 16 of them, MSB first,
	 * sampled on the rising clock edge.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}
322
323 /*
324 * Read a sequence of words from the EEPROM.
325 */
/*
 * Read 'cnt' consecutive 16-bit words from the EEPROM starting at
 * word offset 'off' into 'dest'.  When 'swap' is non-zero each word
 * is byte-swapped from big-endian (ntohs) before being stored.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	uint16_t word;
	int idx;

	for (idx = 0; idx < cnt; idx++) {
		word = 0;
		rl_eeprom_getword(sc, off + idx, &word);
		if (swap)
			word = ntohs(word);
		*(uint16_t *)(dest + (idx * 2)) = word;
	}
}
341
342 /*
343 * MII access routines are provided for the 8129, which
344 * doesn't have a built-in PHY. For the 8139, we fake things
345 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
346 * direct access PHY registers.
347 */
/*
 * Read-modify-write helpers for the MII bit-bang register (RL_MII):
 * set or clear individual control/data bits.  Both expect a local
 * 'sc' in scope.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))
355
356 /*
357 * Sync the PHYs by setting data bit and strobing the clock 32 times.
358 */
static void
rl_mii_sync(struct rl_softc *sc)
{
	register int i;

	/* Drive MDIO as output and hold the data line high. */
	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	/* 32 clock pulses with data high resynchronizes the PHY(s). */
	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}
}
373
374 /*
375 * Clock a series of bits through the MII.
376 */
static void
rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(RL_MII_CLK);

	/* Shift out the low 'cnt' bits of 'bits', MSB first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}
396
397 /*
398 * Read an PHY register through the MII.
399 */
/*
 * Bit-bang an MII read frame.  On success the register value is left
 * in frame->mii_data and 0 is returned; 1 is returned if the PHY
 * failed to ack the address phase.
 */
static int
rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/* Turn on data xmit. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Send command/address info. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit: let the PHY drive the data line. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack: the PHY pulls the line low to acknowledge. */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:
	/* One trailing clock cycle to finish the frame. */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	return (ack ? 1 : 0);
}
474
475 /*
476 * Write to a PHY register through the MII.
477 */
/*
 * Bit-bang an MII write frame carrying frame->mii_data to the PHY
 * register addressed by frame->mii_phyaddr/mii_regaddr.  Always
 * returns 0; writes are not acknowledged.
 */
static int
rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
{

	/* Set up frame for TX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/* Turn on data output. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Start delimiter, opcode, PHY address, register address,
	 * turnaround, then the 16 data bits. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	return (0);
}
510
/*
 * miibus read method.  On the 8139 the PHY registers live directly in
 * the chip's register space, so the standard MII register numbers are
 * translated to CSR offsets; on the 8129 a real serial MII frame is
 * clocked out via rl_mii_readreg().
 */
static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	struct rl_mii_frame frame;
	uint16_t rval = 0;
	uint16_t rl8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* The built-in PHY has no ID registers. */
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			rval = CSR_READ_1(sc, RL_MEDIASTAT);
			return (rval);
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		rval = CSR_READ_2(sc, rl8139_reg);
		return (rval);
	}

	/* 8129: go through the serial MII interface. */
	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}
565
566 static int
567 rl_miibus_writereg(device_t dev, int phy, int reg, int data)
568 {
569 struct rl_softc *sc;
570 struct rl_mii_frame frame;
571 uint16_t rl8139_reg = 0;
572
573 sc = device_get_softc(dev);
574
575 if (sc->rl_type == RL_8139) {
576 switch (reg) {
577 case MII_BMCR:
578 rl8139_reg = RL_BMCR;
579 break;
580 case MII_BMSR:
581 rl8139_reg = RL_BMSR;
582 break;
583 case MII_ANAR:
584 rl8139_reg = RL_ANAR;
585 break;
586 case MII_ANER:
587 rl8139_reg = RL_ANER;
588 break;
589 case MII_ANLPAR:
590 rl8139_reg = RL_LPAR;
591 break;
592 case MII_PHYIDR1:
593 case MII_PHYIDR2:
594 return (0);
595 break;
596 default:
597 device_printf(sc->rl_dev, "bad phy register\n");
598 return (0);
599 }
600 CSR_WRITE_2(sc, rl8139_reg, data);
601 return (0);
602 }
603
604 bzero((char *)&frame, sizeof(frame));
605 frame.mii_phyaddr = phy;
606 frame.mii_regaddr = reg;
607 frame.mii_data = data;
608 rl_mii_writereg(sc, &frame);
609
610 return (0);
611 }
612
/*
 * miibus link-state change callback.  Tracks link state in
 * sc->rl_flags (RL_FLAG_LINK) for 10/100 media; nothing else to do
 * since the MAC takes no speed/duplex/flow-control programming.
 */
static void
rl_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	/* Ignore spurious callbacks before the interface is running. */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}
645
646 /*
647 * Program the 64-bit multicast hash filter.
648 */
/*
 * Program the receive filter: unicast/broadcast bits in RL_RXCFG and
 * the 64-bit multicast hash in RL_MAR0/RL_MAR4.  Promiscuous or
 * all-multicast mode opens the hash completely.  Called with the
 * softc lock held.
 */
static void
rl_rxfilter(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	uint32_t rxfilt;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_MULTI);
	/* Always accept frames destined for this host. */
	rxfilt |= RL_RXCFG_RX_INDIV;
	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= RL_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/* Accept every multicast group. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* Now program new ones. */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Top 6 bits of the big-endian CRC index the hash. */
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		IF_ADDR_UNLOCK(ifp);
		if (hashes[0] != 0 || hashes[1] != 0)
			rxfilt |= RL_RXCFG_RX_MULTI;
	}

	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}
696
697 static void
698 rl_reset(struct rl_softc *sc)
699 {
700 register int i;
701
702 RL_LOCK_ASSERT(sc);
703
704 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
705
706 for (i = 0; i < RL_TIMEOUT; i++) {
707 DELAY(10);
708 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
709 break;
710 }
711 if (i == RL_TIMEOUT)
712 device_printf(sc->rl_dev, "reset never completed!\n");
713 }
714
715 /*
716 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
717 * IDs against our list and return a device name if we find a match.
718 */
719 static int
720 rl_probe(device_t dev)
721 {
722 struct rl_type *t;
723 uint16_t devid, revid, vendor;
724 int i;
725
726 vendor = pci_get_vendor(dev);
727 devid = pci_get_device(dev);
728 revid = pci_get_revid(dev);
729
730 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
731 if (revid == 0x20) {
732 /* 8139C+, let re(4) take care of this device. */
733 return (ENXIO);
734 }
735 }
736 t = rl_devs;
737 for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) {
738 if (vendor == t->rl_vid && devid == t->rl_did) {
739 device_set_desc(dev, t->rl_name);
740 return (BUS_PROBE_DEFAULT);
741 }
742 }
743
744 return (ENXIO);
745 }
746
/* Context passed to rl_dmamap_cb() to return the loaded bus address. */
struct rl_dmamap_arg {
	bus_addr_t rl_busaddr;
};

/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the caller's rl_dmamap_arg.  On error the context is left
 * untouched (the caller pre-zeroes rl_busaddr and checks it).
 */
static void
rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct rl_dmamap_arg *ctx;

	if (error != 0)
		return;

	/* All tags used with this callback are built with nsegments == 1. */
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct rl_dmamap_arg *)arg;
	ctx->rl_busaddr = segs[0].ds_addr;
}
764
765 /*
766 * Attach the interface. Allocate softc structures, do ifmedia
767 * setup and ethernet/BPF attach.
768 */
/*
 * Attach the interface: map registers, read the station address and
 * chip type from the EEPROM, allocate DMA resources, attach the PHY,
 * configure ifnet capabilities (including WOL where supported) and
 * finally hook the interrupt.  On any failure, rl_detach() unwinds
 * whatever was set up.
 */
static int
rl_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t as[3];
	struct ifnet *ifp;
	struct rl_softc *sc;
	struct rl_type *t;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int error = 0, hwrev, i, phy, pmc, rid;
	int prefer_iomap, unit;
	uint16_t rl_did = 0;
	char tn[32];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->rl_dev = dev;

	/* Per-unit tunable/sysctl controlling the twister workaround. */
	sc->rl_twister_enable = 0;
	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
	   &sc->rl_twister_enable, 0, "");

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	pci_enable_busmaster(dev);


	/*
	 * Map control/status registers.
	 * Default to using PIO access for this driver. On SMP systems,
	 * there appear to be problems with memory mapped mode: it looks
	 * like doing too many memory mapped access back to back in rapid
	 * succession can hang the bus. I'm inclined to blame this on
	 * crummy design/construction on the part of RealTek. Memory
	 * mapped mode does appear to work on uniprocessor systems though.
	 */
	prefer_iomap = 1;
	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
	TUNABLE_INT_FETCH(tn, &prefer_iomap);
	if (prefer_iomap) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	/* Fall back to the memory BAR if PIO was skipped or failed. */
	if (prefer_iomap == 0 || sc->rl_res == NULL) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/*
	 * Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */
	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
		device_printf(dev,
"Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq[0] == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* 8139-family config register offsets. */
	sc->rl_cfg0 = RL_8139_CFG0;
	sc->rl_cfg1 = RL_8139_CFG1;
	sc->rl_cfg2 = 0;
	sc->rl_cfg3 = RL_8139_CFG3;
	sc->rl_cfg4 = RL_8139_CFG4;
	sc->rl_cfg5 = RL_8139_CFG5;

	/*
	 * Reset the adapter. Only take the lock here as it's needed in
	 * order to call rl_reset().
	 */
	RL_LOCK(sc);
	rl_reset(sc);
	RL_UNLOCK(sc);

	/*
	 * Probe EEPROM addressing width: word 0 reads as 0x8129 only
	 * with the correct (6-bit) command; otherwise use 8-bit.
	 */
	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);

	t = rl_devs;
	sc->rl_type = 0;
	while(t->rl_name != NULL) {
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
		t++;
	}

	if (sc->rl_type == 0) {
		device_printf(dev, "unknown device ID: %x assuming 8139\n",
		    rl_did);
		sc->rl_type = RL_8139;
		/*
		 * Read RL_IDR register to get ethernet address as accessing
		 * EEPROM may not extract correct address.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	}

	if ((error = rl_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

#define RL_PHYAD_INTERNAL	0

	/* Do MII setup; the 8139's PHY is at a fixed internal address. */
	phy = MII_PHY_ANY;
	if (sc->rl_type == RL_8139)
		phy = RL_PHYAD_INTERNAL;
	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rl_ioctl;
	ifp->if_start = rl_start;
	ifp->if_init = rl_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	/* Check WOL for RTL8139B or newer controllers. */
	if (sc->rl_type == RL_8139 &&
	    pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
		switch (hwrev) {
		case RL_HWREV_8139B:
		case RL_HWREV_8130:
		case RL_HWREV_8139C:
		case RL_HWREV_8139D:
		case RL_HWREV_8101:
		case RL_HWREV_8100:
			ifp->if_capabilities |= IFCAP_WOL;
			/* Disable WOL. */
			rl_clrwol(sc);
			break;
		default:
			break;
		}
	}
	ifp->if_capenable = ifp->if_capabilities;
	/* Only magic-packet WOL is enabled by default. */
	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
	if (error) {
		device_printf(sc->rl_dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}
994
995 /*
996 * Shutdown hardware and free up resources. This can be called any
997 * time after the mutex has been initialized. It is called in both
998 * the error case in attach and the normal detach case so it needs
999 * to be careful about only freeing resources that have actually been
1000 * allocated.
1001 */
static int
rl_detach(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
		rl_stop(sc);
		RL_UNLOCK(sc);
		/* Drain outside the lock; rl_tick() takes rl_mtx itself. */
		callout_drain(&sc->rl_stat_callout);
		ether_ifdetach(ifp);
	}
#if 0
	sc->suspended = 1;
#endif
	/* Detach the miibus child before generic bus teardown. */
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/* Release interrupt and register resources if they were set up. */
	if (sc->rl_intrhand[0])
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
	if (sc->rl_irq[0])
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	if (ifp)
		if_free(ifp);

	rl_dma_free(sc);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}
1049
/*
 * Allocate all DMA resources: a parent tag, one tag plus one large
 * contiguous memory block for the Rx ring, and a tag plus one map per
 * slot for the four Tx buffers.  Returns 0 on success or a bus_dma
 * error; on failure the caller is expected to invoke rl_dma_free()
 * (via rl_detach()) to release whatever was created.
 */
static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg ctx;
	int error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	/* Pre-zero so a stale/failed load is detectable below. */
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}
1149
1150 static void
1151 rl_dma_free(struct rl_softc *sc)
1152 {
1153 int i;
1154
1155 /* Rx memory block. */
1156 if (sc->rl_cdata.rl_rx_tag != NULL) {
1157 if (sc->rl_cdata.rl_rx_dmamap != NULL)
1158 bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1159 sc->rl_cdata.rl_rx_dmamap);
1160 if (sc->rl_cdata.rl_rx_dmamap != NULL &&
1161 sc->rl_cdata.rl_rx_buf_ptr != NULL)
1162 bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1163 sc->rl_cdata.rl_rx_buf_ptr,
1164 sc->rl_cdata.rl_rx_dmamap);
1165 sc->rl_cdata.rl_rx_buf_ptr = NULL;
1166 sc->rl_cdata.rl_rx_buf = NULL;
1167 sc->rl_cdata.rl_rx_dmamap = NULL;
1168 bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1169 sc->rl_cdata.rl_tx_tag = NULL;
1170 }
1171
1172 /* Tx buffers. */
1173 if (sc->rl_cdata.rl_tx_tag != NULL) {
1174 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1175 if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1176 bus_dmamap_destroy(
1177 sc->rl_cdata.rl_tx_tag,
1178 sc->rl_cdata.rl_tx_dmamap[i]);
1179 sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1180 }
1181 }
1182 bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1183 sc->rl_cdata.rl_tx_tag = NULL;
1184 }
1185
1186 if (sc->rl_parent_tag != NULL) {
1187 bus_dma_tag_destroy(sc->rl_parent_tag);
1188 sc->rl_parent_tag = NULL;
1189 }
1190 }
1191
1192 /*
1193 * Initialize the transmit descriptors.
1194 */
1195 static int
1196 rl_list_tx_init(struct rl_softc *sc)
1197 {
1198 struct rl_chain_data *cd;
1199 int i;
1200
1201 RL_LOCK_ASSERT(sc);
1202
1203 cd = &sc->rl_cdata;
1204 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1205 cd->rl_tx_chain[i] = NULL;
1206 CSR_WRITE_4(sc,
1207 RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1208 }
1209
1210 sc->rl_cdata.cur_tx = 0;
1211 sc->rl_cdata.last_tx = 0;
1212
1213 return (0);
1214 }
1215
1216 static int
1217 rl_list_rx_init(struct rl_softc *sc)
1218 {
1219
1220 RL_LOCK_ASSERT(sc);
1221
1222 bzero(sc->rl_cdata.rl_rx_buf_ptr,
1223 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1224 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_rx_dmamap,
1225 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1226
1227 return (0);
1228 }
1229
1230 /*
1231 * A frame has been uploaded: pass the resulting mbuf chain up to
1232 * the higher level protocols.
1233 *
1234 * You know there's something wrong with a PCI bus-master chip design
1235 * when you have to use m_devget().
1236 *
1237 * The receive operation is badly documented in the datasheet, so I'll
1238 * attempt to document it here. The driver provides a buffer area and
1239 * places its base address in the RX buffer start address register.
1240 * The chip then begins copying frames into the RX buffer. Each frame
1241 * is preceded by a 32-bit RX status word which specifies the length
1242 * of the frame and certain other status bits. Each frame (starting with
1243 * the status word) is also 32-bit aligned. The frame length is in the
1244 * first 16 bits of the status word; the lower 15 bits correspond with
1245 * the 'rx status register' mentioned in the datasheet.
1246 *
1247 * Note: to make the Alpha happy, the frame payload needs to be aligned
1248 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1249 * as the offset argument to m_devget().
1250 */
/*
 * rl_rxeof: drain completed frames from the Rx ring buffer and hand
 * them to the network stack.  Called with the softc lock held; the
 * lock is dropped around if_input() and re-taken.  On an Rx error the
 * whole chip is re-initialized and we return immediately.
 */
static void
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = sc->rl_ifp;
	uint8_t *rxbufpos;
	int total_len = 0;
	int wrap = 0;
	uint32_t rxstat;
	uint16_t cur_rx;
	uint16_t limit;
	uint16_t max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * NOTE(review): the +16 here mirrors the -16 applied when writing
	 * RL_CURRXADDR below — the register appears to be kept 16 bytes
	 * behind the true read offset; undocumented, confirm vs. datasheet.
	 */
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	/* Byte budget, accounting for a possible wrap of the write pointer. */
	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		/* Frame starts with a 32-bit status word at cur_rx. */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		/* Bad status or implausible length: reset the chip. */
		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			ifp->if_ierrors++;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			return;
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/* Payload begins right after the 32-bit status word. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/*
		 * 'wrap' is how many payload bytes fit before the end of
		 * the ring; a frame longer than that is copied in two
		 * pieces (tail from the start of the buffer).
		 */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m != NULL)
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		/* See NOTE(review) above on the 16-byte register offset. */
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		/* m_devget() failed: count the drop, but keep draining. */
		if (m == NULL) {
			ifp->if_iqdrops++;
			continue;
		}

		ifp->if_ipackets++;
		/* Drop the lock across the stack input path. */
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
	}

	/* No need to sync Rx memory block as we didn't modify it. */
}
1366
1367 /*
1368 * A frame was downloaded to the chip. It's safe for us to clean up
1369 * the list buffers.
1370 */
/*
 * rl_txeof: reclaim Tx slots whose frames the chip has finished with.
 * Walks from last_tx (oldest in-flight slot) toward cur_tx, freeing
 * mbufs and unloading DMA maps.  On a fatal Tx error the chip is
 * re-initialized (preserving the current Tx threshold) and we return.
 * Called with the softc lock held.
 */
static void
rl_txeof(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	uint32_t txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		/* Empty slot: nothing in flight. */
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Still pending in hardware: stop here. */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		/* Collision count lives in bits 27:24 of the status word. */
		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32byte we can address
		 * with the 6 available bit.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int oldthresh;
			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	/* All slots drained: disarm the Tx watchdog. */
	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}
1429
/*
 * rl_twister_update: advance the RTL8139 "twister" cable-tuning state
 * machine by one step.  Driven periodically from rl_tick() (at hz/10
 * until the state reaches DONE).  The register programming sequence is
 * undocumented by RealTek; values and procedure were collected from
 * public sources (see table comment below).
 */
static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;
	/*
	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
	 * Linux driver. Values undocumented otherwise.
	 * Rows index cable-length classes, columns are sequential tuning
	 * writes to RL_PARA7C.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139. These
	 * are used to compensate for impedance mismatches. The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 */
	switch (sc->rl_twister)
	{
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage. If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		/* One column write per tick; reset NWAYTST on column 0. */
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			/* Row 3 (longest cable) needs a re-check pass. */
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twister = DONE;
		else {
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
			sc->rl_twister = RETUNE;
		}
		break;
	case RETUNE:
		/* Retune for a shorter cable (try column 2) */
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		/* Only reachable from row 3, so this cannot underflow. */
		sc->rl_twist_row--;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;

	case DONE:
		break;
	}

}
1528
1529 static void
1530 rl_tick(void *xsc)
1531 {
1532 struct rl_softc *sc = xsc;
1533 struct mii_data *mii;
1534 int ticks;
1535
1536 RL_LOCK_ASSERT(sc);
1537 /*
1538 * If we're doing the twister cable calibration, then we need to defer
1539 * watchdog timeouts. This is a no-op in normal operations, but
1540 * can falsely trigger when the cable calibration takes a while and
1541 * there was traffic ready to go when rl was started.
1542 *
1543 * We don't defer mii_tick since that updates the mii status, which
1544 * helps the twister process, at least according to similar patches
1545 * for the Linux driver I found online while doing the fixes. Worst
1546 * case is a few extra mii reads during calibration.
1547 */
1548 mii = device_get_softc(sc->rl_miibus);
1549 mii_tick(mii);
1550 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1551 rl_miibus_statchg(sc->rl_dev);
1552 if (sc->rl_twister_enable) {
1553 if (sc->rl_twister == DONE)
1554 rl_watchdog(sc);
1555 else
1556 rl_twister_update(sc);
1557 if (sc->rl_twister == DONE)
1558 ticks = hz;
1559 else
1560 ticks = hz / 10;
1561 } else {
1562 rl_watchdog(sc);
1563 ticks = hz;
1564 }
1565
1566 callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1567 }
1568
1569 #ifdef DEVICE_POLLING
1570 static void
1571 rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1572 {
1573 struct rl_softc *sc = ifp->if_softc;
1574
1575 RL_LOCK(sc);
1576 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1577 rl_poll_locked(ifp, cmd, count);
1578 RL_UNLOCK(sc);
1579 }
1580
1581 static void
1582 rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1583 {
1584 struct rl_softc *sc = ifp->if_softc;
1585
1586 RL_LOCK_ASSERT(sc);
1587
1588 sc->rxcycles = count;
1589 rl_rxeof(sc);
1590 rl_txeof(sc);
1591
1592 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1593 rl_start_locked(ifp);
1594
1595 if (cmd == POLL_AND_CHECK_STATUS) {
1596 uint16_t status;
1597
1598 /* We should also check the status register. */
1599 status = CSR_READ_2(sc, RL_ISR);
1600 if (status == 0xffff)
1601 return;
1602 if (status != 0)
1603 CSR_WRITE_2(sc, RL_ISR, status);
1604
1605 /* XXX We should check behaviour on receiver stalls. */
1606
1607 if (status & RL_ISR_SYSTEM_ERR) {
1608 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1609 rl_init_locked(sc);
1610 }
1611 }
1612 }
1613 #endif /* DEVICE_POLLING */
1614
/*
 * rl_intr: interrupt handler.  Acknowledges and services Rx/Tx events
 * in a bounded loop (at most 16 passes), masking interrupts for the
 * duration and re-enabling them on exit if the interface is running.
 * Bails out early when suspended, when polling owns the device, or
 * when the status read shows the interrupt is not ours.
 */
static void
rl_intr(void *arg)
{
	struct rl_softc *sc = arg;
	struct ifnet *ifp = sc->rl_ifp;
	uint16_t status;
	int count;

	RL_LOCK(sc);

	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	/*
	 * Not running: skip to done_locked2, which re-enables interrupts
	 * only if IFF_DRV_RUNNING is (still) set.
	 */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked2;
	status = CSR_READ_2(sc, RL_ISR);
	/* 0xffff: card gone; no RL_INTRS bits: shared-interrupt, not ours. */
	if (status == 0xffff || (status & RL_INTRS) == 0)
		goto done_locked;
	/*
	 * Ours, disable further interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, 0);
	for (count = 16; count > 0; count--) {
		/* Ack the events we are about to service. */
		CSR_WRITE_2(sc, RL_ISR, status);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
				rl_rxeof(sc);
			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
				rl_txeof(sc);
			if (status & RL_ISR_SYSTEM_ERR) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				/* rl_init_locked() re-enables interrupts. */
				rl_init_locked(sc);
				RL_UNLOCK(sc);
				return;
			}
		}
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff || (status & RL_INTRS) == 0)
			break;
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

done_locked2:
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
done_locked:
	RL_UNLOCK(sc);
}
1671
1672 /*
1673 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1674 * pointers to the fragment pointers.
1675 */
/*
 * rl_encap: prepare *m_head for transmission in the current Tx slot.
 * The chip requires a single, longword-aligned buffer per frame and
 * does not pad short frames, so the chain is defragmented and padded
 * as needed.  On success the (possibly replaced) mbuf is recorded in
 * the current slot and its DMA address programmed; returns 0.  On
 * failure returns errno; *m_head is freed and NULLed when the chain
 * was consumed (ENOMEM/EIO paths), left intact on a plain load error.
 */
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	bus_dma_segment_t txsegs[1];
	int error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure
	 * pad short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			/* m_defrag() left the original chain; free it. */
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	/* nsegs == 0 should not happen after a successful load. */
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
	/* Program the slot's Tx start address with the frame's DMA address. */
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}
1738
1739 /*
1740 * Main transmit routine.
1741 */
1742 static void
1743 rl_start(struct ifnet *ifp)
1744 {
1745 struct rl_softc *sc = ifp->if_softc;
1746
1747 RL_LOCK(sc);
1748 rl_start_locked(ifp);
1749 RL_UNLOCK(sc);
1750 }
1751
/*
 * rl_start_locked: dequeue frames and hand them to the chip while a
 * free Tx slot exists.  Requires the softc lock.  Does nothing unless
 * the interface is running, not output-blocked, and has link.  When
 * all four Tx slots are occupied, sets IFF_DRV_OACTIVE so the stack
 * stops queueing until rl_txeof() frees a slot.
 */
static void
rl_start_locked(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	RL_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	/* Loop while the current Tx slot is free. */
	while (RL_CUR_TXMBUF(sc) == NULL) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			/* Chain was consumed by rl_encap(); nothing to requeue. */
			if (m_head == NULL)
				break;
			/* Transient failure: push the frame back and stall. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}
1801
/*
 * Public init entry point: serialize on the softc lock and run the
 * locked initialization routine.
 */
static void
rl_init(void *xsc)
{
	struct rl_softc *sc;

	sc = xsc;
	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}
1811
/*
 * rl_init_locked: full (re)initialization of the chip: stop, reset,
 * reprogram the station address, Rx/Tx rings, filters and interrupt
 * mask, then restart the receiver/transmitter and the periodic tick.
 * A no-op if the interface is already running.  Requires the softc
 * lock.  The hardware programming order below follows the chip's
 * requirements — do not reorder.
 */
static void
rl_init_locked(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	rl_reset(sc);
	if (sc->rl_twister_enable) {
		/*
		 * Reset twister register tuning state. The twister
		 * registers and their tuning are undocumented, but
		 * are necessary to cope with bad links. rl_twister =
		 * DONE here will disable this entirely.
		 */
		sc->rl_twister = CHK_LINK;
	}

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	bzero(eaddr, sizeof(eaddr));
	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX memory block pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
	    RL_RX_8139_BUF_RESERVE);
	/* Init TX descriptors. */
	rl_list_tx_init(sc);
	/* Init Rx memory block. */
	rl_list_rx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set RX filter. */
	rl_rxfilter(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/* Clear link state; mii_tick()/statchg will re-detect it. */
	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}
1904
1905 /*
1906 * Set media options.
1907 */
1908 static int
1909 rl_ifmedia_upd(struct ifnet *ifp)
1910 {
1911 struct rl_softc *sc = ifp->if_softc;
1912 struct mii_data *mii;
1913
1914 mii = device_get_softc(sc->rl_miibus);
1915
1916 RL_LOCK(sc);
1917 mii_mediachg(mii);
1918 RL_UNLOCK(sc);
1919
1920 return (0);
1921 }
1922
1923 /*
1924 * Report current media status.
1925 */
1926 static void
1927 rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1928 {
1929 struct rl_softc *sc = ifp->if_softc;
1930 struct mii_data *mii;
1931
1932 mii = device_get_softc(sc->rl_miibus);
1933
1934 RL_LOCK(sc);
1935 mii_pollstat(mii);
1936 ifmr->ifm_active = mii->mii_media_active;
1937 ifmr->ifm_status = mii->mii_media_status;
1938 RL_UNLOCK(sc);
1939 }
1940
/*
 * rl_ioctl: interface ioctl handler.  Handles up/down transitions and
 * filter changes (SIOCSIFFLAGS), multicast list updates, media ioctls
 * (delegated to ifmedia/MII), and capability toggles for polling and
 * Wake-on-LAN.  Everything else falls through to ether_ioctl().
 */
static int
rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	struct rl_softc *sc = ifp->if_softc;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Already running and only PROMISC/ALLMULTI
			 * changed: a filter reprogram suffices; otherwise
			 * do a full (re)init.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ((ifp->if_flags ^ sc->rl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)))
				rl_rxfilter(sc);
			else
				rl_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rl_stop(sc);
		sc->rl_if_flags = ifp->if_flags;
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Bits the caller is asking to toggle. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		/* Polling on: register first, then mask interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return(error);
			RL_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);

		}
		/* Polling off: deregister, then unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		/* WOL sub-capabilities only take effect in rl_setwol(). */
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
2019
2020 static void
2021 rl_watchdog(struct rl_softc *sc)
2022 {
2023
2024 RL_LOCK_ASSERT(sc);
2025
2026 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0)
2027 return;
2028
2029 device_printf(sc->rl_dev, "watchdog timeout\n");
2030 sc->rl_ifp->if_oerrors++;
2031
2032 rl_txeof(sc);
2033 rl_rxeof(sc);
2034 sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2035 rl_init_locked(sc);
2036 }
2037
2038 /*
2039 * Stop the adapter and free any mbufs allocated to the
2040 * RX and TX lists.
2041 */
2042 static void
2043 rl_stop(struct rl_softc *sc)
2044 {
2045 register int i;
2046 struct ifnet *ifp = sc->rl_ifp;
2047
2048 RL_LOCK_ASSERT(sc);
2049
2050 sc->rl_watchdog_timer = 0;
2051 callout_stop(&sc->rl_stat_callout);
2052 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2053 sc->rl_flags &= ~RL_FLAG_LINK;
2054
2055 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2056 CSR_WRITE_2(sc, RL_IMR, 0x0000);
2057 for (i = 0; i < RL_TIMEOUT; i++) {
2058 DELAY(10);
2059 if ((CSR_READ_1(sc, RL_COMMAND) &
2060 (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
2061 break;
2062 }
2063 if (i == RL_TIMEOUT)
2064 device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
2065
2066 /*
2067 * Free the TX list buffers.
2068 */
2069 for (i = 0; i < RL_TX_LIST_CNT; i++) {
2070 if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
2071 if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
2072 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
2073 sc->rl_cdata.rl_tx_dmamap[i],
2074 BUS_DMASYNC_POSTWRITE);
2075 bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
2076 sc->rl_cdata.rl_tx_dmamap[i]);
2077 m_freem(sc->rl_cdata.rl_tx_chain[i]);
2078 sc->rl_cdata.rl_tx_chain[i] = NULL;
2079 }
2080 CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
2081 0x0000000);
2082 }
2083 }
2084 }
2085
2086 /*
2087 * Device suspend routine. Stop the interface and save some PCI
2088 * settings in case the BIOS doesn't restore them properly on
2089 * resume.
2090 */
2091 static int
2092 rl_suspend(device_t dev)
2093 {
2094 struct rl_softc *sc;
2095
2096 sc = device_get_softc(dev);
2097
2098 RL_LOCK(sc);
2099 rl_stop(sc);
2100 rl_setwol(sc);
2101 sc->suspended = 1;
2102 RL_UNLOCK(sc);
2103
2104 return (0);
2105 }
2106
2107 /*
2108 * Device resume routine. Restore some PCI settings in case the BIOS
2109 * doesn't, re-enable busmastering, and restart the interface if
2110 * appropriate.
2111 */
2112 static int
2113 rl_resume(device_t dev)
2114 {
2115 struct rl_softc *sc;
2116 struct ifnet *ifp;
2117 int pmc;
2118 uint16_t pmstat;
2119
2120 sc = device_get_softc(dev);
2121 ifp = sc->rl_ifp;
2122
2123 RL_LOCK(sc);
2124
2125 if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
2126 pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
2127 /* Disable PME and clear PME status. */
2128 pmstat = pci_read_config(sc->rl_dev,
2129 pmc + PCIR_POWER_STATUS, 2);
2130 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2131 pmstat &= ~PCIM_PSTAT_PMEENABLE;
2132 pci_write_config(sc->rl_dev,
2133 pmc + PCIR_POWER_STATUS, pmstat, 2);
2134 }
2135 /*
2136 * Clear WOL matching such that normal Rx filtering
2137 * wouldn't interfere with WOL patterns.
2138 */
2139 rl_clrwol(sc);
2140 }
2141
2142 /* reinitialize interface if necessary */
2143 if (ifp->if_flags & IFF_UP)
2144 rl_init_locked(sc);
2145
2146 sc->suspended = 0;
2147
2148 RL_UNLOCK(sc);
2149
2150 return (0);
2151 }
2152
2153 /*
2154 * Stop all chip I/O so that the kernel's probe routines don't
2155 * get confused by errant DMAs when rebooting.
2156 */
2157 static int
2158 rl_shutdown(device_t dev)
2159 {
2160 struct rl_softc *sc;
2161
2162 sc = device_get_softc(dev);
2163
2164 RL_LOCK(sc);
2165 rl_stop(sc);
2166 /*
2167 * Mark interface as down since otherwise we will panic if
2168 * interrupt comes in later on, which can happen in some
2169 * cases.
2170 */
2171 sc->rl_ifp->if_flags &= ~IFF_UP;
2172 rl_setwol(sc);
2173 RL_UNLOCK(sc);
2174
2175 return (0);
2176 }
2177
/*
 * rl_setwol: program the chip's Wake-on-LAN configuration from the
 * interface's enabled WOL capabilities and request PME at the PCI
 * power-management capability.  No-op when the hardware lacks WOL or
 * the PM capability is absent.  Requires the softc lock.  The config
 * registers must be unlocked via RL_EECMD before writing and re-locked
 * afterwards.
 */
static void
rl_setwol(struct rl_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;
	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	/* Magic-packet wakeup lives in CONFIG3. */
	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Unicast/multicast/broadcast wakeup and LANWAKE in CONFIG5. */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
2231
/*
 * rl_clrwol: clear all Wake-on-LAN match bits in CONFIG3/CONFIG5 so
 * that leftover WOL patterns do not interfere with normal Rx filtering
 * after resume.  No-op when the hardware lacks WOL support.
 *
 * NOTE(review): the CONFIG5 write occurs after the config registers
 * are re-locked (RL_EEMODE_OFF); CONFIG3 is written inside the
 * unlocked window.  This matches rl_setwol()'s register grouping only
 * partially — presumably CONFIG5 is writable without unlock; confirm
 * against the 8139 datasheet.
 */
static void
rl_clrwol(struct rl_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}
Cache object: c219b78d0b0253e96985a010bbd14aaa
|