FreeBSD/Linux Kernel Cross Reference
sys/dev/rl/if_rl.c
1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * RealTek 8129/8139 PCI NIC driver
38 *
39 * Supports several extremely cheap PCI 10/100 adapters based on
40 * the RealTek chipset. Datasheets can be obtained from
41 * www.realtek.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47 /*
48 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49 * probably the worst PCI ethernet controller ever made, with the possible
50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51 * DMA, but it has a terrible interface that nullifies any performance
52 * gains that bus-master DMA usually offers.
53 *
54 * For transmission, the chip offers a series of four TX descriptor
55 * registers. Each transmit frame must be in a contiguous buffer, aligned
56 * on a longword (32-bit) boundary. This means we almost always have to
57 * do mbuf copies in order to transmit a frame, except in the unlikely
58 * case where a) the packet fits into a single mbuf, and b) the packet
59 * is 32-bit aligned within the mbuf's data area. The presence of only
60 * four descriptor registers means that we can never have more than four
61 * packets queued for transmission at any one time.
62 *
63 * Reception is not much better. The driver has to allocate a single large
64 * buffer area (up to 64K in size) into which the chip will DMA received
65 * frames. Because we don't know where within this region received packets
66 * will begin or end, we have no choice but to copy data from the buffer
67 * area into mbufs in order to pass the packets up to the higher protocol
68 * levels.
69 *
70 * It's impossible given this rotten design to really achieve decent
71 * performance at 100Mbps, unless you happen to have a 400MHz PII or
72 * some equally overmuscled CPU to drive it.
73 *
74 * On the bright side, the 8139 does have a built-in PHY, although
75 * rather than using an MDIO serial interface like most other NICs, the
76 * PHY registers are directly accessible through the 8139's register
77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
78 * filter.
79 *
80 * The 8129 chip is an older version of the 8139 that uses an external PHY
81 * chip. The 8129 has a serial MDIO interface for accessing the MII where
82 * the 8139 lets you directly access the on-board PHY registers. We need
83 * to select which interface to use depending on the chip type.
84 */
85
86 #ifdef HAVE_KERNEL_OPTION_HEADERS
87 #include "opt_device_polling.h"
88 #endif
89
90 #include <sys/param.h>
91 #include <sys/endian.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/module.h>
98 #include <sys/socket.h>
99 #include <sys/sysctl.h>
100
101 #include <net/if.h>
102 #include <net/if_var.h>
103 #include <net/if_arp.h>
104 #include <net/ethernet.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107 #include <net/if_types.h>
108
109 #include <net/bpf.h>
110
111 #include <machine/bus.h>
112 #include <machine/resource.h>
113 #include <sys/bus.h>
114 #include <sys/rman.h>
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/mii_bitbang.h>
118 #include <dev/mii/miivar.h>
119
120 #include <dev/pci/pcireg.h>
121 #include <dev/pci/pcivar.h>
122
123 MODULE_DEPEND(rl, pci, 1, 1, 1);
124 MODULE_DEPEND(rl, ether, 1, 1, 1);
125 MODULE_DEPEND(rl, miibus, 1, 1, 1);
126
127 /* "device miibus" required. See GENERIC if you get errors here. */
128 #include "miibus_if.h"
129
130 #include <dev/rl/if_rlreg.h>
131
132 /*
133 * Various supported device vendors/types and their names.
134 */
135 static const struct rl_type rl_devs[] = {
136 { RT_VENDORID, RT_DEVICEID_8129, RL_8129,
137 "RealTek 8129 10/100BaseTX" },
138 { RT_VENDORID, RT_DEVICEID_8139, RL_8139,
139 "RealTek 8139 10/100BaseTX" },
140 { RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
141 "RealTek 8139 10/100BaseTX" },
142 { RT_VENDORID, RT_DEVICEID_8138, RL_8139,
143 "RealTek 8139 10/100BaseTX CardBus" },
144 { RT_VENDORID, RT_DEVICEID_8100, RL_8139,
145 "RealTek 8100 10/100BaseTX" },
146 { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
147 "Accton MPX 5030/5038 10/100BaseTX" },
148 { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
149 "Delta Electronics 8139 10/100BaseTX" },
150 { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
151 "Addtron Technology 8139 10/100BaseTX" },
152 { DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
153 "D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
154 { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
155 "D-Link DFE-530TX+ 10/100BaseTX" },
156 { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
157 "D-Link DFE-690TXD 10/100BaseTX" },
158 { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
159 "Nortel Networks 10/100BaseTX" },
160 { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
161 "Corega FEther CB-TXD" },
162 { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
163 "Corega FEtherII CB-TXD" },
164 { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
165 "Peppercon AG ROL-F" },
166 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
167 "Planex FNW-3603-TX" },
168 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
169 "Planex FNW-3800-TX" },
170 { CP_VENDORID, RT_DEVICEID_8139, RL_8139,
171 "Compaq HNE-300" },
172 { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
173 "LevelOne FPC-0106TX" },
174 { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
175 "Edimax EP-4103DL CardBus" }
176 };
177
178 static int rl_attach(device_t);
179 static int rl_detach(device_t);
180 static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
181 static int rl_dma_alloc(struct rl_softc *);
182 static void rl_dma_free(struct rl_softc *);
183 static void rl_eeprom_putbyte(struct rl_softc *, int);
184 static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
185 static int rl_encap(struct rl_softc *, struct mbuf **);
186 static int rl_list_tx_init(struct rl_softc *);
187 static int rl_list_rx_init(struct rl_softc *);
188 static int rl_ifmedia_upd(struct ifnet *);
189 static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
190 static int rl_ioctl(struct ifnet *, u_long, caddr_t);
191 static void rl_intr(void *);
192 static void rl_init(void *);
193 static void rl_init_locked(struct rl_softc *sc);
194 static int rl_miibus_readreg(device_t, int, int);
195 static void rl_miibus_statchg(device_t);
196 static int rl_miibus_writereg(device_t, int, int, int);
197 #ifdef DEVICE_POLLING
198 static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
199 static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
200 #endif
201 static int rl_probe(device_t);
202 static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
203 static void rl_reset(struct rl_softc *);
204 static int rl_resume(device_t);
205 static int rl_rxeof(struct rl_softc *);
206 static void rl_rxfilter(struct rl_softc *);
207 static int rl_shutdown(device_t);
208 static void rl_start(struct ifnet *);
209 static void rl_start_locked(struct ifnet *);
210 static void rl_stop(struct rl_softc *);
211 static int rl_suspend(device_t);
212 static void rl_tick(void *);
213 static void rl_txeof(struct rl_softc *);
214 static void rl_watchdog(struct rl_softc *);
215 static void rl_setwol(struct rl_softc *);
216 static void rl_clrwol(struct rl_softc *);
217
218 /*
219 * MII bit-bang glue
220 */
221 static uint32_t rl_mii_bitbang_read(device_t);
222 static void rl_mii_bitbang_write(device_t, uint32_t);
223
224 static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
225 rl_mii_bitbang_read,
226 rl_mii_bitbang_write,
227 {
228 RL_MII_DATAOUT, /* MII_BIT_MDO */
229 RL_MII_DATAIN, /* MII_BIT_MDI */
230 RL_MII_CLK, /* MII_BIT_MDC */
231 RL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
232 0, /* MII_BIT_DIR_PHY_HOST */
233 }
234 };
235
236 static device_method_t rl_methods[] = {
237 /* Device interface */
238 DEVMETHOD(device_probe, rl_probe),
239 DEVMETHOD(device_attach, rl_attach),
240 DEVMETHOD(device_detach, rl_detach),
241 DEVMETHOD(device_suspend, rl_suspend),
242 DEVMETHOD(device_resume, rl_resume),
243 DEVMETHOD(device_shutdown, rl_shutdown),
244
245 /* MII interface */
246 DEVMETHOD(miibus_readreg, rl_miibus_readreg),
247 DEVMETHOD(miibus_writereg, rl_miibus_writereg),
248 DEVMETHOD(miibus_statchg, rl_miibus_statchg),
249
250 DEVMETHOD_END
251 };
252
253 static driver_t rl_driver = {
254 "rl",
255 rl_methods,
256 sizeof(struct rl_softc)
257 };
258
259 static devclass_t rl_devclass;
260
261 DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
262 MODULE_PNP_INFO("U16:vendor;U16:device", pci, rl, rl_devs,
263 nitems(rl_devs) - 1);
264 DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
265 DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);
266
267 #define EE_SET(x) \
268 CSR_WRITE_1(sc, RL_EECMD, \
269 CSR_READ_1(sc, RL_EECMD) | x)
270
271 #define EE_CLR(x) \
272 CSR_WRITE_1(sc, RL_EECMD, \
273 CSR_READ_1(sc, RL_EECMD) & ~x)
274
275 /*
276 * Send a read command and address to the EEPROM, check for ACK.
277 */
278 static void
279 rl_eeprom_putbyte(struct rl_softc *sc, int addr)
280 {
281 int d, i;
282
283 d = addr | sc->rl_eecmd_read;
284
285 /*
286 * Feed in each bit and strobe the clock.
287 */
288 for (i = 0x400; i; i >>= 1) {
289 if (d & i) {
290 EE_SET(RL_EE_DATAIN);
291 } else {
292 EE_CLR(RL_EE_DATAIN);
293 }
294 DELAY(100);
295 EE_SET(RL_EE_CLK);
296 DELAY(150);
297 EE_CLR(RL_EE_CLK);
298 DELAY(100);
299 }
300 }
301
302 /*
303 * Read a word of data stored in the EEPROM at address 'addr.'
304 */
305 static void
306 rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
307 {
308 int i;
309 uint16_t word = 0;
310
311 /* Enter EEPROM access mode. */
312 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
313
314 /*
315 * Send address of word we want to read.
316 */
317 rl_eeprom_putbyte(sc, addr);
318
319 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
320
321 /*
322 * Start reading bits from EEPROM.
323 */
324 for (i = 0x8000; i; i >>= 1) {
325 EE_SET(RL_EE_CLK);
326 DELAY(100);
327 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
328 word |= i;
329 EE_CLR(RL_EE_CLK);
330 DELAY(100);
331 }
332
333 /* Turn off EEPROM access mode. */
334 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
335
336 *dest = word;
337 }
338
339 /*
340 * Read a sequence of words from the EEPROM.
341 */
342 static void
343 rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
344 {
345 int i;
346 uint16_t word = 0, *ptr;
347
348 for (i = 0; i < cnt; i++) {
349 rl_eeprom_getword(sc, off + i, &word);
350 ptr = (uint16_t *)(dest + (i * 2));
351 if (swap)
352 *ptr = ntohs(word);
353 else
354 *ptr = word;
355 }
356 }
357
358 /*
359 * Read the MII serial port for the MII bit-bang module.
360 */
361 static uint32_t
362 rl_mii_bitbang_read(device_t dev)
363 {
364 struct rl_softc *sc;
365 uint32_t val;
366
367 sc = device_get_softc(dev);
368
369 val = CSR_READ_1(sc, RL_MII);
370 CSR_BARRIER(sc, RL_MII, 1,
371 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
372
373 return (val);
374 }
375
376 /*
377 * Write the MII serial port for the MII bit-bang module.
378 */
379 static void
380 rl_mii_bitbang_write(device_t dev, uint32_t val)
381 {
382 struct rl_softc *sc;
383
384 sc = device_get_softc(dev);
385
386 CSR_WRITE_1(sc, RL_MII, val);
387 CSR_BARRIER(sc, RL_MII, 1,
388 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
389 }
390
391 static int
392 rl_miibus_readreg(device_t dev, int phy, int reg)
393 {
394 struct rl_softc *sc;
395 uint16_t rl8139_reg;
396
397 sc = device_get_softc(dev);
398
399 if (sc->rl_type == RL_8139) {
400 switch (reg) {
401 case MII_BMCR:
402 rl8139_reg = RL_BMCR;
403 break;
404 case MII_BMSR:
405 rl8139_reg = RL_BMSR;
406 break;
407 case MII_ANAR:
408 rl8139_reg = RL_ANAR;
409 break;
410 case MII_ANER:
411 rl8139_reg = RL_ANER;
412 break;
413 case MII_ANLPAR:
414 rl8139_reg = RL_LPAR;
415 break;
416 case MII_PHYIDR1:
417 case MII_PHYIDR2:
418 return (0);
419 /*
420 * Allow the rlphy driver to read the media status
421 * register. If we have a link partner which does not
422 * support NWAY, this is the register which will tell
423 * us the results of parallel detection.
424 */
425 case RL_MEDIASTAT:
426 return (CSR_READ_1(sc, RL_MEDIASTAT));
427 default:
428 device_printf(sc->rl_dev, "bad phy register\n");
429 return (0);
430 }
431 return (CSR_READ_2(sc, rl8139_reg));
432 }
433
434 return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
435 }
436
437 static int
438 rl_miibus_writereg(device_t dev, int phy, int reg, int data)
439 {
440 struct rl_softc *sc;
441 uint16_t rl8139_reg;
442
443 sc = device_get_softc(dev);
444
445 if (sc->rl_type == RL_8139) {
446 switch (reg) {
447 case MII_BMCR:
448 rl8139_reg = RL_BMCR;
449 break;
450 case MII_BMSR:
451 rl8139_reg = RL_BMSR;
452 break;
453 case MII_ANAR:
454 rl8139_reg = RL_ANAR;
455 break;
456 case MII_ANER:
457 rl8139_reg = RL_ANER;
458 break;
459 case MII_ANLPAR:
460 rl8139_reg = RL_LPAR;
461 break;
462 case MII_PHYIDR1:
463 case MII_PHYIDR2:
464 return (0);
465 break;
466 default:
467 device_printf(sc->rl_dev, "bad phy register\n");
468 return (0);
469 }
470 CSR_WRITE_2(sc, rl8139_reg, data);
471 return (0);
472 }
473
474 mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
475
476 return (0);
477 }
478
479 static void
480 rl_miibus_statchg(device_t dev)
481 {
482 struct rl_softc *sc;
483 struct ifnet *ifp;
484 struct mii_data *mii;
485
486 sc = device_get_softc(dev);
487 mii = device_get_softc(sc->rl_miibus);
488 ifp = sc->rl_ifp;
489 if (mii == NULL || ifp == NULL ||
490 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
491 return;
492
493 sc->rl_flags &= ~RL_FLAG_LINK;
494 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
495 (IFM_ACTIVE | IFM_AVALID)) {
496 switch (IFM_SUBTYPE(mii->mii_media_active)) {
497 case IFM_10_T:
498 case IFM_100_TX:
499 sc->rl_flags |= RL_FLAG_LINK;
500 break;
501 default:
502 break;
503 }
504 }
505 /*
506 * RealTek controllers do not provide any interface to
507 * Tx/Rx MACs for resolved speed, duplex and flow-control
508 * parameters.
509 */
510 }
511
512 /*
513 * Program the 64-bit multicast hash filter.
514 */
515 static void
516 rl_rxfilter(struct rl_softc *sc)
517 {
518 struct ifnet *ifp = sc->rl_ifp;
519 int h = 0;
520 uint32_t hashes[2] = { 0, 0 };
521 struct ifmultiaddr *ifma;
522 uint32_t rxfilt;
523
524 RL_LOCK_ASSERT(sc);
525
526 rxfilt = CSR_READ_4(sc, RL_RXCFG);
527 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
528 RL_RXCFG_RX_MULTI);
529 /* Always accept frames destined for this host. */
530 rxfilt |= RL_RXCFG_RX_INDIV;
531 /* Set capture broadcast bit to capture broadcast frames. */
532 if (ifp->if_flags & IFF_BROADCAST)
533 rxfilt |= RL_RXCFG_RX_BROAD;
534 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
535 rxfilt |= RL_RXCFG_RX_MULTI;
536 if (ifp->if_flags & IFF_PROMISC)
537 rxfilt |= RL_RXCFG_RX_ALLPHYS;
538 hashes[0] = 0xFFFFFFFF;
539 hashes[1] = 0xFFFFFFFF;
540 } else {
541 /* Now program new ones. */
542 if_maddr_rlock(ifp);
543 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
544 if (ifma->ifma_addr->sa_family != AF_LINK)
545 continue;
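			/*
			 * The upper 6 bits of the big-endian CRC of each
			 * multicast address select one of the 64 bits in
			 * the MAR0/MAR4 hash filter registers.
			 */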
546 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
547 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
548 if (h < 32)
549 hashes[0] |= (1 << h);
550 else
551 hashes[1] |= (1 << (h - 32));
552 }
553 if_maddr_runlock(ifp);
554 if (hashes[0] != 0 || hashes[1] != 0)
555 rxfilt |= RL_RXCFG_RX_MULTI;
556 }
557
558 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
559 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
560 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
561 }
562
563 static void
564 rl_reset(struct rl_softc *sc)
565 {
566 int i;
567
568 RL_LOCK_ASSERT(sc);
569
570 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
571
572 for (i = 0; i < RL_TIMEOUT; i++) {
573 DELAY(10);
574 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
575 break;
576 }
577 if (i == RL_TIMEOUT)
578 device_printf(sc->rl_dev, "reset never completed!\n");
579 }
580
581 /*
582 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
583 * IDs against our list and return a device name if we find a match.
584 */
585 static int
586 rl_probe(device_t dev)
587 {
588 const struct rl_type *t;
589 uint16_t devid, revid, vendor;
590 int i;
591
592 vendor = pci_get_vendor(dev);
593 devid = pci_get_device(dev);
594 revid = pci_get_revid(dev);
595
596 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
597 if (revid == 0x20) {
598 /* 8139C+, let re(4) take care of this device. */
599 return (ENXIO);
600 }
601 }
602 t = rl_devs;
603 for (i = 0; i < nitems(rl_devs); i++, t++) {
604 if (vendor == t->rl_vid && devid == t->rl_did) {
605 device_set_desc(dev, t->rl_name);
606 return (BUS_PROBE_DEFAULT);
607 }
608 }
609
610 return (ENXIO);
611 }
612
613 struct rl_dmamap_arg {
614 bus_addr_t rl_busaddr;
615 };
616
617 static void
618 rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
619 {
620 struct rl_dmamap_arg *ctx;
621
622 if (error != 0)
623 return;
624
625 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
626
627 ctx = (struct rl_dmamap_arg *)arg;
628 ctx->rl_busaddr = segs[0].ds_addr;
629 }
630
631 /*
632 * Attach the interface. Allocate softc structures, do ifmedia
633 * setup and ethernet/BPF attach.
634 */
635 static int
636 rl_attach(device_t dev)
637 {
638 uint8_t eaddr[ETHER_ADDR_LEN];
639 uint16_t as[3];
640 struct ifnet *ifp;
641 struct rl_softc *sc;
642 const struct rl_type *t;
643 struct sysctl_ctx_list *ctx;
644 struct sysctl_oid_list *children;
645 int error = 0, hwrev, i, phy, pmc, rid;
646 int prefer_iomap, unit;
647 uint16_t rl_did = 0;
648 char tn[32];
649
650 sc = device_get_softc(dev);
651 unit = device_get_unit(dev);
652 sc->rl_dev = dev;
653
654 sc->rl_twister_enable = 0;
655 snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
656 TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
657 ctx = device_get_sysctl_ctx(sc->rl_dev);
658 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
659 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
660 &sc->rl_twister_enable, 0, "");
661
662 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
663 MTX_DEF);
664 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
665
666 pci_enable_busmaster(dev);
667
668
669 /*
670 * Map control/status registers.
671 * Default to using PIO access for this driver. On SMP systems,
672 * there appear to be problems with memory mapped mode: it looks
673 * like doing too many memory mapped access back to back in rapid
674 * succession can hang the bus. I'm inclined to blame this on
675 * crummy design/construction on the part of RealTek. Memory
676 * mapped mode does appear to work on uniprocessor systems though.
677 */
678 prefer_iomap = 1;
679 snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
680 TUNABLE_INT_FETCH(tn, &prefer_iomap);
681 if (prefer_iomap) {
682 sc->rl_res_id = PCIR_BAR(0);
683 sc->rl_res_type = SYS_RES_IOPORT;
684 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
685 &sc->rl_res_id, RF_ACTIVE);
686 }
687 if (prefer_iomap == 0 || sc->rl_res == NULL) {
688 sc->rl_res_id = PCIR_BAR(1);
689 sc->rl_res_type = SYS_RES_MEMORY;
690 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
691 &sc->rl_res_id, RF_ACTIVE);
692 }
693 if (sc->rl_res == NULL) {
694 device_printf(dev, "couldn't map ports/memory\n");
695 error = ENXIO;
696 goto fail;
697 }
698
699 #ifdef notdef
700 /*
701 * Detect the Realtek 8139B. For some reason, this chip is very
702 * unstable when left to autoselect the media.
703 * The best workaround is to set the device to the required
704 * media type or to set it to the 10 Meg speed.
705 */
706 if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
707 device_printf(dev,
708 "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
709 #endif
710
711 sc->rl_btag = rman_get_bustag(sc->rl_res);
712 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
713
714 /* Allocate interrupt */
715 rid = 0;
716 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
717 RF_SHAREABLE | RF_ACTIVE);
718
719 if (sc->rl_irq[0] == NULL) {
720 device_printf(dev, "couldn't map interrupt\n");
721 error = ENXIO;
722 goto fail;
723 }
724
725 sc->rl_cfg0 = RL_8139_CFG0;
726 sc->rl_cfg1 = RL_8139_CFG1;
727 sc->rl_cfg2 = 0;
728 sc->rl_cfg3 = RL_8139_CFG3;
729 sc->rl_cfg4 = RL_8139_CFG4;
730 sc->rl_cfg5 = RL_8139_CFG5;
731
732 /*
733 * Reset the adapter. Only take the lock here as it's needed in
734 * order to call rl_reset().
735 */
736 RL_LOCK(sc);
737 rl_reset(sc);
738 RL_UNLOCK(sc);
739
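	/*
	 * EEPROM word 0 should read back as the magic value 0x8129 when
	 * 6-bit addressing works; if it doesn't, fall back to 8-bit
	 * addressing (presumably a larger 93C56/66 part rather than a
	 * 93C46).
	 */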
740 sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
741 rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
742 if (rl_did != 0x8129)
743 sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
744
745 /*
746 * Get station address from the EEPROM.
747 */
748 rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
749 for (i = 0; i < 3; i++) {
750 eaddr[(i * 2) + 0] = as[i] & 0xff;
751 eaddr[(i * 2) + 1] = as[i] >> 8;
752 }
753
754 /*
755 * Now read the exact device type from the EEPROM to find
756 * out if it's an 8129 or 8139.
757 */
758 rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
759
760 t = rl_devs;
761 sc->rl_type = 0;
762 while(t->rl_name != NULL) {
763 if (rl_did == t->rl_did) {
764 sc->rl_type = t->rl_basetype;
765 break;
766 }
767 t++;
768 }
769
770 if (sc->rl_type == 0) {
771 device_printf(dev, "unknown device ID: %x assuming 8139\n",
772 rl_did);
773 sc->rl_type = RL_8139;
774 /*
775 * Read RL_IDR register to get ethernet address as accessing
776 * EEPROM may not extract correct address.
777 */
778 for (i = 0; i < ETHER_ADDR_LEN; i++)
779 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
780 }
781
782 if ((error = rl_dma_alloc(sc)) != 0)
783 goto fail;
784
785 ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
786 if (ifp == NULL) {
787 device_printf(dev, "can not if_alloc()\n");
788 error = ENOSPC;
789 goto fail;
790 }
791
792 #define RL_PHYAD_INTERNAL 0
793
794 /* Do MII setup */
795 phy = MII_PHY_ANY;
796 if (sc->rl_type == RL_8139)
797 phy = RL_PHYAD_INTERNAL;
798 error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
799 rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
800 if (error != 0) {
801 device_printf(dev, "attaching PHYs failed\n");
802 goto fail;
803 }
804
805 ifp->if_softc = sc;
806 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
807 ifp->if_mtu = ETHERMTU;
808 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
809 ifp->if_ioctl = rl_ioctl;
810 ifp->if_start = rl_start;
811 ifp->if_init = rl_init;
812 ifp->if_capabilities = IFCAP_VLAN_MTU;
813 /* Check WOL for RTL8139B or newer controllers. */
814 if (sc->rl_type == RL_8139 &&
815 pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
816 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
817 switch (hwrev) {
818 case RL_HWREV_8139B:
819 case RL_HWREV_8130:
820 case RL_HWREV_8139C:
821 case RL_HWREV_8139D:
822 case RL_HWREV_8101:
823 case RL_HWREV_8100:
824 ifp->if_capabilities |= IFCAP_WOL;
825 /* Disable WOL. */
826 rl_clrwol(sc);
827 break;
828 default:
829 break;
830 }
831 }
832 ifp->if_capenable = ifp->if_capabilities;
833 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
834 #ifdef DEVICE_POLLING
835 ifp->if_capabilities |= IFCAP_POLLING;
836 #endif
837 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
838 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
839 IFQ_SET_READY(&ifp->if_snd);
840
841 /*
842 * Call MI attach routine.
843 */
844 ether_ifattach(ifp, eaddr);
845
846 /* Hook interrupt last to avoid having to lock softc */
847 error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
848 NULL, rl_intr, sc, &sc->rl_intrhand[0]);
849 if (error) {
850 device_printf(sc->rl_dev, "couldn't set up irq\n");
851 ether_ifdetach(ifp);
852 }
853
854 fail:
855 if (error)
856 rl_detach(dev);
857
858 return (error);
859 }
860
861 /*
862 * Shutdown hardware and free up resources. This can be called any
863 * time after the mutex has been initialized. It is called in both
864 * the error case in attach and the normal detach case so it needs
865 * to be careful about only freeing resources that have actually been
866 * allocated.
867 */
868 static int
869 rl_detach(device_t dev)
870 {
871 struct rl_softc *sc;
872 struct ifnet *ifp;
873
874 sc = device_get_softc(dev);
875 ifp = sc->rl_ifp;
876
877 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
878
879 #ifdef DEVICE_POLLING
880 if (ifp->if_capenable & IFCAP_POLLING)
881 ether_poll_deregister(ifp);
882 #endif
883 /* These should only be active if attach succeeded */
884 if (device_is_attached(dev)) {
885 RL_LOCK(sc);
886 rl_stop(sc);
887 RL_UNLOCK(sc);
888 callout_drain(&sc->rl_stat_callout);
889 ether_ifdetach(ifp);
890 }
891 #if 0
892 sc->suspended = 1;
893 #endif
894 if (sc->rl_miibus)
895 device_delete_child(dev, sc->rl_miibus);
896 bus_generic_detach(dev);
897
898 if (sc->rl_intrhand[0])
899 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
900 if (sc->rl_irq[0])
901 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
902 if (sc->rl_res)
903 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
904 sc->rl_res);
905
906 if (ifp)
907 if_free(ifp);
908
909 rl_dma_free(sc);
910
911 mtx_destroy(&sc->rl_mtx);
912
913 return (0);
914 }
915
916 static int
917 rl_dma_alloc(struct rl_softc *sc)
918 {
919 struct rl_dmamap_arg ctx;
920 int error, i;
921
922 /*
923 * Allocate the parent bus DMA tag appropriate for PCI.
924 */
925 error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
926 1, 0, /* alignment, boundary */
927 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
928 BUS_SPACE_MAXADDR, /* highaddr */
929 NULL, NULL, /* filter, filterarg */
930 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
931 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
932 0, /* flags */
933 NULL, NULL, /* lockfunc, lockarg */
934 &sc->rl_parent_tag);
935 if (error) {
936 device_printf(sc->rl_dev,
937 "failed to create parent DMA tag.\n");
938 goto fail;
939 }
940 /* Create DMA tag for Rx memory block. */
941 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */
942 RL_RX_8139_BUF_ALIGN, 0, /* alignment, boundary */
943 BUS_SPACE_MAXADDR, /* lowaddr */
944 BUS_SPACE_MAXADDR, /* highaddr */
945 NULL, NULL, /* filter, filterarg */
946 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */
947 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */
948 0, /* flags */
949 NULL, NULL, /* lockfunc, lockarg */
950 &sc->rl_cdata.rl_rx_tag);
951 if (error) {
952 device_printf(sc->rl_dev,
953 "failed to create Rx memory block DMA tag.\n");
954 goto fail;
955 }
956 /* Create DMA tag for Tx buffer. */
957 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */
958 RL_TX_8139_BUF_ALIGN, 0, /* alignment, boundary */
959 BUS_SPACE_MAXADDR, /* lowaddr */
960 BUS_SPACE_MAXADDR, /* highaddr */
961 NULL, NULL, /* filter, filterarg */
962 MCLBYTES, 1, /* maxsize, nsegments */
963 MCLBYTES, /* maxsegsize */
964 0, /* flags */
965 NULL, NULL, /* lockfunc, lockarg */
966 &sc->rl_cdata.rl_tx_tag);
967 if (error) {
968 device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
969 goto fail;
970 }
971
972 /*
973 * Allocate DMA'able memory and load DMA map for Rx memory block.
974 */
975 error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
976 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
977 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
978 if (error != 0) {
979 device_printf(sc->rl_dev,
980 "failed to allocate Rx DMA memory block.\n");
981 goto fail;
982 }
983 ctx.rl_busaddr = 0;
984 error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
985 sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
986 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
987 BUS_DMA_NOWAIT);
988 if (error != 0 || ctx.rl_busaddr == 0) {
989 device_printf(sc->rl_dev,
990 "could not load Rx DMA memory block.\n");
991 goto fail;
992 }
993 sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
994
995 /* Create DMA maps for Tx buffers. */
996 for (i = 0; i < RL_TX_LIST_CNT; i++) {
997 sc->rl_cdata.rl_tx_chain[i] = NULL;
998 sc->rl_cdata.rl_tx_dmamap[i] = NULL;
999 error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
1000 &sc->rl_cdata.rl_tx_dmamap[i]);
1001 if (error != 0) {
1002 device_printf(sc->rl_dev,
1003 "could not create Tx dmamap.\n");
1004 goto fail;
1005 }
1006 }
1007
1008 /* Leave a few bytes before the start of the RX ring buffer. */
1009 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
1010 sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
1011
1012 fail:
1013 return (error);
1014 }
1015
1016 static void
1017 rl_dma_free(struct rl_softc *sc)
1018 {
1019 int i;
1020
1021 /* Rx memory block. */
1022 if (sc->rl_cdata.rl_rx_tag != NULL) {
1023 if (sc->rl_cdata.rl_rx_buf_paddr != 0)
1024 bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1025 sc->rl_cdata.rl_rx_dmamap);
1026 if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
1027 bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1028 sc->rl_cdata.rl_rx_buf_ptr,
1029 sc->rl_cdata.rl_rx_dmamap);
1030 sc->rl_cdata.rl_rx_buf_ptr = NULL;
1031 sc->rl_cdata.rl_rx_buf = NULL;
1032 sc->rl_cdata.rl_rx_buf_paddr = 0;
1033 bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1034 sc->rl_cdata.rl_rx_tag = NULL;
1035 }
1036
1037 /* Tx buffers. */
1038 if (sc->rl_cdata.rl_tx_tag != NULL) {
1039 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1040 if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1041 bus_dmamap_destroy(
1042 sc->rl_cdata.rl_tx_tag,
1043 sc->rl_cdata.rl_tx_dmamap[i]);
1044 sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1045 }
1046 }
1047 bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1048 sc->rl_cdata.rl_tx_tag = NULL;
1049 }
1050
1051 if (sc->rl_parent_tag != NULL) {
1052 bus_dma_tag_destroy(sc->rl_parent_tag);
1053 sc->rl_parent_tag = NULL;
1054 }
1055 }
1056
1057 /*
1058 * Initialize the transmit descriptors.
1059 */
1060 static int
1061 rl_list_tx_init(struct rl_softc *sc)
1062 {
1063 struct rl_chain_data *cd;
1064 int i;
1065
1066 RL_LOCK_ASSERT(sc);
1067
1068 cd = &sc->rl_cdata;
1069 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1070 cd->rl_tx_chain[i] = NULL;
1071 CSR_WRITE_4(sc,
1072 RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1073 }
1074
1075 sc->rl_cdata.cur_tx = 0;
1076 sc->rl_cdata.last_tx = 0;
1077
1078 return (0);
1079 }
1080
1081 static int
1082 rl_list_rx_init(struct rl_softc *sc)
1083 {
1084
1085 RL_LOCK_ASSERT(sc);
1086
1087 bzero(sc->rl_cdata.rl_rx_buf_ptr,
1088 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1089 bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1090 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1091
1092 return (0);
1093 }
1094
1095 /*
1096 * A frame has been uploaded: pass the resulting mbuf chain up to
1097 * the higher level protocols.
1098 *
1099 * You know there's something wrong with a PCI bus-master chip design
1100 * when you have to use m_devget().
1101 *
1102 * The receive operation is badly documented in the datasheet, so I'll
1103 * attempt to document it here. The driver provides a buffer area and
1104 * places its base address in the RX buffer start address register.
1105 * The chip then begins copying frames into the RX buffer. Each frame
1106 * is preceded by a 32-bit RX status word which specifies the length
1107 * of the frame and certain other status bits. Each frame (starting with
1108 * the status word) is also 32-bit aligned. The frame length is in the
1109 * first 16 bits of the status word; the lower 15 bits correspond with
1110 * the 'rx status register' mentioned in the datasheet.
1111 *
1112 * Note: to make the Alpha happy, the frame payload needs to be aligned
1113 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1114 * as the offset argument to m_devget().
1115 */
1116 static int
1117 rl_rxeof(struct rl_softc *sc)
1118 {
1119 struct mbuf *m;
1120 struct ifnet *ifp = sc->rl_ifp;
1121 uint8_t *rxbufpos;
1122 int total_len = 0;
1123 int wrap = 0;
1124 int rx_npkts = 0;
1125 uint32_t rxstat;
1126 uint16_t cur_rx;
1127 uint16_t limit;
1128 uint16_t max_bytes, rx_bytes = 0;
1129
1130 RL_LOCK_ASSERT(sc);
1131
1132 bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1133 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1134
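	/*
	 * The current read address register apparently lags the real
	 * read pointer by 16 bytes, hence the +16 here and the
	 * corresponding -16 when the pointer is written back below.
	 */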
1135 cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
1136
1137 /* Do not try to read past this point. */
1138 limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
1139
1140 if (limit < cur_rx)
1141 max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
1142 else
1143 max_bytes = limit - cur_rx;
1144
1145 while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
1146 #ifdef DEVICE_POLLING
1147 if (ifp->if_capenable & IFCAP_POLLING) {
1148 if (sc->rxcycles <= 0)
1149 break;
1150 sc->rxcycles--;
1151 }
1152 #endif
1153 rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
1154 rxstat = le32toh(*(uint32_t *)rxbufpos);
1155
1156 /*
1157 * Here's a totally undocumented fact for you. When the
1158 * RealTek chip is in the process of copying a packet into
1159 * RAM for you, the length will be 0xfff0. If you spot a
1160 * packet header with this value, you need to stop. The
1161 * datasheet makes absolutely no mention of this and
1162 * RealTek should be shot for this.
1163 */
1164 total_len = rxstat >> 16;
1165 if (total_len == RL_RXSTAT_UNFINISHED)
1166 break;
1167
1168 if (!(rxstat & RL_RXSTAT_RXOK) ||
1169 total_len < ETHER_MIN_LEN ||
1170 total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
1171 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1172 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1173 rl_init_locked(sc);
1174 return (rx_npkts);
1175 }
1176
1177 /* No errors; receive the packet. */
1178 rx_bytes += total_len + 4;
1179
1180 /*
1181 * XXX The RealTek chip includes the CRC with every
1182 * received frame, and there's no way to turn this
1183 * behavior off (at least, I can't find anything in
1184 * the manual that explains how to do it) so we have
1185 * to trim off the CRC manually.
1186 */
1187 total_len -= ETHER_CRC_LEN;
1188
1189 /*
1190 * Avoid trying to read more bytes than we know
1191 * the chip has prepared for us.
1192 */
1193 if (rx_bytes > max_bytes)
1194 break;
1195
1196 rxbufpos = sc->rl_cdata.rl_rx_buf +
1197 ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
1198 if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1199 rxbufpos = sc->rl_cdata.rl_rx_buf;
1200
1201 wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1202 if (total_len > wrap) {
1203 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1204 NULL);
1205 if (m != NULL)
1206 m_copyback(m, wrap, total_len - wrap,
1207 sc->rl_cdata.rl_rx_buf);
1208 cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1209 } else {
1210 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1211 NULL);
1212 cur_rx += total_len + 4 + ETHER_CRC_LEN;
1213 }
1214
1215 /* Round up to 32-bit boundary. */
1216 cur_rx = (cur_rx + 3) & ~3;
1217 CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
1218
1219 if (m == NULL) {
1220 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1221 continue;
1222 }
1223
1224 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1225 RL_UNLOCK(sc);
1226 (*ifp->if_input)(ifp, m);
1227 RL_LOCK(sc);
1228 rx_npkts++;
1229 }
1230
1231 /* No need to sync Rx memory block as we didn't modify it. */
1232 return (rx_npkts);
1233 }
1234
1235 /*
1236 * A frame was downloaded to the chip. It's safe for us to clean up
1237 * the list buffers.
1238 */
1239 static void
1240 rl_txeof(struct rl_softc *sc)
1241 {
1242 struct ifnet *ifp = sc->rl_ifp;
1243 uint32_t txstat;
1244
1245 RL_LOCK_ASSERT(sc);
1246
1247 /*
1248 * Go through our tx list and free mbufs for those
1249 * frames that have been uploaded.
1250 */
1251 do {
1252 if (RL_LAST_TXMBUF(sc) == NULL)
1253 break;
1254 txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
1255 if (!(txstat & (RL_TXSTAT_TX_OK|
1256 RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
1257 break;
1258
1259 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24);
1260
1261 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
1262 BUS_DMASYNC_POSTWRITE);
1263 bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
1264 m_freem(RL_LAST_TXMBUF(sc));
1265 RL_LAST_TXMBUF(sc) = NULL;
1266 /*
1267 * If there was a transmit underrun, bump the TX threshold.
1268 * Make sure not to overflow the 63 * 32 bytes we can address
1269 * with the 6 available bits.
1270 */
1271 if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
1272 (sc->rl_txthresh < 2016))
1273 sc->rl_txthresh += 32;
1274 if (txstat & RL_TXSTAT_TX_OK)
1275 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1276 else {
1277 int oldthresh;
1278 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1279 if ((txstat & RL_TXSTAT_TXABRT) ||
1280 (txstat & RL_TXSTAT_OUTOFWIN))
1281 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1282 oldthresh = sc->rl_txthresh;
1283 /* error recovery */
1284 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1285 rl_init_locked(sc);
1286 /* restore original threshold */
1287 sc->rl_txthresh = oldthresh;
1288 return;
1289 }
1290 RL_INC(sc->rl_cdata.last_tx);
1291 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1292 } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
1293
1294 if (RL_LAST_TXMBUF(sc) == NULL)
1295 sc->rl_watchdog_timer = 0;
1296 }
1297
1298 static void
1299 rl_twister_update(struct rl_softc *sc)
1300 {
1301 uint16_t linktest;
1302 /*
1303 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
1304 * Linux driver. Values undocumented otherwise.
1305 */
1306 static const uint32_t param[4][4] = {
1307 {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
1308 {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1309 {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1310 {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
1311 };
1312
1313 /*
1314 * Tune the so-called twister registers of the RTL8139. These
1315 * are used to compensate for impedance mismatches. The
1316 * method for tuning these registers is undocumented and the
1317 * following procedure is collected from public sources.
1318 */
1319 switch (sc->rl_twister)
1320 {
1321 case CHK_LINK:
1322 /*
1323 * If we have a sufficient link, then we can proceed in
1324 * the state machine to the next stage. If not, then
1325 * disable further tuning after writing sane defaults.
1326 */
1327 if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
1328 CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
1329 sc->rl_twister = FIND_ROW;
1330 } else {
1331 CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
1332 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1333 CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1334 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1335 sc->rl_twister = DONE;
1336 }
1337 break;
1338 case FIND_ROW:
1339 /*
1340 * Read how long it took to see the echo to find the tuning
1341 * row to use.
1342 */
1343 linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1344 if (linktest == RL_CSCFG_ROW3)
1345 sc->rl_twist_row = 3;
1346 else if (linktest == RL_CSCFG_ROW2)
1347 sc->rl_twist_row = 2;
1348 else if (linktest == RL_CSCFG_ROW1)
1349 sc->rl_twist_row = 1;
1350 else
1351 sc->rl_twist_row = 0;
1352 sc->rl_twist_col = 0;
1353 sc->rl_twister = SET_PARAM;
1354 break;
1355 case SET_PARAM:
1356 if (sc->rl_twist_col == 0)
1357 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1358 CSR_WRITE_4(sc, RL_PARA7C,
1359 param[sc->rl_twist_row][sc->rl_twist_col]);
1360 if (++sc->rl_twist_col == 4) {
1361 if (sc->rl_twist_row == 3)
1362 sc->rl_twister = RECHK_LONG;
1363 else
1364 sc->rl_twister = DONE;
1365 }
1366 break;
1367 case RECHK_LONG:
1368 /*
1369 * For long cables, we have to double check to make sure we
1370 * don't mistune.
1371 */
1372 linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1373 if (linktest == RL_CSCFG_ROW3)
1374 sc->rl_twister = DONE;
1375 else {
1376 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
1377 sc->rl_twister = RETUNE;
1378 }
1379 break;
1380 case RETUNE:
1381 /* Retune for a shorter cable (try column 2) */
1382 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1383 CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1384 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1385 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1386 sc->rl_twist_row--;
1387 sc->rl_twist_col = 0;
1388 sc->rl_twister = SET_PARAM;
1389 break;
1390
1391 case DONE:
1392 break;
1393 }
1394
1395 }
1396
1397 static void
1398 rl_tick(void *xsc)
1399 {
1400 struct rl_softc *sc = xsc;
1401 struct mii_data *mii;
1402 int ticks;
1403
1404 RL_LOCK_ASSERT(sc);
1405 /*
1406 * If we're doing the twister cable calibration, then we need to defer
1407 * watchdog timeouts. Deferring is a no-op in normal operation, but the
1408 * watchdog can falsely trigger when the cable calibration takes a while and
1409 * there was traffic ready to go when rl was started.
1410 *
1411 * We don't defer mii_tick since that updates the mii status, which
1412 * helps the twister process, at least according to similar patches
1413 * for the Linux driver I found online while doing the fixes. Worst
1414 * case is a few extra mii reads during calibration.
1415 */
1416 mii = device_get_softc(sc->rl_miibus);
1417 mii_tick(mii);
1418 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1419 rl_miibus_statchg(sc->rl_dev);
1420 if (sc->rl_twister_enable) {
1421 if (sc->rl_twister == DONE)
1422 rl_watchdog(sc);
1423 else
1424 rl_twister_update(sc);
1425 if (sc->rl_twister == DONE)
1426 ticks = hz;
1427 else
1428 ticks = hz / 10;
1429 } else {
1430 rl_watchdog(sc);
1431 ticks = hz;
1432 }
1433
1434 callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1435 }
1436
1437 #ifdef DEVICE_POLLING
1438 static int
1439 rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1440 {
1441 struct rl_softc *sc = ifp->if_softc;
1442 int rx_npkts = 0;
1443
1444 RL_LOCK(sc);
1445 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1446 rx_npkts = rl_poll_locked(ifp, cmd, count);
1447 RL_UNLOCK(sc);
1448 return (rx_npkts);
1449 }
1450
1451 static int
1452 rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1453 {
1454 struct rl_softc *sc = ifp->if_softc;
1455 int rx_npkts;
1456
1457 RL_LOCK_ASSERT(sc);
1458
1459 sc->rxcycles = count;
1460 rx_npkts = rl_rxeof(sc);
1461 rl_txeof(sc);
1462
1463 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1464 rl_start_locked(ifp);
1465
1466 if (cmd == POLL_AND_CHECK_STATUS) {
1467 uint16_t status;
1468
1469 /* We should also check the status register. */
1470 status = CSR_READ_2(sc, RL_ISR);
1471 if (status == 0xffff)
1472 return (rx_npkts);
1473 if (status != 0)
1474 CSR_WRITE_2(sc, RL_ISR, status);
1475
1476 /* XXX We should check behaviour on receiver stalls. */
1477
1478 if (status & RL_ISR_SYSTEM_ERR) {
1479 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1480 rl_init_locked(sc);
1481 }
1482 }
1483 return (rx_npkts);
1484 }
1485 #endif /* DEVICE_POLLING */
1486
1487 static void
1488 rl_intr(void *arg)
1489 {
1490 struct rl_softc *sc = arg;
1491 struct ifnet *ifp = sc->rl_ifp;
1492 uint16_t status;
1493 int count;
1494
1495 RL_LOCK(sc);
1496
1497 if (sc->suspended)
1498 goto done_locked;
1499
1500 #ifdef DEVICE_POLLING
1501 if (ifp->if_capenable & IFCAP_POLLING)
1502 goto done_locked;
1503 #endif
1504
1505 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1506 goto done_locked2;
1507 status = CSR_READ_2(sc, RL_ISR);
1508 if (status == 0xffff || (status & RL_INTRS) == 0)
1509 goto done_locked;
1510 /*
1511 * Ours, disable further interrupts.
1512 */
1513 CSR_WRITE_2(sc, RL_IMR, 0);
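	/* Bound the work done per interrupt; re-read the ISR until it drains. */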
1514 for (count = 16; count > 0; count--) {
1515 CSR_WRITE_2(sc, RL_ISR, status);
1516 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1517 if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
1518 rl_rxeof(sc);
1519 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
1520 rl_txeof(sc);
1521 if (status & RL_ISR_SYSTEM_ERR) {
1522 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1523 rl_init_locked(sc);
1524 RL_UNLOCK(sc);
1525 return;
1526 }
1527 }
1528 status = CSR_READ_2(sc, RL_ISR);
1529 /* If the card has gone away, the read returns 0xffff. */
1530 if (status == 0xffff || (status & RL_INTRS) == 0)
1531 break;
1532 }
1533
1534 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1535 rl_start_locked(ifp);
1536
1537 done_locked2:
1538 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1539 CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1540 done_locked:
1541 RL_UNLOCK(sc);
1542 }
1543
1544 /*
1545 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1546 * pointers to the fragment pointers.
1547 */
1548 static int
1549 rl_encap(struct rl_softc *sc, struct mbuf **m_head)
1550 {
1551 struct mbuf *m;
1552 bus_dma_segment_t txsegs[1];
1553 int error, nsegs, padlen;
1554
1555 RL_LOCK_ASSERT(sc);
1556
1557 m = *m_head;
1558 padlen = 0;
1559 /*
1560 * Hardware doesn't auto-pad, so we have to make sure to
1561 * pad short frames out to the minimum frame length.
1562 */
1563 if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1564 padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1565 /*
1566 * The RealTek is brain damaged and wants longword-aligned
1567 * TX buffers, plus we can only have one fragment buffer
1568 * per packet. We have to copy pretty much all the time.
1569 */
1570 if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1571 (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1572 m = m_defrag(*m_head, M_NOWAIT);
1573 if (m == NULL) {
1574 m_freem(*m_head);
1575 *m_head = NULL;
1576 return (ENOMEM);
1577 }
1578 }
1579 *m_head = m;
1580
1581 if (padlen > 0) {
1582 /*
1583 * Make security-conscious people happy: zero out the
1584 * bytes in the pad area, since we don't know what
1585 * this mbuf cluster buffer's previous user might
1586 * have left in it.
1587 */
1588 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1589 m->m_pkthdr.len += padlen;
1590 m->m_len = m->m_pkthdr.len;
1591 }
1592
1593 error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1594 RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1595 if (error != 0)
1596 return (error);
1597 if (nsegs == 0) {
1598 m_freem(*m_head);
1599 *m_head = NULL;
1600 return (EIO);
1601 }
1602
1603 RL_CUR_TXMBUF(sc) = m;
1604 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1605 BUS_DMASYNC_PREWRITE);
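	/*
	 * Load the buffer's DMA address into the TX descriptor register;
	 * the length/threshold write in rl_start_locked() starts the
	 * actual transfer.
	 */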
1606 CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1607
1608 return (0);
1609 }
1610
1611 /*
1612 * Main transmit routine.
1613 */
1614 static void
1615 rl_start(struct ifnet *ifp)
1616 {
1617 struct rl_softc *sc = ifp->if_softc;
1618
1619 RL_LOCK(sc);
1620 rl_start_locked(ifp);
1621 RL_UNLOCK(sc);
1622 }
1623
1624 static void
1625 rl_start_locked(struct ifnet *ifp)
1626 {
1627 struct rl_softc *sc = ifp->if_softc;
1628 struct mbuf *m_head = NULL;
1629
1630 RL_LOCK_ASSERT(sc);
1631
1632 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1633 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1634 return;
1635
1636 while (RL_CUR_TXMBUF(sc) == NULL) {
1637
1638 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1639
1640 if (m_head == NULL)
1641 break;
1642
1643 if (rl_encap(sc, &m_head)) {
1644 if (m_head == NULL)
1645 break;
1646 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1647 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1648 break;
1649 }
1650
1651 /* Pass a copy of this mbuf chain to the bpf subsystem. */
1652 BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1653
1654 /* Transmit the frame. */
1655 CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1656 RL_TXTHRESH(sc->rl_txthresh) |
1657 RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1658
1659 RL_INC(sc->rl_cdata.cur_tx);
1660
1661 /* Set a timeout in case the chip goes out to lunch. */
1662 sc->rl_watchdog_timer = 5;
1663 }
1664
1665 /*
1666 * We broke out of the loop because all our TX slots are
1667 * full. Mark the NIC as busy until it drains some of the
1668 * packets from the queue.
1669 */
1670 if (RL_CUR_TXMBUF(sc) != NULL)
1671 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1672 }
1673
1674 static void
1675 rl_init(void *xsc)
1676 {
1677 struct rl_softc *sc = xsc;
1678
1679 RL_LOCK(sc);
1680 rl_init_locked(sc);
1681 RL_UNLOCK(sc);
1682 }
1683
1684 static void
1685 rl_init_locked(struct rl_softc *sc)
1686 {
1687 struct ifnet *ifp = sc->rl_ifp;
1688 struct mii_data *mii;
1689 uint32_t eaddr[2];
1690
1691 RL_LOCK_ASSERT(sc);
1692
1693 mii = device_get_softc(sc->rl_miibus);
1694
1695 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1696 return;
1697
1698 /*
1699 * Cancel pending I/O and free all RX/TX buffers.
1700 */
1701 rl_stop(sc);
1702
1703 rl_reset(sc);
1704 if (sc->rl_twister_enable) {
1705 /*
1706 * Reset twister register tuning state. The twister
1707 * registers and their tuning are undocumented, but
1708 * are necessary to cope with bad links. rl_twister =
1709 * DONE here will disable this entirely.
1710 */
1711 sc->rl_twister = CHK_LINK;
1712 }
1713
1714 /*
1715 * Init our MAC address. Even though the chipset
1716 * documentation doesn't mention it, we need to enter "Config
1717 * register write enable" mode to modify the ID registers.
1718 */
1719 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1720 bzero(eaddr, sizeof(eaddr));
1721 bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1722 CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1723 CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1724 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1725
1726 /* Init the RX memory block pointer register. */
1727 CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1728 RL_RX_8139_BUF_RESERVE);
1729 /* Init TX descriptors. */
1730 rl_list_tx_init(sc);
1731 /* Init Rx memory block. */
1732 rl_list_rx_init(sc);
1733
1734 /*
1735 * Enable transmit and receive.
1736 */
1737 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1738
1739 /*
1740 * Set the initial TX and RX configuration.
1741 */
1742 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1743 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1744
1745 /* Set RX filter. */
1746 rl_rxfilter(sc);
1747
1748 #ifdef DEVICE_POLLING
1749 /* Disable interrupts if we are polling. */
1750 if (ifp->if_capenable & IFCAP_POLLING)
1751 CSR_WRITE_2(sc, RL_IMR, 0);
1752 else
1753 #endif
1754 /* Enable interrupts. */
1755 CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1756
1757 /* Set initial TX threshold */
1758 sc->rl_txthresh = RL_TX_THRESH_INIT;
1759
1760 /* Start RX/TX process. */
1761 CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1762
1763 /* Enable receiver and transmitter. */
1764 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1765
1766 sc->rl_flags &= ~RL_FLAG_LINK;
1767 mii_mediachg(mii);
1768
1769 CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
1770
1771 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1772 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1773
1774 callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
1775 }
1776
1777 /*
1778 * Set media options.
1779 */
1780 static int
1781 rl_ifmedia_upd(struct ifnet *ifp)
1782 {
1783 struct rl_softc *sc = ifp->if_softc;
1784 struct mii_data *mii;
1785
1786 mii = device_get_softc(sc->rl_miibus);
1787
1788 RL_LOCK(sc);
1789 mii_mediachg(mii);
1790 RL_UNLOCK(sc);
1791
1792 return (0);
1793 }
1794
1795 /*
1796 * Report current media status.
1797 */
1798 static void
1799 rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1800 {
1801 struct rl_softc *sc = ifp->if_softc;
1802 struct mii_data *mii;
1803
1804 mii = device_get_softc(sc->rl_miibus);
1805
1806 RL_LOCK(sc);
1807 mii_pollstat(mii);
1808 ifmr->ifm_active = mii->mii_media_active;
1809 ifmr->ifm_status = mii->mii_media_status;
1810 RL_UNLOCK(sc);
1811 }
1812
1813 static int
1814 rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1815 {
1816 struct ifreq *ifr = (struct ifreq *)data;
1817 struct mii_data *mii;
1818 struct rl_softc *sc = ifp->if_softc;
1819 int error = 0, mask;
1820
1821 switch (command) {
1822 case SIOCSIFFLAGS:
1823 RL_LOCK(sc);
1824 if (ifp->if_flags & IFF_UP) {
1825 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1826 ((ifp->if_flags ^ sc->rl_if_flags) &
1827 (IFF_PROMISC | IFF_ALLMULTI)))
1828 rl_rxfilter(sc);
1829 else
1830 rl_init_locked(sc);
1831 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1832 rl_stop(sc);
1833 sc->rl_if_flags = ifp->if_flags;
1834 RL_UNLOCK(sc);
1835 break;
1836 case SIOCADDMULTI:
1837 case SIOCDELMULTI:
1838 RL_LOCK(sc);
1839 rl_rxfilter(sc);
1840 RL_UNLOCK(sc);
1841 break;
1842 case SIOCGIFMEDIA:
1843 case SIOCSIFMEDIA:
1844 mii = device_get_softc(sc->rl_miibus);
1845 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1846 break;
1847 case SIOCSIFCAP:
1848 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1849 #ifdef DEVICE_POLLING
1850 if (ifr->ifr_reqcap & IFCAP_POLLING &&
1851 !(ifp->if_capenable & IFCAP_POLLING)) {
1852 error = ether_poll_register(rl_poll, ifp);
1853 if (error)
1854 return(error);
1855 RL_LOCK(sc);
1856 /* Disable interrupts */
1857 CSR_WRITE_2(sc, RL_IMR, 0x0000);
1858 ifp->if_capenable |= IFCAP_POLLING;
1859 RL_UNLOCK(sc);
1860 return (error);
1861
1862 }
1863 if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
1864 ifp->if_capenable & IFCAP_POLLING) {
1865 error = ether_poll_deregister(ifp);
1866 /* Enable interrupts. */
1867 RL_LOCK(sc);
1868 CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1869 ifp->if_capenable &= ~IFCAP_POLLING;
1870 RL_UNLOCK(sc);
1871 return (error);
1872 }
1873 #endif /* DEVICE_POLLING */
1874 if ((mask & IFCAP_WOL) != 0 &&
1875 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1876 if ((mask & IFCAP_WOL_UCAST) != 0)
1877 ifp->if_capenable ^= IFCAP_WOL_UCAST;
1878 if ((mask & IFCAP_WOL_MCAST) != 0)
1879 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1880 if ((mask & IFCAP_WOL_MAGIC) != 0)
1881 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1882 }
1883 break;
1884 default:
1885 error = ether_ioctl(ifp, command, data);
1886 break;
1887 }
1888
1889 return (error);
1890 }
1891
1892 static void
1893 rl_watchdog(struct rl_softc *sc)
1894 {
1895
1896 RL_LOCK_ASSERT(sc);
1897
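	/* Nothing to do if the timer isn't armed or hasn't expired yet. */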
1898 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
1899 return;
1900
1901 device_printf(sc->rl_dev, "watchdog timeout\n");
1902 if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);
1903
1904 rl_txeof(sc);
1905 rl_rxeof(sc);
1906 sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1907 rl_init_locked(sc);
1908 }
1909
1910 /*
1911 * Stop the adapter and free any mbufs allocated to the
1912 * RX and TX lists.
1913 */
1914 static void
1915 rl_stop(struct rl_softc *sc)
1916 {
1917 int i;
1918 struct ifnet *ifp = sc->rl_ifp;
1919
1920 RL_LOCK_ASSERT(sc);
1921
1922 sc->rl_watchdog_timer = 0;
1923 callout_stop(&sc->rl_stat_callout);
1924 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1925 sc->rl_flags &= ~RL_FLAG_LINK;
1926
1927 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
1928 CSR_WRITE_2(sc, RL_IMR, 0x0000);
1929 for (i = 0; i < RL_TIMEOUT; i++) {
1930 DELAY(10);
1931 if ((CSR_READ_1(sc, RL_COMMAND) &
1932 (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
1933 break;
1934 }
1935 if (i == RL_TIMEOUT)
1936 device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
1937
1938 /*
1939 * Free the TX list buffers.
1940 */
1941 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1942 if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
1943 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
1944 sc->rl_cdata.rl_tx_dmamap[i],
1945 BUS_DMASYNC_POSTWRITE);
1946 bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
1947 sc->rl_cdata.rl_tx_dmamap[i]);
1948 m_freem(sc->rl_cdata.rl_tx_chain[i]);
1949 sc->rl_cdata.rl_tx_chain[i] = NULL;
1950 CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
1951 0x0000000);
1952 }
1953 }
1954 }
1955
1956 /*
1957 * Device suspend routine. Stop the interface and save some PCI
1958 * settings in case the BIOS doesn't restore them properly on
1959 * resume.
1960 */
1961 static int
1962 rl_suspend(device_t dev)
1963 {
1964 struct rl_softc *sc;
1965
1966 sc = device_get_softc(dev);
1967
1968 RL_LOCK(sc);
1969 rl_stop(sc);
1970 rl_setwol(sc);
1971 sc->suspended = 1;
1972 RL_UNLOCK(sc);
1973
1974 return (0);
1975 }
1976
1977 /*
1978 * Device resume routine. Restore some PCI settings in case the BIOS
1979 * doesn't, re-enable busmastering, and restart the interface if
1980 * appropriate.
1981 */
1982 static int
1983 rl_resume(device_t dev)
1984 {
1985 struct rl_softc *sc;
1986 struct ifnet *ifp;
1987 int pmc;
1988 uint16_t pmstat;
1989
1990 sc = device_get_softc(dev);
1991 ifp = sc->rl_ifp;
1992
1993 RL_LOCK(sc);
1994
1995 if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
1996 pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
1997 /* Disable PME and clear PME status. */
1998 pmstat = pci_read_config(sc->rl_dev,
1999 pmc + PCIR_POWER_STATUS, 2);
2000 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2001 pmstat &= ~PCIM_PSTAT_PMEENABLE;
2002 pci_write_config(sc->rl_dev,
2003 pmc + PCIR_POWER_STATUS, pmstat, 2);
2004 }
2005 /*
2006 * Clear WOL matching such that normal Rx filtering
2007 * wouldn't interfere with WOL patterns.
2008 */
2009 rl_clrwol(sc);
2010 }
2011
2012 /* reinitialize interface if necessary */
2013 if (ifp->if_flags & IFF_UP)
2014 rl_init_locked(sc);
2015
2016 sc->suspended = 0;
2017
2018 RL_UNLOCK(sc);
2019
2020 return (0);
2021 }
2022
2023 /*
2024 * Stop all chip I/O so that the kernel's probe routines don't
2025 * get confused by errant DMAs when rebooting.
2026 */
2027 static int
2028 rl_shutdown(device_t dev)
2029 {
2030 struct rl_softc *sc;
2031
2032 sc = device_get_softc(dev);
2033
2034 RL_LOCK(sc);
2035 rl_stop(sc);
2036 /*
2037 * Mark interface as down since otherwise we will panic if
2038 * interrupt comes in later on, which can happen in some
2039 * cases.
2040 */
2041 sc->rl_ifp->if_flags &= ~IFF_UP;
2042 rl_setwol(sc);
2043 RL_UNLOCK(sc);
2044
2045 return (0);
2046 }
2047
2048 static void
2049 rl_setwol(struct rl_softc *sc)
2050 {
2051 struct ifnet *ifp;
2052 int pmc;
2053 uint16_t pmstat;
2054 uint8_t v;
2055
2056 RL_LOCK_ASSERT(sc);
2057
2058 ifp = sc->rl_ifp;
2059 if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2060 return;
2061 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
2062 return;
2063
2064 /* Enable config register write. */
2065 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2066
2067 /* Enable PME. */
2068 v = CSR_READ_1(sc, sc->rl_cfg1);
2069 v &= ~RL_CFG1_PME;
2070 if ((ifp->if_capenable & IFCAP_WOL) != 0)
2071 v |= RL_CFG1_PME;
2072 CSR_WRITE_1(sc, sc->rl_cfg1, v);
2073
2074 v = CSR_READ_1(sc, sc->rl_cfg3);
2075 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2076 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2077 v |= RL_CFG3_WOL_MAGIC;
2078 CSR_WRITE_1(sc, sc->rl_cfg3, v);
2079
2080 v = CSR_READ_1(sc, sc->rl_cfg5);
2081 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2082 v &= ~RL_CFG5_WOL_LANWAKE;
2083 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2084 v |= RL_CFG5_WOL_UCAST;
2085 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2086 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
2087 if ((ifp->if_capenable & IFCAP_WOL) != 0)
2088 v |= RL_CFG5_WOL_LANWAKE;
2089 CSR_WRITE_1(sc, sc->rl_cfg5, v);
2090
2091 /* Config register write done. */
2092 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2093
2094 /* Request PME if WOL is requested. */
2095 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
2096 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2097 if ((ifp->if_capenable & IFCAP_WOL) != 0)
2098 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2099 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2100 }
2101
2102 static void
2103 rl_clrwol(struct rl_softc *sc)
2104 {
2105 struct ifnet *ifp;
2106 uint8_t v;
2107
2108 ifp = sc->rl_ifp;
2109 if ((ifp->if_capabilities & IFCAP_WOL) == 0)
2110 return;
2111
2112 /* Enable config register write. */
2113 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2114
2115 v = CSR_READ_1(sc, sc->rl_cfg3);
2116 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2117 CSR_WRITE_1(sc, sc->rl_cfg3, v);
2118
2119 /* Config register write done. */
2120 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2121
2122 v = CSR_READ_1(sc, sc->rl_cfg5);
2123 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2124 v &= ~RL_CFG5_WOL_LANWAKE;
2125 CSR_WRITE_1(sc, sc->rl_cfg5, v);
2126 }