FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/rtl81x9.c
1 /* $NetBSD: rtl81x9.c,v 1.113 2022/09/25 18:43:32 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35 */
36
37 /*
38 * RealTek 8129/8139 PCI NIC driver
39 *
40 * Supports several extremely cheap PCI 10/100 adapters based on
41 * the RealTek chipset. Datasheets can be obtained from
42 * www.realtek.com.tw.
43 *
44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51 * probably the worst PCI ethernet controller ever made, with the possible
52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53 * DMA, but it has a terrible interface that nullifies any performance
54 * gains that bus-master DMA usually offers.
55 *
56 * For transmission, the chip offers a series of four TX descriptor
57 * registers. Each transmit frame must be in a contiguous buffer, aligned
58 * on a longword (32-bit) boundary. This means we almost always have to
59 * do mbuf copies in order to transmit a frame, except in the unlikely
60 * case where a) the packet fits into a single mbuf, and b) the packet
61 * is 32-bit aligned within the mbuf's data area. The presence of only
62 * four descriptor registers means that we can never have more than four
63 * packets queued for transmission at any one time.
64 *
65 * Reception is not much better. The driver has to allocate a single large
66 * buffer area (up to 64K in size) into which the chip will DMA received
67 * frames. Because we don't know where within this region received packets
68 * will begin or end, we have no choice but to copy data from the buffer
69 * area into mbufs in order to pass the packets up to the higher protocol
70 * levels.
71 *
72 * It's impossible given this rotten design to really achieve decent
73 * performance at 100Mbps, unless you happen to have a 400MHz PII or
74 * some equally overmuscled CPU to drive it.
75 *
76 * On the bright side, the 8139 does have a built-in PHY, although
77 * rather than using an MDIO serial interface like most other NICs, the
78 * PHY registers are directly accessible through the 8139's register
79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80 * filter.
81 *
82 * The 8129 chip is an older version of the 8139 that uses an external PHY
83 * chip. The 8129 has a serial MDIO interface for accessing the MII where
84 * the 8139 lets you directly access the on-board PHY registers. We need
85 * to select which interface to use depending on the chip type.
86 */
87
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.113 2022/09/25 18:43:32 thorpej Exp $");
90
91
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/device.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/if_ether.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106
107 #include <net/bpf.h>
108 #include <sys/rndsource.h>
109
110 #include <sys/bus.h>
111 #include <machine/endian.h>
112
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115
116 #include <dev/ic/rtl81x9reg.h>
117 #include <dev/ic/rtl81x9var.h>
118
119 static void rtk_reset(struct rtk_softc *);
120 static void rtk_rxeof(struct rtk_softc *);
121 static void rtk_txeof(struct rtk_softc *);
122 static void rtk_start(struct ifnet *);
123 static int rtk_ioctl(struct ifnet *, u_long, void *);
124 static int rtk_init(struct ifnet *);
125 static void rtk_stop(struct ifnet *, int);
126
127 static void rtk_watchdog(struct ifnet *);
128
129 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
130 static void rtk_mii_sync(struct rtk_softc *);
131 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
132 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
133 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
134
135 static int rtk_phy_readreg(device_t, int, int, uint16_t *);
136 static int rtk_phy_writereg(device_t, int, int, uint16_t);
137 static void rtk_phy_statchg(struct ifnet *);
138 static void rtk_tick(void *);
139
140 static int rtk_enable(struct rtk_softc *);
141 static void rtk_disable(struct rtk_softc *);
142
143 static void rtk_list_tx_init(struct rtk_softc *);
144
/* Set/clear bits in the EEPROM command register (read-modify-write).
 * A variable named 'sc' must be in scope at the expansion site. */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RTK_EECMD,			\
		CSR_READ_1(sc, RTK_EECMD) | (x))

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RTK_EECMD,			\
		CSR_READ_1(sc, RTK_EECMD) & ~(x))

/* Settle time between EEPROM clock/data transitions. */
#define EE_DELAY()	DELAY(100)

/* Minimum Ethernet frame length excluding CRC; shorter TX frames are padded. */
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
156
/*
 * Clock the READ opcode and the target address out to the EEPROM.
 * (Despite the historical name, nothing is read back here; the chip
 * simply latches the command/address bits on each clock strobe.)
 */
static void
rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
{
	int d, i;

	/* Command word: READ opcode shifted above addr_len address bits. */
	d = (RTK_EECMD_READ << addr_len) | addr;

	/*
	 * Feed in each bit, MSB first, and strobe the clock.
	 */
	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
		if (d & (1 << (i - 1))) {
			EE_SET(RTK_EE_DATAIN);
		} else {
			EE_CLR(RTK_EE_DATAIN);
		}
		EE_DELAY();
		EE_SET(RTK_EE_CLK);
		EE_DELAY();
		EE_CLR(RTK_EE_CLK);
		EE_DELAY();
	}
}
183
184 /*
185 * Read a word of data stored in the EEPROM at address 'addr.'
186 */
187 uint16_t
188 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
189 {
190 uint16_t word;
191 int i;
192
193 /* Enter EEPROM access mode. */
194 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
195 EE_DELAY();
196 EE_SET(RTK_EE_SEL);
197
198 /*
199 * Send address of word we want to read.
200 */
201 rtk_eeprom_putbyte(sc, addr, addr_len);
202
203 /*
204 * Start reading bits from EEPROM.
205 */
206 word = 0;
207 for (i = 16; i > 0; i--) {
208 EE_SET(RTK_EE_CLK);
209 EE_DELAY();
210 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
211 word |= 1 << (i - 1);
212 EE_CLR(RTK_EE_CLK);
213 EE_DELAY();
214 }
215
216 /* Turn off EEPROM access mode. */
217 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
218
219 return word;
220 }
221
/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
 * direct access PHY registers.
 */
/* Set/clear bits in the MII bit-bang register (read-modify-write).
 * A variable named 'sc' must be in scope at the expansion site. */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RTK_MII,			\
		CSR_READ_1(sc, RTK_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RTK_MII,			\
		CSR_READ_1(sc, RTK_MII) & ~(x))
235
236 /*
237 * Sync the PHYs by setting data bit and strobing the clock 32 times.
238 */
239 static void
240 rtk_mii_sync(struct rtk_softc *sc)
241 {
242 int i;
243
244 MII_SET(RTK_MII_DIR | RTK_MII_DATAOUT);
245
246 for (i = 0; i < 32; i++) {
247 MII_SET(RTK_MII_CLK);
248 DELAY(1);
249 MII_CLR(RTK_MII_CLK);
250 DELAY(1);
251 }
252 }
253
/*
 * Clock a series of bits through the MII, MSB first.
 * 'cnt' is the number of low-order bits of 'bits' to send.
 * The exact clock-edge ordering below matches what the hardware
 * expects; do not reorder these register accesses.
 */
static void
rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(RTK_MII_CLK);

	for (i = cnt; i > 0; i--) {
		/* Present the data bit, then toggle the clock. */
		if (bits & (1 << (i - 1))) {
			MII_SET(RTK_MII_DATAOUT);
		} else {
			MII_CLR(RTK_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RTK_MII_CLK);
		DELAY(1);
		MII_SET(RTK_MII_CLK);
	}
}
276
/*
 * Read a PHY register through the bit-banged MII (8129 only).
 * The frame's phyaddr/regaddr fields must be filled in by the caller;
 * on success the result is left in frame->mii_data and 0 is returned,
 * otherwise -1 is returned.
 */
static int
rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
{
	int i, ack, s, rv = 0;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RTK_MII_STARTDELIM;
	frame->mii_opcode = RTK_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RTK_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RTK_MII_DIR);

	rtk_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rtk_mii_send(sc, frame->mii_stdelim, 2);
	rtk_mii_send(sc, frame->mii_opcode, 2);
	rtk_mii_send(sc, frame->mii_phyaddr, 5);
	rtk_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RTK_MII_CLK | RTK_MII_DATAOUT));
	DELAY(1);
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RTK_MII_DIR);

	/* Check for ack: a low data line here is the PHY's ACK. */
	MII_CLR(RTK_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(RTK_MII_CLK);
			DELAY(1);
			MII_SET(RTK_MII_CLK);
			DELAY(1);
		}
		rv = -1;
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		MII_CLR(RTK_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
				frame->mii_data |= 1 << (i - 1);
			DELAY(1);
		}
		MII_SET(RTK_MII_CLK);
		DELAY(1);
	}

 fail:
	/* One final clock cycle to return the bus to idle. */
	MII_CLR(RTK_MII_CLK);
	DELAY(1);
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	splx(s);

	return rv;
}
365
/*
 * Write to a PHY register through the bit-banged MII (8129 only).
 * The caller fills in the frame's phyaddr/regaddr/data fields.
 * Always returns 0 (the write is not acknowledged by the PHY).
 */
static int
rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = RTK_MII_STARTDELIM;
	frame->mii_opcode = RTK_MII_WRITEOP;
	frame->mii_turnaround = RTK_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(RTK_MII_DIR);

	rtk_mii_sync(sc);

	/* Clock out the whole write frame, MSB first. */
	rtk_mii_send(sc, frame->mii_stdelim, 2);
	rtk_mii_send(sc, frame->mii_opcode, 2);
	rtk_mii_send(sc, frame->mii_phyaddr, 5);
	rtk_mii_send(sc, frame->mii_regaddr, 5);
	rtk_mii_send(sc, frame->mii_turnaround, 2);
	rtk_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RTK_MII_CLK);
	DELAY(1);
	MII_CLR(RTK_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(RTK_MII_DIR);

	splx(s);

	return 0;
}
411
412 static int
413 rtk_phy_readreg(device_t self, int phy, int reg, uint16_t *val)
414 {
415 struct rtk_softc *sc = device_private(self);
416 struct rtk_mii_frame frame;
417 int rv;
418 int rtk8139_reg;
419
420 if ((sc->sc_quirk & RTKQ_8129) == 0) {
421 if (phy != 7)
422 return -1;
423
424 switch (reg) {
425 case MII_BMCR:
426 rtk8139_reg = RTK_BMCR;
427 break;
428 case MII_BMSR:
429 rtk8139_reg = RTK_BMSR;
430 break;
431 case MII_ANAR:
432 rtk8139_reg = RTK_ANAR;
433 break;
434 case MII_ANER:
435 rtk8139_reg = RTK_ANER;
436 break;
437 case MII_ANLPAR:
438 rtk8139_reg = RTK_LPAR;
439 break;
440 case MII_PHYIDR1:
441 case MII_PHYIDR2:
442 *val = 0;
443 return 0;
444 default:
445 #if 0
446 printf("%s: bad phy register\n", device_xname(self));
447 #endif
448 return -1;
449 }
450 *val = CSR_READ_2(sc, rtk8139_reg);
451 return 0;
452 }
453
454 memset(&frame, 0, sizeof(frame));
455
456 frame.mii_phyaddr = phy;
457 frame.mii_regaddr = reg;
458 rv = rtk_mii_readreg(sc, &frame);
459 *val = frame.mii_data;
460
461 return rv;
462 }
463
464 static int
465 rtk_phy_writereg(device_t self, int phy, int reg, uint16_t val)
466 {
467 struct rtk_softc *sc = device_private(self);
468 struct rtk_mii_frame frame;
469 int rtk8139_reg;
470
471 if ((sc->sc_quirk & RTKQ_8129) == 0) {
472 if (phy != 7)
473 return -1;
474
475 switch (reg) {
476 case MII_BMCR:
477 rtk8139_reg = RTK_BMCR;
478 break;
479 case MII_BMSR:
480 rtk8139_reg = RTK_BMSR;
481 break;
482 case MII_ANAR:
483 rtk8139_reg = RTK_ANAR;
484 break;
485 case MII_ANER:
486 rtk8139_reg = RTK_ANER;
487 break;
488 case MII_ANLPAR:
489 rtk8139_reg = RTK_LPAR;
490 break;
491 default:
492 #if 0
493 printf("%s: bad phy register\n", device_xname(self));
494 #endif
495 return -1;
496 }
497 CSR_WRITE_2(sc, rtk8139_reg, val);
498 return 0;
499 }
500
501 memset(&frame, 0, sizeof(frame));
502
503 frame.mii_phyaddr = phy;
504 frame.mii_regaddr = reg;
505 frame.mii_data = val;
506
507 return rtk_mii_writereg(sc, &frame);
508 }
509
/*
 * mii_statchg hook. The chip needs no reprogramming on link/media
 * status changes, so this is intentionally empty.
 */
static void
rtk_phy_statchg(struct ifnet *ifp)
{

	/* Nothing to do. */
}
516
/* Upper 6 bits of the big-endian CRC of the address select the hash bit. */
#define rtk_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)

/*
 * Program the 64-bit multicast hash filter.
 * Falls back to all-multicast mode for promiscuous mode or when an
 * address range (addrlo != addrhi) is present in the multicast list.
 */
void
rtk_setmulti(struct rtk_softc *sc)
{
	struct ethercom *ec = &sc->ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t rxfilt;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h, mcnt;

	rxfilt = CSR_READ_4(sc, RTK_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		/* Accept all multicast: set every hash bit. */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RTK_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RTK_MAR0, 0);
	CSR_WRITE_4(sc, RTK_MAR4, 0);

	/* now program new ones */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		/* Ranges can't be expressed in the hash; go all-multi. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		h = rtk_calchash(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= __BIT(h);
		else
			hashes[1] |= __BIT(h - 32);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= RTK_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RTK_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);

	/*
	 * For some unfathomable reason, RealTek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
	}
}
594
595 void
596 rtk_reset(struct rtk_softc *sc)
597 {
598 int i;
599
600 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
601
602 for (i = 0; i < RTK_TIMEOUT; i++) {
603 DELAY(10);
604 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
605 break;
606 }
607 if (i == RTK_TIMEOUT)
608 printf("%s: reset never completed!\n",
609 device_xname(sc->sc_dev));
610 }
611
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Resources are acquired in order (rx DMA memory, rx map, per-slot
 * tx maps); on failure the fail_* labels release them in reverse.
 */
void
rtk_attach(struct rtk_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->mii;
	struct rtk_tx_desc *txd;
	uint16_t val;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	int i, addr_len;

	callout_init(&sc->rtk_tick_ch, 0);
	callout_setfunc(&sc->rtk_tick_ch, rtk_tick, sc);

	/*
	 * Check EEPROM type 9346 or 9356.
	 */
	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address: three 16-bit EEPROM words, each holding
	 * two address bytes little-endian.
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	/* Allocate the single large RX DMA buffer (+16 slop for wrap). */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
	    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	/* One DMA map per TX descriptor slot. */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = rtk_phy_readreg;
	mii->mii_writereg = rtk_phy_writereg;
	mii->mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: expose only "none". */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;
 fail_4:
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
 fail_0:
	return;
}
769
770 /*
771 * Initialize the transmit descriptors.
772 */
773 static void
774 rtk_list_tx_init(struct rtk_softc *sc)
775 {
776 struct rtk_tx_desc *txd;
777 int i;
778
779 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
780 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
781 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
782 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
783
784 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
785 txd = &sc->rtk_tx_descs[i];
786 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
787 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
788 }
789 }
790
791 /*
792 * rtk_activate:
793 * Handle device activation/deactivation requests.
794 */
795 int
796 rtk_activate(device_t self, enum devact act)
797 {
798 struct rtk_softc *sc = device_private(self);
799
800 switch (act) {
801 case DVACT_DEACTIVATE:
802 if_deactivate(&sc->ethercom.ec_if);
803 return 0;
804 default:
805 return EOPNOTSUPP;
806 }
807 }
808
/*
 * rtk_detach:
 *	Detach a rtk interface. Tears down resources in the reverse
 *	order of rtk_attach(); a no-op if attach never completed.
 */
int
rtk_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rtk_tx_desc *txd;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	rnd_detach_source(&sc->rnd_source);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->mii.mii_media);

	/* Release TX DMA maps, then the RX map and buffer memory. */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}
855
856 /*
857 * rtk_enable:
858 * Enable the RTL81X9 chip.
859 */
860 int
861 rtk_enable(struct rtk_softc *sc)
862 {
863
864 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
865 if ((*sc->sc_enable)(sc) != 0) {
866 printf("%s: device enable failed\n",
867 device_xname(sc->sc_dev));
868 return EIO;
869 }
870 sc->sc_flags |= RTK_ENABLED;
871 }
872 return 0;
873 }
874
875 /*
876 * rtk_disable:
877 * Disable the RTL81X9 chip.
878 */
879 void
880 rtk_disable(struct rtk_softc *sc)
881 {
882
883 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
884 (*sc->sc_disable)(sc);
885 sc->sc_flags &= ~RTK_ENABLED;
886 }
887 }
888
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design.
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we copy the data to mbuf
 * shifted forward 2 bytes.
 */
static void
rtk_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	uint8_t *rxbufpos, *dst;
	u_int total_len, wrap;
	uint32_t rxstat;
	uint16_t cur_rx, new_rx;
	uint16_t limit;
	uint16_t rx_bytes, max_bytes;

	ifp = &sc->ethercom.ec_if;

	/* The chip's read pointer is kept 16 bytes behind (see below). */
	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;
	rx_bytes = 0;

	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
		rxbufpos = sc->rtk_rx_buf + cur_rx;
		/* Sync just the status word before peeking at it. */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
		rxstat = le32toh(*(uint32_t *)rxbufpos);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RTK_RXSTAT_UNFINISHED)
			break;

		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
			if_statinc(ifp, if_ierrors);

			/*
			 * submitted by:[netbsd-pcmcia:00484]
			 *	Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
			 * obtain from:
			 *	FreeBSD if_rl.c rev 1.24->1.25
			 *
			 */
#if 0
			if (rxstat & (RTK_RXSTAT_BADSYM | RTK_RXSTAT_RUNT |
			    RTK_RXSTAT_GIANT | RTK_RXSTAT_CRCERR |
			    RTK_RXSTAT_ALIGNERR)) {
				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
				CSR_WRITE_2(sc, RTK_COMMAND,
				    RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
				CSR_WRITE_4(sc, RTK_RXADDR,
				    sc->recv_dmamap->dm_segs[0].ds_addr);
				cur_rx = 0;
			}
			break;
#else
			/* On any RX error, reinitialize the whole chip. */
			rtk_init(ifp);
			return;
#endif
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + RTK_RXSTAT_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/*
		 * Skip the status word, wrapping around to the beginning
		 * of the Rx area, if necessary.
		 */
		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
		rxbufpos = sc->rtk_rx_buf + cur_rx;

		/*
		 * Compute the number of bytes at which the packet
		 * will wrap to the beginning of the ring buffer.
		 */
		wrap = RTK_RXBUFLEN - cur_rx;

		/*
		 * Compute where the next pending packet is.
		 */
		if (total_len > wrap)
			new_rx = total_len - wrap;
		else
			new_rx = cur_rx + total_len;
		/* Round up to 32-bit boundary. */
		new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;

		/*
		 * The RealTek chip includes the CRC with every
		 * incoming packet; trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Now allocate an mbuf (and possibly a cluster) to hold
		 * the packet. Note we offset the packet 2 bytes so that
		 * data after the Ethernet header will be 4-byte aligned.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n",
			    device_xname(sc->sc_dev));
			if_statinc(ifp, if_ierrors);
			goto next_packet;
		}
		MCLAIM(m, &sc->ethercom.ec_rx_mowner);
		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    device_xname(sc->sc_dev));
				if_statinc(ifp, if_ierrors);
				m_freem(m);
				m = NULL;
				goto next_packet;
			}
		}
		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = total_len;
		dst = mtod(m, void *);

		/*
		 * If the packet wraps, copy up to the wrapping point.
		 */
		if (total_len > wrap) {
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
			memcpy(dst, rxbufpos, wrap);
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
			cur_rx = 0;
			rxbufpos = sc->rtk_rx_buf;
			total_len -= wrap;
			dst += wrap;
		}

		/*
		 * ...and now the rest.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
		memcpy(dst, rxbufpos, total_len);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_PREREAD);

 next_packet:
		/* Advance the chip's read pointer (kept 16 bytes behind). */
		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
		cur_rx = new_rx;

		if (m == NULL)
			continue;

		/* pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
}
1088
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers: reclaim completed descriptors from the dirty
 * queue, update statistics, and adjust the early-TX threshold on
 * underrun.
 */
static void
rtk_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint32_t txstat;

	ifp = &sc->ethercom.ec_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
		txstat = CSR_READ_4(sc, txd->txd_txstat);
		/* Stop at the first descriptor the chip hasn't finished. */
		if ((txstat & (RTK_TXSTAT_TX_OK |
		    RTK_TXSTAT_TX_UNDERRUN | RTK_TXSTAT_TXABRT)) == 0)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);

		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
		m_freem(txd->txd_mbuf);
		txd->txd_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		/* Collision count lives in bits 24-27 of the TX status. */
		if_statadd_ref(nsr, if_collisions,
		    (txstat & RTK_TXSTAT_COLLCNT) >> 24);

		if (txstat & RTK_TXSTAT_TX_OK)
			if_statinc_ref(nsr, if_opackets);
		else {
			if_statinc_ref(nsr, if_oerrors);

			/*
			 * Increase Early TX threshold if underrun occurred.
			 * Increase step 64 bytes.
			 */
			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
#ifdef DEBUG
				printf("%s: transmit underrun;",
				    device_xname(sc->sc_dev));
#endif
				if (sc->sc_txthresh < RTK_TXTH_MAX) {
					sc->sc_txthresh += 2;
#ifdef DEBUG
					printf(" new threshold: %d bytes",
					    sc->sc_txthresh * 32);
#endif
				}
#ifdef DEBUG
				printf("\n");
#endif
			}
			/* Abort/out-of-window requires TX reconfiguration. */
			if (txstat & (RTK_TXSTAT_TXABRT | RTK_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
		}
		IF_STAT_PUTREF(ifp);
		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Clear the timeout timer if there is no pending packet. */
	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
		ifp->if_timer = 0;

}
1162
/*
 * Interrupt handler. Masks interrupts, services RX/TX/error
 * conditions until the ISR shows no more work, then re-enables
 * interrupts. Returns nonzero if any interrupt was ours.
 */
int
rtk_intr(void *arg)
{
	struct rtk_softc *sc;
	struct ifnet *ifp;
	uint16_t status, rndstatus = 0;
	int handled;

	sc = arg;
	ifp = &sc->ethercom.ec_if;

	/* Ignore spurious interrupts while powered down. */
	if (!device_has_power(sc->sc_dev))
		return 0;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	handled = 0;
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);

		if (status == 0xffff)
			break; /* Card is gone... */

		if (status) {
			/* Ack the conditions we are about to service. */
			CSR_WRITE_2(sc, RTK_ISR, status);
			rndstatus = status;
		}

		if ((status & RTK_INTRS) == 0)
			break;

		handled = 1;

		if (status & RTK_ISR_RX_OK)
			rtk_rxeof(sc);

		if (status & RTK_ISR_RX_ERR)
			rtk_rxeof(sc);

		if (status & (RTK_ISR_TX_OK | RTK_ISR_TX_ERR))
			rtk_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			rtk_reset(sc);
			rtk_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	if_schedule_deferred_start(ifp);

	/* Feed the last ISR value to the entropy pool. */
	rnd_add_uint32(&sc->rnd_source, rndstatus);

	return handled;
}
1222
1223 /*
1224 * Main transmit routine.
1225 */
1226
1227 static void
1228 rtk_start(struct ifnet *ifp)
1229 {
1230 struct rtk_softc *sc;
1231 struct rtk_tx_desc *txd;
1232 struct mbuf *m_head, *m_new;
1233 int error, len;
1234
1235 sc = ifp->if_softc;
1236
1237 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1238 IFQ_POLL(&ifp->if_snd, m_head);
1239 if (m_head == NULL)
1240 break;
1241 m_new = NULL;
1242
1243 /*
1244 * Load the DMA map. If this fails, the packet didn't
1245 * fit in one DMA segment, and we need to copy. Note,
1246 * the packet must also be aligned.
1247 * if the packet is too small, copy it too, so we're sure
1248 * so have enough room for the pad buffer.
1249 */
1250 if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1251 m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1252 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1253 m_head, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
1254 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1255 if (m_new == NULL) {
1256 printf("%s: unable to allocate Tx mbuf\n",
1257 device_xname(sc->sc_dev));
1258 break;
1259 }
1260 MCLAIM(m_new, &sc->ethercom.ec_rx_mowner);
1261 if (m_head->m_pkthdr.len > MHLEN) {
1262 MCLGET(m_new, M_DONTWAIT);
1263 if ((m_new->m_flags & M_EXT) == 0) {
1264 printf("%s: unable to allocate Tx "
1265 "cluster\n",
1266 device_xname(sc->sc_dev));
1267 m_freem(m_new);
1268 break;
1269 }
1270 }
1271 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1272 mtod(m_new, void *));
1273 m_new->m_pkthdr.len = m_new->m_len =
1274 m_head->m_pkthdr.len;
1275 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1276 memset(
1277 mtod(m_new, char *) + m_head->m_pkthdr.len,
1278 0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1279 m_new->m_pkthdr.len = m_new->m_len =
1280 ETHER_PAD_LEN;
1281 }
1282 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1283 txd->txd_dmamap, m_new,
1284 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1285 if (error) {
1286 printf("%s: unable to load Tx buffer, "
1287 "error = %d\n",
1288 device_xname(sc->sc_dev), error);
1289 break;
1290 }
1291 }
1292 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1293 /*
1294 * If there's a BPF listener, bounce a copy of this frame
1295 * to him.
1296 */
1297 bpf_mtap(ifp, m_head, BPF_D_OUT);
1298 if (m_new != NULL) {
1299 m_freem(m_head);
1300 m_head = m_new;
1301 }
1302 txd->txd_mbuf = m_head;
1303
1304 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1305 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1306
1307 /*
1308 * Transmit the frame.
1309 */
1310 bus_dmamap_sync(sc->sc_dmat,
1311 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1312 BUS_DMASYNC_PREWRITE);
1313
1314 len = txd->txd_dmamap->dm_segs[0].ds_len;
1315
1316 CSR_WRITE_4(sc, txd->txd_txaddr,
1317 txd->txd_dmamap->dm_segs[0].ds_addr);
1318 CSR_WRITE_4(sc, txd->txd_txstat,
1319 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1320
1321 /*
1322 * Set a timeout in case the chip goes out to lunch.
1323 */
1324 ifp->if_timer = 5;
1325 }
1326
1327 /*
1328 * We broke out of the loop because all our TX slots are
1329 * full. Mark the NIC as busy until it drains some of the
1330 * packets from the queue.
1331 */
1332 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1333 ifp->if_flags |= IFF_OACTIVE;
1334 }
1335
/*
 * Initialize the interface and bring it to the RUNNING state.
 *
 * Stops any pending I/O, programs the station address, RX buffer,
 * TX descriptors and RX/TX configuration, sets the multicast filter,
 * unmasks interrupts and starts the one-second MII tick.  Returns 0
 * on success or an errno; on failure the interface is marked down.
 */
static int
rtk_init(struct ifnet *ifp)
{
	struct rtk_softc *sc = ifp->if_softc;
	int error, i;
	uint32_t rxcfg;

	/* Power up / enable the device before touching registers. */
	if ((error = rtk_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel pending I/O.
	 */
	rtk_stop(ifp, 0);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
	}

	/* Init the RX buffer pointer register. */
	bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
	    sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);

	/* Init TX descriptors. */
	rtk_list_tx_init(sc);

	/* Init Early TX threshold. */
	sc->sc_txthresh = RTK_TXTH_256;
	/*
	 * Enable transmit and receive.  This must be done before the
	 * TX/RX configuration registers are written.
	 */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
	rxcfg |= RTK_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RTK_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RTK_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	rtk_setmulti(sc);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);

	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX);

	/*
	 * Set current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second MII tick. */
	callout_schedule(&sc->rtk_tick_ch, hz);

 out:
	if (error) {
		/* Leave the interface down and report the failure. */
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return error;
}
1438
1439 static int
1440 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1441 {
1442 struct rtk_softc *sc = ifp->if_softc;
1443 int s, error;
1444
1445 s = splnet();
1446 error = ether_ioctl(ifp, command, data);
1447 if (error == ENETRESET) {
1448 if (ifp->if_flags & IFF_RUNNING) {
1449 /*
1450 * Multicast list has changed. Set the
1451 * hardware filter accordingly.
1452 */
1453 rtk_setmulti(sc);
1454 }
1455 error = 0;
1456 }
1457 splx(s);
1458
1459 return error;
1460 }
1461
1462 static void
1463 rtk_watchdog(struct ifnet *ifp)
1464 {
1465 struct rtk_softc *sc;
1466
1467 sc = ifp->if_softc;
1468
1469 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1470 if_statinc(ifp, if_oerrors);
1471 rtk_txeof(sc);
1472 rtk_rxeof(sc);
1473 rtk_init(ifp);
1474 }
1475
1476 /*
1477 * Stop the adapter and free any mbufs allocated to the
1478 * RX and TX lists.
1479 */
1480 static void
1481 rtk_stop(struct ifnet *ifp, int disable)
1482 {
1483 struct rtk_softc *sc = ifp->if_softc;
1484 struct rtk_tx_desc *txd;
1485
1486 callout_stop(&sc->rtk_tick_ch);
1487
1488 mii_down(&sc->mii);
1489
1490 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1491 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1492
1493 /*
1494 * Free the TX list buffers.
1495 */
1496 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1497 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1498 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1499 m_freem(txd->txd_mbuf);
1500 txd->txd_mbuf = NULL;
1501 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1502 }
1503
1504 if (disable)
1505 rtk_disable(sc);
1506
1507 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1508 ifp->if_timer = 0;
1509 }
1510
1511 static void
1512 rtk_tick(void *arg)
1513 {
1514 struct rtk_softc *sc = arg;
1515 int s;
1516
1517 s = splnet();
1518 mii_tick(&sc->mii);
1519 splx(s);
1520
1521 callout_schedule(&sc->rtk_tick_ch, hz);
1522 }
Cache object: 8a70cc01e9eeec16e6386e9e9a6d27b5
|