/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/dev/re/if_re.c 242909 2012-11-12 07:47:19Z dim $");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */
/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family; however, it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */
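
/*
 * For example, on those 8169-class parts the largest usable jumbo MTU
 * can be requested from userland with (re0 being a placeholder unit
 * name):
 *
 *	ifconfig re0 mtu 7422
 */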

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <pci/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
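
/*
 * These are boot-time tunables; for example, a hypothetical
 * /boot/loader.conf could contain:
 *
 *	hw.re.msi_disable="1"		# fall back from MSI to INTx
 *	hw.re.prefer_iomap="1"		# prefer I/O space over memory space
 */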

#define RE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 810xE PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static const struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K },
	{ 0, 0, NULL, 0 }
};

static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static int re_allocmem		(device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
				(struct rl_softc *, int);
static int re_newbuf		(struct rl_softc *, int);
static int re_jumbo_newbuf	(struct rl_softc *, int);
static int re_rx_list_init	(struct rl_softc *);
static int re_jrx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *, int *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll		(struct ifnet *, enum poll_cmd, int);
static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_intr_msi		(void *);
static void re_tick		(void *);
static void re_int_task		(void *, int);
static void re_start		(struct ifnet *);
static void re_start_locked	(struct ifnet *);
static int re_ioctl		(struct ifnet *, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(struct ifnet *);
static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_set_jumbo	(struct rl_softc *, int);
static void re_set_rxmode	(struct rl_softc *);
static void re_reset		(struct rl_softc *);
static void re_setwol		(struct rl_softc *);
static void re_clrwol		(struct rl_softc *);
static void re_set_linkspeed	(struct rl_softc *);

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

static void re_add_sysctls	(struct rl_softc *);
static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	DEVMETHOD_END
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

static devclass_t re_devclass;

DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}
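
/*
 * A concrete sketch of the above, assuming a 93C46-style part
 * (rl_eewidth == 6 address bits): the word assembled in 'd' is the
 * READ opcode (RL_9346_READ) shifted left past the address bits, and
 * the loop clocks it out MSB first, one EE_CLK strobe per bit.
 */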

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	u_int16_t word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}
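
/*
 * re_attach() below uses this to pull the station address out of the
 * EEPROM as three little-endian words:
 *
 *	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
 */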

static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (0);
}
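
/*
 * Both GMII accessors above drive the indirect RL_PHYAR register: the
 * MII register number goes in the upper half above bit 16, the data in
 * the low 16 bits (RL_PHYAR_PHYDATA), and RL_PHYAR_BUSY acts as the
 * handshake flag -- set by hardware when read data is ready, cleared
 * once a write has completed.
 */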

static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}

static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide an interface to the
	 * Tx/Rx MACs for the resolved speed, duplex and flow-control
	 * parameters.
	 */
}

/*
 * Set the RX configuration and 64-bit multicast hash filter.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t h, rxfilt;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardware, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);
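
	/*
	 * Worked example: the top 6 bits of the big-endian CRC select
	 * one of 64 filter bits; a CRC of 0xb6000000 gives h = 45,
	 * which sets bit 13 of hashes[1] (the RL_MAR4 register).
	 */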

	if (hashes[0] != 0 || hashes[1] != 0) {
		/*
		 * For some unfathomable reason, RealTek decided to
		 * reverse the order of the multicast hash registers
		 * in the PCI Express parts.  This means we have to
		 * write the hash pattern in reverse order for those
		 * devices.
		 */
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
			h = bswap32(hashes[0]);
			hashes[0] = bswap32(hashes[1]);
			hashes[1] = h;
		}
		rxfilt |= RL_RXCFG_RX_MULTI;
	}

done:
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

static void
re_reset(struct rl_softc *sc)
{
	int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}
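
/*
 * Note: the 0x82 poke and the PHY register 0x0b clear above are
 * chip-specific post-reset fixups whose exact semantics are, as far
 * as we know, undocumented; they are only applied to the revisions
 * flagged as needing them.
 */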

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus; however, for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board); however, we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_desc *cur_rx;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, error = 0, phyaddr;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	re_init_locked(sc);
	sc->rl_flags |= RL_FLAG_LINK;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);

	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(device_t dev)
{
	const struct rl_type *t;
	uint16_t devid, vendor;
	uint16_t revid, sdevid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);
	sdevid = pci_get_subdevice(dev);

	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			return (ENXIO);
		}
	}

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid != 0x20) {
			/* 8139, let rl(4) take care of this device. */
			return (ENXIO);
		}
	}

	t = re_devs;
	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */

static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}
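
/*
 * This is the callback handed to bus_dmamap_load() for the ring and
 * statistics buffers below; each caller passes a pointer to the
 * bus_addr_t it wants filled in, e.g. &sc->rl_ldata.rl_tx_list_addr.
 */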

static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
	bus_addr_t lowaddr;
	bus_size_t rx_list_size, tx_list_size;
	int error;
	int i;

	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
	 * register should be set. However some RealTek chips are known
	 * to be buggy on DAC handling, therefore disable DAC by limiting
	 * DMA address space to 32bit. PCIe variants of RealTek chips
	 * may not have the limitation.
	 */
	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for RX mbufs.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
		    &sc->rl_ldata.rl_jrx_mtag);
		if (error) {
			device_printf(dev,
			    "could not allocate jumbo RX DMA tag\n");
			return (error);
		}
	}
	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */

	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    tx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */

	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    rx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
		    &sc->rl_ldata.rl_jrx_sparemap);
		if (error) {
			device_printf(dev,
			    "could not create spare DMA map for jumbo RX\n");
			return (error);
		}
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
			if (error) {
				device_printf(dev,
				    "could not create DMA map for jumbo RX\n");
				return (error);
			}
		}
	}
	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	/* Create DMA map for statistics. */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
	    &sc->rl_ldata.rl_stag);
	if (error) {
		device_printf(dev, "could not create statistics DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory for statistics. */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
	    (void **)&sc->rl_ldata.rl_stats,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_smap);
	if (error) {
		device_printf(dev,
		    "could not allocate statistics DMA memory\n");
		return (error);
	}
	/* Load the map for statistics. */
	sc->rl_ldata.rl_stats_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
	    &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
		device_printf(dev, "could not load statistics DMA memory\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[ETHER_ADDR_LEN / 2];
	struct rl_softc *sc;
	struct ifnet *ifp;
	const struct rl_hwrev *hw_rev;
	u_int32_t cap, ctl;
	int hwrev;
	u_int16_t devid, re_did = 0;
	int error = 0, i, phy, rid;
	int msic, msixc, reg;
	uint8_t cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over IO space. However,
	 * because the RTL8169SC does not seem to work when memory mapping
	 * is used, always activate I/O mapping for that chip.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		sc->rl_expcap = reg;
	}
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = 1;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Disable ASPM L0S/L1. */
	if (sc->rl_expcap != 0) {
		cap = pci_read_config(dev, sc->rl_expcap +
		    PCIER_LINK_CAP, 2);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = pci_read_config(dev, sc->rl_expcap +
			    PCIER_LINK_CTL, 2);
			if ((ctl & 0x0003) != 0) {
				ctl &= ~0x0003;
				pci_write_config(dev, sc->rl_expcap +
				    PCIER_LINK_CTL, ctl, 2);
				device_printf(dev, "ASPM disabled\n");
			}
		} else
			device_printf(dev, "no ASPM capability\n");
	}
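
	/*
	 * (The 0x0003 mask used above covers the L0s/L1 ASPM enable
	 * bits in the low bits of the PCIe Link Control register.)
	 */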

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if ((hwrev & 0x00700000) == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E_VL:
	case RL_HWREV_8168F:
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, sc->rl_cfg1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
	cfg = CSR_READ_1(sc, sc->rl_cfg5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (hw_rev->rl_rev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	/*
	 * The RTL8168/8111C generates frames with a bad IP checksum
	 * if the packet has IP options, so disable TX IP checksum
	 * offloading on those chips.
	 */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2)
		ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
	else
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
	ifp->if_hwassist |= CSUM_TSO;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_init = re_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

#define RE_PHYAD_INTERNAL	0

	/* Do MII setup. */
	phy = RE_PHYAD_INTERNAL;
	if (sc->rl_type == RL_8169)
		phy = 1;
	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
	    re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	/* Enable WOL if PM is supported. */
	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
	/*
	 * Don't enable TSO by default.  It is known to generate
	 * corrupted TCP segments (bad TCP options) under certain
	 * circumstances.
	 */
	ifp->if_hwassist &= ~CSUM_TSO;
	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */

	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

#ifdef RE_TX_MODERATION
	intr_filter = 1;
#endif
	/* Hook interrupt last to avoid having to lock softc */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
		    &sc->rl_intrhand[0]);
	} else {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:

	if (error)
		re_detach(dev);

	return (error);
}
1688
1689 /*
1690 * Shutdown hardware and free up resources. This can be called any
1691 * time after the mutex has been initialized. It is called in both
1692 * the error case in attach and the normal detach case so it needs
1693 * to be careful about only freeing resources that have actually been
1694 * allocated.
1695 */
1696 static int
1697 re_detach(device_t dev)
1698 {
1699 struct rl_softc *sc;
1700 struct ifnet *ifp;
1701 int i, rid;
1702
1703 sc = device_get_softc(dev);
1704 ifp = sc->rl_ifp;
1705 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1706
1707 /* These should only be active if attach succeeded */
1708 if (device_is_attached(dev)) {
1709 #ifdef DEVICE_POLLING
1710 if (ifp->if_capenable & IFCAP_POLLING)
1711 ether_poll_deregister(ifp);
1712 #endif
1713 RL_LOCK(sc);
1714 #if 0
1715 sc->suspended = 1;
1716 #endif
1717 re_stop(sc);
1718 RL_UNLOCK(sc);
1719 callout_drain(&sc->rl_stat_callout);
1720 taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1721 /*
1722 * Force off the IFF_UP flag here, in case someone
1723 * still had a BPF descriptor attached to this
1724 * interface. If they do, ether_ifdetach() will cause
1725 * the BPF code to try and clear the promisc mode
1726 * flag, which will bubble down to re_ioctl(),
1727 * which will try to call re_init() again. This will
1728 * turn the NIC back on and restart the MII ticker,
1729 * which will panic the system when the kernel tries
1730 * to invoke the re_tick() function that isn't there
1731 * anymore.
1732 */
1733 ifp->if_flags &= ~IFF_UP;
1734 ether_ifdetach(ifp);
1735 }
1736 if (sc->rl_miibus)
1737 device_delete_child(dev, sc->rl_miibus);
1738 bus_generic_detach(dev);
1739
1740 /*
1741 * The rest is resource deallocation, so we should already be
1742 * stopped here.
1743 */
1744
1745 if (sc->rl_intrhand[0] != NULL) {
1746 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1747 sc->rl_intrhand[0] = NULL;
1748 }
1749 if (ifp != NULL)
1750 if_free(ifp);
1751 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1752 rid = 0;
1753 else
1754 rid = 1;
1755 if (sc->rl_irq[0] != NULL) {
1756 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1757 sc->rl_irq[0] = NULL;
1758 }
1759 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1760 pci_release_msi(dev);
1761 if (sc->rl_res_pba) {
1762 rid = PCIR_BAR(4);
1763 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1764 }
1765 if (sc->rl_res)
1766 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1767 sc->rl_res);
1768
1769 /* Unload and free the RX DMA ring memory and map */
1770
1771 if (sc->rl_ldata.rl_rx_list_tag) {
1772 if (sc->rl_ldata.rl_rx_list_map)
1773 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1774 sc->rl_ldata.rl_rx_list_map);
1775 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
1776 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1777 sc->rl_ldata.rl_rx_list,
1778 sc->rl_ldata.rl_rx_list_map);
1779 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1780 }
1781
1782 /* Unload and free the TX DMA ring memory and map */
1783
1784 if (sc->rl_ldata.rl_tx_list_tag) {
1785 if (sc->rl_ldata.rl_tx_list_map)
1786 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1787 sc->rl_ldata.rl_tx_list_map);
1788 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
1789 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1790 sc->rl_ldata.rl_tx_list,
1791 sc->rl_ldata.rl_tx_list_map);
1792 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1793 }
1794
1795 /* Destroy all the RX and TX buffer maps */
1796
1797 if (sc->rl_ldata.rl_tx_mtag) {
1798 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1799 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1800 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1801 sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1802 }
1803 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1804 }
1805 if (sc->rl_ldata.rl_rx_mtag) {
1806 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1807 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1808 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1809 sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1810 }
1811 if (sc->rl_ldata.rl_rx_sparemap)
1812 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1813 sc->rl_ldata.rl_rx_sparemap);
1814 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1815 }
1816 if (sc->rl_ldata.rl_jrx_mtag) {
1817 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1818 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1819 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1820 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1821 }
1822 if (sc->rl_ldata.rl_jrx_sparemap)
1823 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1824 sc->rl_ldata.rl_jrx_sparemap);
1825 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1826 }
1827 /* Unload and free the stats buffer and map */
1828
1829 if (sc->rl_ldata.rl_stag) {
1830 if (sc->rl_ldata.rl_smap)
1831 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1832 sc->rl_ldata.rl_smap);
1833 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
1834 bus_dmamem_free(sc->rl_ldata.rl_stag,
1835 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1836 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1837 }
1838
1839 if (sc->rl_parent_tag)
1840 bus_dma_tag_destroy(sc->rl_parent_tag);
1841
1842 mtx_destroy(&sc->rl_mtx);
1843
1844 return (0);
1845 }
1846
1847 static __inline void
1848 re_discard_rxbuf(struct rl_softc *sc, int idx)
1849 {
1850 struct rl_desc *desc;
1851 struct rl_rxdesc *rxd;
1852 uint32_t cmdstat;
1853
1854 if (sc->rl_ifp->if_mtu > RL_MTU &&
1855 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1856 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1857 else
1858 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1859 desc = &sc->rl_ldata.rl_rx_list[idx];
1860 desc->rl_vlanctl = 0;
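	/*
	 * Hand the descriptor back to the chip as-is: reuse the
	 * previously recorded buffer size, re-set the end-of-ring
	 * marker on the last slot, and return ownership via OWN.
	 */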
1861 cmdstat = rxd->rx_size;
1862 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1863 cmdstat |= RL_RDESC_CMD_EOR;
1864 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1865 }
1866
1867 static int
1868 re_newbuf(struct rl_softc *sc, int idx)
1869 {
1870 struct mbuf *m;
1871 struct rl_rxdesc *rxd;
1872 bus_dma_segment_t segs[1];
1873 bus_dmamap_t map;
1874 struct rl_desc *desc;
1875 uint32_t cmdstat;
1876 int error, nsegs;
1877
1878 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1879 if (m == NULL)
1880 return (ENOBUFS);
1881
1882 m->m_len = m->m_pkthdr.len = MCLBYTES;
1883 #ifdef RE_FIXUP_RX
1884 /*
1885 * This is part of an evil trick to deal with non-x86 platforms.
1886 * The RealTek chip requires RX buffers to be aligned on 64-bit
1887 * boundaries, but that will hose non-x86 machines. To get around
1888 * this, we leave some empty space at the start of each buffer
1889 * and for non-x86 hosts, we copy the buffer back six bytes
1890 * to achieve word alignment. This is slightly more efficient
1891 * than allocating a new buffer, copying the contents, and
1892 * discarding the old buffer.
1893 */
1894 m_adj(m, RE_ETHER_ALIGN);
1895 #endif
1896 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1897 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1898 if (error != 0) {
1899 m_freem(m);
1900 return (ENOBUFS);
1901 }
1902 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1903
1904 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1905 if (rxd->rx_m != NULL) {
1906 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1907 BUS_DMASYNC_POSTREAD);
1908 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1909 }
1910
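	/*
	 * The new mbuf was loaded into the spare map above, so a
	 * failed load never disturbs the slot's existing mapping;
	 * now swap the spare map into the ring slot and keep the
	 * old map as the new spare.
	 */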
1911 rxd->rx_m = m;
1912 map = rxd->rx_dmamap;
1913 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1914 rxd->rx_size = segs[0].ds_len;
1915 sc->rl_ldata.rl_rx_sparemap = map;
1916 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1917 BUS_DMASYNC_PREREAD);
1918
1919 desc = &sc->rl_ldata.rl_rx_list[idx];
1920 desc->rl_vlanctl = 0;
1921 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1922 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1923 cmdstat = segs[0].ds_len;
1924 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1925 cmdstat |= RL_RDESC_CMD_EOR;
1926 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1927
1928 return (0);
1929 }
1930
1931 static int
1932 re_jumbo_newbuf(struct rl_softc *sc, int idx)
1933 {
1934 struct mbuf *m;
1935 struct rl_rxdesc *rxd;
1936 bus_dma_segment_t segs[1];
1937 bus_dmamap_t map;
1938 struct rl_desc *desc;
1939 uint32_t cmdstat;
1940 int error, nsegs;
1941
1942 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1943 if (m == NULL)
1944 return (ENOBUFS);
1945 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1946 #ifdef RE_FIXUP_RX
1947 m_adj(m, RE_ETHER_ALIGN);
1948 #endif
1949 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
1950 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1951 if (error != 0) {
1952 m_freem(m);
1953 return (ENOBUFS);
1954 }
1955 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1956
1957 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1958 if (rxd->rx_m != NULL) {
1959 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1960 BUS_DMASYNC_POSTREAD);
1961 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
1962 }
1963
1964 rxd->rx_m = m;
1965 map = rxd->rx_dmamap;
1966 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
1967 rxd->rx_size = segs[0].ds_len;
1968 sc->rl_ldata.rl_jrx_sparemap = map;
1969 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1970 BUS_DMASYNC_PREREAD);
1971
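	/*
	 * Note that the jumbo path shares the hardware descriptor
	 * ring (rl_rx_list) with the standard path; only the mbufs,
	 * DMA maps and tags differ.
	 */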
1972 desc = &sc->rl_ldata.rl_rx_list[idx];
1973 desc->rl_vlanctl = 0;
1974 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1975 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1976 cmdstat = segs[0].ds_len;
1977 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1978 cmdstat |= RL_RDESC_CMD_EOR;
1979 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1980
1981 return (0);
1982 }
1983
1984 #ifdef RE_FIXUP_RX
1985 static __inline void
1986 re_fixup_rx(struct mbuf *m)
1987 {
1988 int i;
1989 uint16_t *src, *dst;
1990
1991 src = mtod(m, uint16_t *);
1992 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
1993
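	/*
	 * dst is RE_ETHER_ALIGN - ETHER_ALIGN (6) bytes below src, so
	 * a forward word-by-word copy is safe despite the overlap;
	 * the extra iteration picks up any trailing odd byte.
	 */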
1994 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1995 *dst++ = *src++;
1996
1997 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
1998 }
1999 #endif
2000
2001 static int
2002 re_tx_list_init(struct rl_softc *sc)
2003 {
2004 struct rl_desc *desc;
2005 int i;
2006
2007 RL_LOCK_ASSERT(sc);
2008
2009 bzero(sc->rl_ldata.rl_tx_list,
2010 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2011 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2012 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2013 /* Set EOR. */
2014 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2015 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2016
2017 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2018 sc->rl_ldata.rl_tx_list_map,
2019 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2020
2021 sc->rl_ldata.rl_tx_prodidx = 0;
2022 sc->rl_ldata.rl_tx_considx = 0;
2023 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2024
2025 return (0);
2026 }
2027
2028 static int
2029 re_rx_list_init(struct rl_softc *sc)
2030 {
2031 int error, i;
2032
2033 bzero(sc->rl_ldata.rl_rx_list,
2034 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2035 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2036 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2037 if ((error = re_newbuf(sc, i)) != 0)
2038 return (error);
2039 }
2040
2041 /* Flush the RX descriptors */
2042
2043 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2044 sc->rl_ldata.rl_rx_list_map,
2045 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2046
2047 sc->rl_ldata.rl_rx_prodidx = 0;
2048 sc->rl_head = sc->rl_tail = NULL;
2049 sc->rl_int_rx_act = 0;
2050
2051 return (0);
2052 }
2053
2054 static int
2055 re_jrx_list_init(struct rl_softc *sc)
2056 {
2057 int error, i;
2058
2059 bzero(sc->rl_ldata.rl_rx_list,
2060 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2061 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2062 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2063 if ((error = re_jumbo_newbuf(sc, i)) != 0)
2064 return (error);
2065 }
2066
2067 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2068 sc->rl_ldata.rl_rx_list_map,
2069 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2070
2071 sc->rl_ldata.rl_rx_prodidx = 0;
2072 sc->rl_head = sc->rl_tail = NULL;
2073 sc->rl_int_rx_act = 0;
2074
2075 return (0);
2076 }
2077
2078 /*
2079 * RX handler for C+ and 8169. For the gigE chips, we support
2080 * the reception of jumbo frames that have been fragmented
2081 * across multiple 2K mbuf cluster buffers.
2082 */
2083 static int
2084 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2085 {
2086 struct mbuf *m;
2087 struct ifnet *ifp;
2088 int i, rxerr, total_len;
2089 struct rl_desc *cur_rx;
2090 u_int32_t rxstat, rxvlan;
2091 int jumbo, maxpkt = 16, rx_npkts = 0;
2092
2093 RL_LOCK_ASSERT(sc);
2094
2095 ifp = sc->rl_ifp;
2096 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2097 jumbo = 1;
2098 else
2099 jumbo = 0;
2100
2101 /* Invalidate the descriptor memory */
2102
2103 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2104 sc->rl_ldata.rl_rx_list_map,
2105 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2106
2107 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2108 i = RL_RX_DESC_NXT(sc, i)) {
2109 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2110 break;
2111 cur_rx = &sc->rl_ldata.rl_rx_list[i];
2112 rxstat = le32toh(cur_rx->rl_cmdstat);
2113 if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2114 break;
2115 total_len = rxstat & sc->rl_rxlenmask;
2116 rxvlan = le32toh(cur_rx->rl_vlanctl);
2117 if (jumbo != 0)
2118 m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2119 else
2120 m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2121
2122 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2123 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2124 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2125 /*
2126 * RTL8168C or later controllers do not
2127 			 * support multi-fragment packets.
2128 */
2129 re_discard_rxbuf(sc, i);
2130 continue;
2131 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2132 if (re_newbuf(sc, i) != 0) {
2133 /*
2134 * If this is part of a multi-fragment packet,
2135 * discard all the pieces.
2136 */
2137 if (sc->rl_head != NULL) {
2138 m_freem(sc->rl_head);
2139 sc->rl_head = sc->rl_tail = NULL;
2140 }
2141 re_discard_rxbuf(sc, i);
2142 continue;
2143 }
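			/*
			 * Non-EOF descriptor: this fragment fills the
			 * whole 2K cluster, so chain it onto the
			 * in-progress packet and keep collecting.
			 */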
2144 m->m_len = RE_RX_DESC_BUFLEN;
2145 if (sc->rl_head == NULL)
2146 sc->rl_head = sc->rl_tail = m;
2147 else {
2148 m->m_flags &= ~M_PKTHDR;
2149 sc->rl_tail->m_next = m;
2150 sc->rl_tail = m;
2151 }
2152 continue;
2153 }
2154
2155 /*
2156 * NOTE: for the 8139C+, the frame length field
2157 * is always 12 bits in size, but for the gigE chips,
2158 * it is 13 bits (since the max RX frame length is 16K).
2159 * Unfortunately, all 32 bits in the status word
2160 * were already used, so to make room for the extra
2161 * length bit, RealTek took out the 'frame alignment
2162 * error' bit and shifted the other status bits
2163 * over one slot. The OWN, EOR, FS and LS bits are
2164 * still in the same places. We have already extracted
2165 * the frame length and checked the OWN bit, so rather
2166 * than using an alternate bit mapping, we shift the
2167 * status bits one space to the right so we can evaluate
2168 * them using the 8169 status as though it was in the
2169 * same format as that of the 8139C+.
2170 */
2171 if (sc->rl_type == RL_8169)
2172 rxstat >>= 1;
2173
2174 /*
2175 		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2176 		 * set; if _GIANT is the only error bit set, the frame is still valid.
2177 */
2178 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2179 rxerr = 1;
2180 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2181 total_len > 8191 &&
2182 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2183 rxerr = 0;
2184 if (rxerr != 0) {
2185 ifp->if_ierrors++;
2186 /*
2187 * If this is part of a multi-fragment packet,
2188 * discard all the pieces.
2189 */
2190 if (sc->rl_head != NULL) {
2191 m_freem(sc->rl_head);
2192 sc->rl_head = sc->rl_tail = NULL;
2193 }
2194 re_discard_rxbuf(sc, i);
2195 continue;
2196 }
2197 }
2198
2199 /*
2200 * If allocating a replacement mbuf fails,
2201 * reload the current one.
2202 */
2203 if (jumbo != 0)
2204 rxerr = re_jumbo_newbuf(sc, i);
2205 else
2206 rxerr = re_newbuf(sc, i);
2207 if (rxerr != 0) {
2208 ifp->if_iqdrops++;
2209 if (sc->rl_head != NULL) {
2210 m_freem(sc->rl_head);
2211 sc->rl_head = sc->rl_tail = NULL;
2212 }
2213 re_discard_rxbuf(sc, i);
2214 continue;
2215 }
2216
2217 if (sc->rl_head != NULL) {
2218 if (jumbo != 0)
2219 m->m_len = total_len;
2220 else {
2221 m->m_len = total_len % RE_RX_DESC_BUFLEN;
2222 if (m->m_len == 0)
2223 m->m_len = RE_RX_DESC_BUFLEN;
2224 }
2225 /*
2226 			 * Special case: if there are 4 bytes or less
2227 			 * in this buffer, the mbuf can be discarded:
2228 			 * the last 4 bytes are the CRC, which we don't
2229 			 * care about anyway.
2230 */
2231 if (m->m_len <= ETHER_CRC_LEN) {
2232 sc->rl_tail->m_len -=
2233 (ETHER_CRC_LEN - m->m_len);
2234 m_freem(m);
2235 } else {
2236 m->m_len -= ETHER_CRC_LEN;
2237 m->m_flags &= ~M_PKTHDR;
2238 sc->rl_tail->m_next = m;
2239 }
2240 m = sc->rl_head;
2241 sc->rl_head = sc->rl_tail = NULL;
2242 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2243 } else
2244 m->m_pkthdr.len = m->m_len =
2245 (total_len - ETHER_CRC_LEN);
2246
2247 #ifdef RE_FIXUP_RX
2248 re_fixup_rx(m);
2249 #endif
2250 ifp->if_ipackets++;
2251 m->m_pkthdr.rcvif = ifp;
2252
2253 /* Do RX checksumming if enabled */
2254
2255 if (ifp->if_capenable & IFCAP_RXCSUM) {
2256 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2257 /* Check IP header checksum */
2258 if (rxstat & RL_RDESC_STAT_PROTOID)
2259 m->m_pkthdr.csum_flags |=
2260 CSUM_IP_CHECKED;
2261 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2262 m->m_pkthdr.csum_flags |=
2263 CSUM_IP_VALID;
2264
2265 /* Check TCP/UDP checksum */
2266 if ((RL_TCPPKT(rxstat) &&
2267 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2268 (RL_UDPPKT(rxstat) &&
2269 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2270 m->m_pkthdr.csum_flags |=
2271 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2272 m->m_pkthdr.csum_data = 0xffff;
2273 }
2274 } else {
2275 /*
2276 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2277 */
2278 if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2279 (rxvlan & RL_RDESC_IPV4))
2280 m->m_pkthdr.csum_flags |=
2281 CSUM_IP_CHECKED;
2282 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2283 (rxvlan & RL_RDESC_IPV4))
2284 m->m_pkthdr.csum_flags |=
2285 CSUM_IP_VALID;
2286 if (((rxstat & RL_RDESC_STAT_TCP) &&
2287 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2288 ((rxstat & RL_RDESC_STAT_UDP) &&
2289 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2290 m->m_pkthdr.csum_flags |=
2291 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2292 m->m_pkthdr.csum_data = 0xffff;
2293 }
2294 }
2295 }
2296 maxpkt--;
2297 if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2298 m->m_pkthdr.ether_vtag =
2299 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2300 m->m_flags |= M_VLANTAG;
2301 }
2302 RL_UNLOCK(sc);
2303 (*ifp->if_input)(ifp, m);
2304 RL_LOCK(sc);
2305 rx_npkts++;
2306 }
2307
2308 /* Flush the RX DMA ring */
2309
2310 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2311 sc->rl_ldata.rl_rx_list_map,
2312 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2313
2314 sc->rl_ldata.rl_rx_prodidx = i;
2315
2316 if (rx_npktsp != NULL)
2317 *rx_npktsp = rx_npkts;
2318 if (maxpkt)
2319 return (EAGAIN);
2320
2321 return (0);
2322 }
2323
2324 static void
2325 re_txeof(struct rl_softc *sc)
2326 {
2327 struct ifnet *ifp;
2328 struct rl_txdesc *txd;
2329 u_int32_t txstat;
2330 int cons;
2331
2332 cons = sc->rl_ldata.rl_tx_considx;
2333 if (cons == sc->rl_ldata.rl_tx_prodidx)
2334 return;
2335
2336 ifp = sc->rl_ifp;
2337 /* Invalidate the TX descriptor list */
2338 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2339 sc->rl_ldata.rl_tx_list_map,
2340 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2341
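	/*
	 * Walk the ring from the consumer index until we reach a
	 * descriptor the chip still owns; everything before that
	 * point has been transmitted.
	 */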
2342 for (; cons != sc->rl_ldata.rl_tx_prodidx;
2343 cons = RL_TX_DESC_NXT(sc, cons)) {
2344 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2345 if (txstat & RL_TDESC_STAT_OWN)
2346 break;
2347 /*
2348 * We only stash mbufs in the last descriptor
2349 * in a fragment chain, which also happens to
2350 * be the only place where the TX status bits
2351 * are valid.
2352 */
2353 if (txstat & RL_TDESC_CMD_EOF) {
2354 txd = &sc->rl_ldata.rl_tx_desc[cons];
2355 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2356 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2357 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2358 txd->tx_dmamap);
2359 KASSERT(txd->tx_m != NULL,
2360 ("%s: freeing NULL mbufs!", __func__));
2361 m_freem(txd->tx_m);
2362 txd->tx_m = NULL;
2363 if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2364 RL_TDESC_STAT_COLCNT))
2365 ifp->if_collisions++;
2366 if (txstat & RL_TDESC_STAT_TXERRSUM)
2367 ifp->if_oerrors++;
2368 else
2369 ifp->if_opackets++;
2370 }
2371 sc->rl_ldata.rl_tx_free++;
2372 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2373 }
2374 sc->rl_ldata.rl_tx_considx = cons;
2375
2376 /* No changes made to the TX ring, so no flush needed */
2377
2378 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2379 #ifdef RE_TX_MODERATION
2380 /*
2381 * If not all descriptors have been reaped yet, reload
2382 * the timer so that we will eventually get another
2383 * interrupt that will cause us to re-enter this routine.
2384 * This is done in case the transmitter has gone idle.
2385 */
2386 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2387 #endif
2388 } else
2389 sc->rl_watchdog_timer = 0;
2390 }
2391
2392 static void
2393 re_tick(void *xsc)
2394 {
2395 struct rl_softc *sc;
2396 struct mii_data *mii;
2397
2398 sc = xsc;
2399
2400 RL_LOCK_ASSERT(sc);
2401
2402 mii = device_get_softc(sc->rl_miibus);
2403 mii_tick(mii);
2404 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2405 re_miibus_statchg(sc->rl_dev);
2406 /*
2407 	 * Reclaim transmitted frames here. Strictly speaking this is
2408 	 * not necessary, but it ensures periodic reclamation even when
2409 	 * the Tx completion interrupt is lost, which seems to happen on
2410 	 * PCIe-based controllers under certain situations.
2411 */
2412 re_txeof(sc);
2413 re_watchdog(sc);
2414 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2415 }
2416
2417 #ifdef DEVICE_POLLING
2418 static int
2419 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2420 {
2421 struct rl_softc *sc = ifp->if_softc;
2422 int rx_npkts = 0;
2423
2424 RL_LOCK(sc);
2425 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2426 rx_npkts = re_poll_locked(ifp, cmd, count);
2427 RL_UNLOCK(sc);
2428 return (rx_npkts);
2429 }
2430
2431 static int
2432 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2433 {
2434 struct rl_softc *sc = ifp->if_softc;
2435 int rx_npkts;
2436
2437 RL_LOCK_ASSERT(sc);
2438
2439 sc->rxcycles = count;
2440 re_rxeof(sc, &rx_npkts);
2441 re_txeof(sc);
2442
2443 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2444 re_start_locked(ifp);
2445
2446 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2447 u_int16_t status;
2448
2449 status = CSR_READ_2(sc, RL_ISR);
2450 if (status == 0xffff)
2451 return (rx_npkts);
2452 if (status)
2453 CSR_WRITE_2(sc, RL_ISR, status);
2454 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2455 (sc->rl_flags & RL_FLAG_PCIE))
2456 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2457
2458 /*
2459 * XXX check behaviour on receiver stalls.
2460 */
2461
2462 if (status & RL_ISR_SYSTEM_ERR) {
2463 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2464 re_init_locked(sc);
2465 }
2466 }
2467 return (rx_npkts);
2468 }
2469 #endif /* DEVICE_POLLING */
2470
2471 static int
2472 re_intr(void *arg)
2473 {
2474 struct rl_softc *sc;
2475 uint16_t status;
2476
2477 sc = arg;
2478
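	/*
	 * This runs as an interrupt filter: mask all interrupts and
	 * defer the real work to re_int_task() in taskqueue context.
	 */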
2479 status = CSR_READ_2(sc, RL_ISR);
2480 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2481 return (FILTER_STRAY);
2482 CSR_WRITE_2(sc, RL_IMR, 0);
2483
2484 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2485
2486 return (FILTER_HANDLED);
2487 }
2488
2489 static void
2490 re_int_task(void *arg, int npending)
2491 {
2492 struct rl_softc *sc;
2493 struct ifnet *ifp;
2494 u_int16_t status;
2495 int rval = 0;
2496
2497 sc = arg;
2498 ifp = sc->rl_ifp;
2499
2500 RL_LOCK(sc);
2501
2502 status = CSR_READ_2(sc, RL_ISR);
2503 CSR_WRITE_2(sc, RL_ISR, status);
2504
2505 if (sc->suspended ||
2506 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2507 RL_UNLOCK(sc);
2508 return;
2509 }
2510
2511 #ifdef DEVICE_POLLING
2512 if (ifp->if_capenable & IFCAP_POLLING) {
2513 RL_UNLOCK(sc);
2514 return;
2515 }
2516 #endif
2517
2518 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2519 rval = re_rxeof(sc, NULL);
2520
2521 /*
2522 * Some chips will ignore a second TX request issued
2523 * while an existing transmission is in progress. If
2524 * the transmitter goes idle but there are still
2525 * packets waiting to be sent, we need to restart the
2526 * channel here to flush them out. This only seems to
2527 * be required with the PCIe devices.
2528 */
2529 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2530 (sc->rl_flags & RL_FLAG_PCIE))
2531 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2532 if (status & (
2533 #ifdef RE_TX_MODERATION
2534 RL_ISR_TIMEOUT_EXPIRED|
2535 #else
2536 RL_ISR_TX_OK|
2537 #endif
2538 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2539 re_txeof(sc);
2540
2541 if (status & RL_ISR_SYSTEM_ERR) {
2542 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2543 re_init_locked(sc);
2544 }
2545
2546 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2547 re_start_locked(ifp);
2548
2549 RL_UNLOCK(sc);
2550
2551 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2552 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2553 return;
2554 }
2555
2556 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2557 }
2558
2559 static void
2560 re_intr_msi(void *xsc)
2561 {
2562 struct rl_softc *sc;
2563 struct ifnet *ifp;
2564 uint16_t intrs, status;
2565
2566 sc = xsc;
2567 RL_LOCK(sc);
2568
2569 ifp = sc->rl_ifp;
2570 #ifdef DEVICE_POLLING
2571 if (ifp->if_capenable & IFCAP_POLLING) {
2572 RL_UNLOCK(sc);
2573 return;
2574 }
2575 #endif
2576 /* Disable interrupts. */
2577 CSR_WRITE_2(sc, RL_IMR, 0);
2578 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2579 RL_UNLOCK(sc);
2580 return;
2581 }
2582
2583 intrs = RL_INTRS_CPLUS;
2584 status = CSR_READ_2(sc, RL_ISR);
2585 CSR_WRITE_2(sc, RL_ISR, status);
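	/*
	 * While RX moderation is active, leave the RX interrupt
	 * sources masked; the one-shot timer armed below paces RX
	 * processing instead of per-packet interrupts.
	 */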
2586 if (sc->rl_int_rx_act > 0) {
2587 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2588 RL_ISR_RX_OVERRUN);
2589 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2590 RL_ISR_RX_OVERRUN);
2591 }
2592
2593 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2594 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2595 re_rxeof(sc, NULL);
2596 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2597 if (sc->rl_int_rx_mod != 0 &&
2598 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2599 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2600 /* Rearm one-shot timer. */
2601 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2602 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2603 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2604 sc->rl_int_rx_act = 1;
2605 } else {
2606 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2607 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2608 sc->rl_int_rx_act = 0;
2609 }
2610 }
2611 }
2612
2613 /*
2614 * Some chips will ignore a second TX request issued
2615 * while an existing transmission is in progress. If
2616 * the transmitter goes idle but there are still
2617 * packets waiting to be sent, we need to restart the
2618 * channel here to flush them out. This only seems to
2619 * be required with the PCIe devices.
2620 */
2621 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2622 (sc->rl_flags & RL_FLAG_PCIE))
2623 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2624 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2625 re_txeof(sc);
2626
2627 if (status & RL_ISR_SYSTEM_ERR) {
2628 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2629 re_init_locked(sc);
2630 }
2631
2632 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2633 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2634 re_start_locked(ifp);
2635 CSR_WRITE_2(sc, RL_IMR, intrs);
2636 }
2637 RL_UNLOCK(sc);
2638 }
2639
2640 static int
2641 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2642 {
2643 struct rl_txdesc *txd, *txd_last;
2644 bus_dma_segment_t segs[RL_NTXSEGS];
2645 bus_dmamap_t map;
2646 struct mbuf *m_new;
2647 struct rl_desc *desc;
2648 int nsegs, prod;
2649 int i, error, ei, si;
2650 int padlen;
2651 uint32_t cmdstat, csum_flags, vlanctl;
2652
2653 RL_LOCK_ASSERT(sc);
2654 M_ASSERTPKTHDR((*m_head));
2655
2656 /*
2657 * With some of the RealTek chips, using the checksum offload
2658 * support in conjunction with the autopadding feature results
2659 * in the transmission of corrupt frames. For example, if we
2660 * need to send a really small IP fragment that's less than 60
2661 * bytes in size, and IP header checksumming is enabled, the
2662 * resulting ethernet frame that appears on the wire will
2663 * have garbled payload. To work around this, if TX IP checksum
2664 * offload is enabled, we always manually pad short frames out
2665 * to the minimum ethernet frame size.
2666 */
2667 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2668 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2669 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2670 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2671 if (M_WRITABLE(*m_head) == 0) {
2672 /* Get a writable copy. */
2673 m_new = m_dup(*m_head, M_DONTWAIT);
2674 m_freem(*m_head);
2675 if (m_new == NULL) {
2676 *m_head = NULL;
2677 return (ENOBUFS);
2678 }
2679 *m_head = m_new;
2680 }
2681 if ((*m_head)->m_next != NULL ||
2682 M_TRAILINGSPACE(*m_head) < padlen) {
2683 m_new = m_defrag(*m_head, M_DONTWAIT);
2684 if (m_new == NULL) {
2685 m_freem(*m_head);
2686 *m_head = NULL;
2687 return (ENOBUFS);
2688 }
2689 } else
2690 m_new = *m_head;
2691
2692 /*
2693 * Manually pad short frames, and zero the pad space
2694 * to avoid leaking data.
2695 */
2696 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2697 m_new->m_pkthdr.len += padlen;
2698 m_new->m_len = m_new->m_pkthdr.len;
2699 *m_head = m_new;
2700 }
2701
2702 prod = sc->rl_ldata.rl_tx_prodidx;
2703 txd = &sc->rl_ldata.rl_tx_desc[prod];
2704 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2705 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2706 if (error == EFBIG) {
2707 m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
2708 if (m_new == NULL) {
2709 m_freem(*m_head);
2710 *m_head = NULL;
2711 return (ENOBUFS);
2712 }
2713 *m_head = m_new;
2714 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2715 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2716 if (error != 0) {
2717 m_freem(*m_head);
2718 *m_head = NULL;
2719 return (error);
2720 }
2721 } else if (error != 0)
2722 return (error);
2723 if (nsegs == 0) {
2724 m_freem(*m_head);
2725 *m_head = NULL;
2726 return (EIO);
2727 }
2728
2729 /* Check for number of available descriptors. */
2730 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2731 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2732 return (ENOBUFS);
2733 }
2734
2735 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2736 BUS_DMASYNC_PREWRITE);
2737
2738 /*
2739 * Set up checksum offload. Note: checksum offload bits must
2740 * appear in all descriptors of a multi-descriptor transmit
2741 	 * attempt; testing with an 8169 chip showed this to be a
2742 	 * hard requirement.
2743 */
2744 vlanctl = 0;
2745 csum_flags = 0;
2746 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2747 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2748 csum_flags |= RL_TDESC_CMD_LGSEND;
2749 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2750 RL_TDESC_CMD_MSSVALV2_SHIFT);
2751 } else {
2752 csum_flags |= RL_TDESC_CMD_LGSEND |
2753 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2754 RL_TDESC_CMD_MSSVAL_SHIFT);
2755 }
2756 } else {
2757 /*
2758 * Unconditionally enable IP checksum if TCP or UDP
2759 		 * checksum is required. Otherwise, the TCP/UDP
2760 		 * checksum setting has no effect.
2761 */
2762 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2763 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2764 csum_flags |= RL_TDESC_CMD_IPCSUM;
2765 if (((*m_head)->m_pkthdr.csum_flags &
2766 CSUM_TCP) != 0)
2767 csum_flags |= RL_TDESC_CMD_TCPCSUM;
2768 if (((*m_head)->m_pkthdr.csum_flags &
2769 CSUM_UDP) != 0)
2770 csum_flags |= RL_TDESC_CMD_UDPCSUM;
2771 } else {
2772 vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2773 if (((*m_head)->m_pkthdr.csum_flags &
2774 CSUM_TCP) != 0)
2775 vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2776 if (((*m_head)->m_pkthdr.csum_flags &
2777 CSUM_UDP) != 0)
2778 vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2779 }
2780 }
2781 }
2782
2783 /*
2784 * Set up hardware VLAN tagging. Note: vlan tag info must
2785 * appear in all descriptors of a multi-descriptor
2786 * transmission attempt.
2787 */
2788 if ((*m_head)->m_flags & M_VLANTAG)
2789 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2790 RL_TDESC_VLANCTL_TAG;
2791
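	/*
	 * OWN is set on every descriptor except the first one; the
	 * first descriptor's OWN bit is flipped last (below) so the
	 * chip cannot start on a partially built chain.
	 */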
2792 si = prod;
2793 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2794 desc = &sc->rl_ldata.rl_tx_list[prod];
2795 desc->rl_vlanctl = htole32(vlanctl);
2796 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2797 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2798 cmdstat = segs[i].ds_len;
2799 if (i != 0)
2800 cmdstat |= RL_TDESC_CMD_OWN;
2801 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2802 cmdstat |= RL_TDESC_CMD_EOR;
2803 desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2804 sc->rl_ldata.rl_tx_free--;
2805 }
2806 /* Update producer index. */
2807 sc->rl_ldata.rl_tx_prodidx = prod;
2808
2809 /* Set EOF on the last descriptor. */
2810 ei = RL_TX_DESC_PRV(sc, prod);
2811 desc = &sc->rl_ldata.rl_tx_list[ei];
2812 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2813
2814 desc = &sc->rl_ldata.rl_tx_list[si];
2815 /* Set SOF and transfer ownership of packet to the chip. */
2816 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2817
2818 /*
2819 	 * Ensure that the map for this transmission
2820 * is placed at the array index of the last descriptor
2821 * in this chain. (Swap last and first dmamaps.)
2822 */
2823 txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2824 map = txd->tx_dmamap;
2825 txd->tx_dmamap = txd_last->tx_dmamap;
2826 txd_last->tx_dmamap = map;
2827 txd_last->tx_m = *m_head;
2828
2829 return (0);
2830 }
2831
2832 static void
2833 re_start(struct ifnet *ifp)
2834 {
2835 struct rl_softc *sc;
2836
2837 sc = ifp->if_softc;
2838 RL_LOCK(sc);
2839 re_start_locked(ifp);
2840 RL_UNLOCK(sc);
2841 }
2842
2843 /*
2844 * Main transmit routine for C+ and gigE NICs.
2845 */
2846 static void
2847 re_start_locked(struct ifnet *ifp)
2848 {
2849 struct rl_softc *sc;
2850 struct mbuf *m_head;
2851 int queued;
2852
2853 sc = ifp->if_softc;
2854
2855 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2856 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2857 return;
2858
2859 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2860 sc->rl_ldata.rl_tx_free > 1;) {
2861 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2862 if (m_head == NULL)
2863 break;
2864
2865 if (re_encap(sc, &m_head) != 0) {
2866 if (m_head == NULL)
2867 break;
2868 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2869 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2870 break;
2871 }
2872
2873 /*
2874 * If there's a BPF listener, bounce a copy of this frame
2875 		 * to it.
2876 */
2877 ETHER_BPF_MTAP(ifp, m_head);
2878
2879 queued++;
2880 }
2881
2882 if (queued == 0) {
2883 #ifdef RE_TX_MODERATION
2884 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2885 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2886 #endif
2887 return;
2888 }
2889
2890 /* Flush the TX descriptors */
2891
2892 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2893 sc->rl_ldata.rl_tx_list_map,
2894 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2895
2896 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2897
2898 #ifdef RE_TX_MODERATION
2899 /*
2900 * Use the countdown timer for interrupt moderation.
2901 * 'TX done' interrupts are disabled. Instead, we reset the
2902 	 * countdown timer, which counts up until it reaches the
2903 	 * value in the TIMERINT register and then triggers an
2904 * interrupt. Each time we write to the TIMERCNT register,
2905 * the timer count is reset to 0.
2906 */
2907 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2908 #endif
2909
2910 /*
2911 * Set a timeout in case the chip goes out to lunch.
2912 */
2913 sc->rl_watchdog_timer = 5;
2914 }
2915
2916 static void
2917 re_set_jumbo(struct rl_softc *sc, int jumbo)
2918 {
2919
2920 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
2921 pci_set_max_read_req(sc->rl_dev, 4096);
2922 return;
2923 }
2924
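	/*
	 * The jumbo enable bits live in the config registers, which
	 * are only writable inside the WRITECFG/OFF EEPROM command
	 * bracket.
	 */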
2925 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2926 if (jumbo != 0) {
2927 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
2928 RL_CFG3_JUMBO_EN0);
2929 switch (sc->rl_hwrev->rl_rev) {
2930 case RL_HWREV_8168DP:
2931 break;
2932 case RL_HWREV_8168E:
2933 CSR_WRITE_1(sc, sc->rl_cfg4,
2934 CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
2935 break;
2936 default:
2937 CSR_WRITE_1(sc, sc->rl_cfg4,
2938 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
2939 }
2940 } else {
2941 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
2942 ~RL_CFG3_JUMBO_EN0);
2943 switch (sc->rl_hwrev->rl_rev) {
2944 case RL_HWREV_8168DP:
2945 break;
2946 case RL_HWREV_8168E:
2947 CSR_WRITE_1(sc, sc->rl_cfg4,
2948 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
2949 break;
2950 default:
2951 CSR_WRITE_1(sc, sc->rl_cfg4,
2952 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
2953 }
2954 }
2955 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2956
2957 switch (sc->rl_hwrev->rl_rev) {
2958 case RL_HWREV_8168DP:
2959 pci_set_max_read_req(sc->rl_dev, 4096);
2960 break;
2961 default:
2962 if (jumbo != 0)
2963 pci_set_max_read_req(sc->rl_dev, 512);
2964 else
2965 pci_set_max_read_req(sc->rl_dev, 4096);
2966 }
2967 }
2968
2969 static void
2970 re_init(void *xsc)
2971 {
2972 struct rl_softc *sc = xsc;
2973
2974 RL_LOCK(sc);
2975 re_init_locked(sc);
2976 RL_UNLOCK(sc);
2977 }
2978
2979 static void
2980 re_init_locked(struct rl_softc *sc)
2981 {
2982 struct ifnet *ifp = sc->rl_ifp;
2983 struct mii_data *mii;
2984 uint32_t reg;
2985 uint16_t cfg;
2986 union {
2987 uint32_t align_dummy;
2988 u_char eaddr[ETHER_ADDR_LEN];
2989 } eaddr;
2990
2991 RL_LOCK_ASSERT(sc);
2992
2993 mii = device_get_softc(sc->rl_miibus);
2994
2995 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2996 return;
2997
2998 /*
2999 * Cancel pending I/O and free all RX/TX buffers.
3000 */
3001 re_stop(sc);
3002
3003 /* Put controller into known state. */
3004 re_reset(sc);
3005
3006 /*
3007 * For C+ mode, initialize the RX descriptors and mbufs.
3008 */
3009 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3010 if (ifp->if_mtu > RL_MTU) {
3011 if (re_jrx_list_init(sc) != 0) {
3012 device_printf(sc->rl_dev,
3013 "no memory for jumbo RX buffers\n");
3014 re_stop(sc);
3015 return;
3016 }
3017 /* Disable checksum offloading for jumbo frames. */
3018 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3019 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3020 } else {
3021 if (re_rx_list_init(sc) != 0) {
3022 device_printf(sc->rl_dev,
3023 "no memory for RX buffers\n");
3024 re_stop(sc);
3025 return;
3026 }
3027 }
3028 re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3029 } else {
3030 if (re_rx_list_init(sc) != 0) {
3031 device_printf(sc->rl_dev, "no memory for RX buffers\n");
3032 re_stop(sc);
3033 return;
3034 }
3035 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3036 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3037 if (ifp->if_mtu > RL_MTU)
3038 pci_set_max_read_req(sc->rl_dev, 512);
3039 else
3040 pci_set_max_read_req(sc->rl_dev, 4096);
3041 }
3042 }
3043 re_tx_list_init(sc);
3044
3045 /*
3046 * Enable C+ RX and TX mode, as well as VLAN stripping and
3047 * RX checksum offload. We must configure the C+ register
3048 * before all others.
3049 */
3050 cfg = RL_CPLUSCMD_PCI_MRW;
3051 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3052 cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3053 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3054 cfg |= RL_CPLUSCMD_VLANSTRIP;
3055 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3056 cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3057 /* XXX magic. */
3058 cfg |= 0x0001;
3059 } else
3060 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3061 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3062 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3063 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3064 reg = 0x000fff00;
3065 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3066 reg |= 0x000000ff;
3067 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3068 reg |= 0x00f00000;
3069 CSR_WRITE_4(sc, 0x7c, reg);
3070 /* Disable interrupt mitigation. */
3071 CSR_WRITE_2(sc, 0xe2, 0);
3072 }
3073 /*
3074 	 * Disable TSO if the interface MTU is greater than the MSS
3075 	 * allowed by the controller.
3076 */
3077 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3078 ifp->if_capenable &= ~IFCAP_TSO4;
3079 ifp->if_hwassist &= ~CSUM_TSO;
3080 }
3081
3082 /*
3083 * Init our MAC address. Even though the chipset
3084 * documentation doesn't mention it, we need to enter "Config
3085 * register write enable" mode to modify the ID registers.
3086 */
3087 /* Copy MAC address on stack to align. */
3088 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3089 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3090 CSR_WRITE_4(sc, RL_IDR0,
3091 htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3092 CSR_WRITE_4(sc, RL_IDR4,
3093 htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3094 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3095
3096 /*
3097 * Load the addresses of the RX and TX lists into the chip.
3098 */
3099
3100 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3101 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3102 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3103 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3104
3105 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3106 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3107 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3108 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3109
3110 /*
3111 * Enable transmit and receive.
3112 */
3113 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3114
3115 /*
3116 * Set the initial TX configuration.
3117 */
3118 if (sc->rl_testmode) {
3119 if (sc->rl_type == RL_8169)
3120 CSR_WRITE_4(sc, RL_TXCFG,
3121 RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3122 else
3123 CSR_WRITE_4(sc, RL_TXCFG,
3124 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3125 } else
3126 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3127
3128 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3129
3130 /*
3131 * Set the initial RX configuration.
3132 */
3133 re_set_rxmode(sc);
3134
3135 /* Configure interrupt moderation. */
3136 if (sc->rl_type == RL_8169) {
3137 /* Magic from vendor. */
3138 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3139 }
3140
3141 #ifdef DEVICE_POLLING
3142 /*
3143 * Disable interrupts if we are polling.
3144 */
3145 if (ifp->if_capenable & IFCAP_POLLING)
3146 CSR_WRITE_2(sc, RL_IMR, 0);
3147 else /* otherwise ... */
3148 #endif
3149
3150 /*
3151 * Enable interrupts.
3152 */
3153 if (sc->rl_testmode)
3154 CSR_WRITE_2(sc, RL_IMR, 0);
3155 else
3156 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3157 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3158
3159 /* Set initial TX threshold */
3160 sc->rl_txthresh = RL_TX_THRESH_INIT;
3161
3162 /* Start RX/TX process. */
3163 CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3164 #ifdef notdef
3165 /* Enable receiver and transmitter. */
3166 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3167 #endif
3168
3169 /*
3170 * Initialize the timer interrupt register so that
3171 * a timer interrupt will be generated once the timer
3172 * reaches a certain number of ticks. The timer is
3173 * reloaded on each transmit.
3174 */
3175 #ifdef RE_TX_MODERATION
3176 /*
3177 	 * Use the timer interrupt register to moderate TX completion
3178 	 * interrupts, which dramatically improves the TX frame rate.
3179 */
3180 if (sc->rl_type == RL_8169)
3181 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3182 else
3183 CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3184 #else
3185 /*
3186 	 * Use the timer interrupt register to moderate RX
3187 	 * interrupts.
3188 */
3189 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3190 intr_filter == 0) {
3191 if (sc->rl_type == RL_8169)
3192 CSR_WRITE_4(sc, RL_TIMERINT_8169,
3193 RL_USECS(sc->rl_int_rx_mod));
3194 } else {
3195 if (sc->rl_type == RL_8169)
3196 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3197 }
3198 #endif
3199
3200 /*
3201 * For 8169 gigE NICs, set the max allowed RX packet
3202 * size so we can receive jumbo frames.
3203 */
3204 if (sc->rl_type == RL_8169) {
3205 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3206 /*
3207 			 * For controllers that use the new jumbo frame scheme,
3208 			 * set the maximum jumbo frame size depending on the
3209 			 * controller revision.
3210 */
3211 if (ifp->if_mtu > RL_MTU)
3212 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3213 sc->rl_hwrev->rl_max_mtu +
3214 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3215 ETHER_CRC_LEN);
3216 else
3217 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3218 RE_RX_DESC_BUFLEN);
3219 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3220 sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3221 /* RTL810x has no jumbo frame support. */
3222 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3223 } else
3224 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3225 }
3226
3227 if (sc->rl_testmode)
3228 return;
3229
3230 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3231 RL_CFG1_DRVLOAD);
3232
3233 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3234 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3235
3236 sc->rl_flags &= ~RL_FLAG_LINK;
3237 mii_mediachg(mii);
3238
3239 sc->rl_watchdog_timer = 0;
3240 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3241 }
3242
3243 /*
3244 * Set media options.
3245 */
3246 static int
3247 re_ifmedia_upd(struct ifnet *ifp)
3248 {
3249 struct rl_softc *sc;
3250 struct mii_data *mii;
3251 int error;
3252
3253 sc = ifp->if_softc;
3254 mii = device_get_softc(sc->rl_miibus);
3255 RL_LOCK(sc);
3256 error = mii_mediachg(mii);
3257 RL_UNLOCK(sc);
3258
3259 return (error);
3260 }
3261
3262 /*
3263 * Report current media status.
3264 */
3265 static void
3266 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3267 {
3268 struct rl_softc *sc;
3269 struct mii_data *mii;
3270
3271 sc = ifp->if_softc;
3272 mii = device_get_softc(sc->rl_miibus);
3273
3274 RL_LOCK(sc);
3275 mii_pollstat(mii);
3276 ifmr->ifm_active = mii->mii_media_active;
3277 ifmr->ifm_status = mii->mii_media_status;
3278 RL_UNLOCK(sc);
3279 }
3280
3281 static int
3282 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3283 {
3284 struct rl_softc *sc = ifp->if_softc;
3285 struct ifreq *ifr = (struct ifreq *) data;
3286 struct mii_data *mii;
3287 uint32_t rev;
3288 int error = 0;
3289
3290 switch (command) {
3291 case SIOCSIFMTU:
3292 if (ifr->ifr_mtu < ETHERMIN ||
3293 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) {
3294 error = EINVAL;
3295 break;
3296 }
3297 RL_LOCK(sc);
3298 if (ifp->if_mtu != ifr->ifr_mtu) {
3299 ifp->if_mtu = ifr->ifr_mtu;
3300 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3301 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3302 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3303 re_init_locked(sc);
3304 }
3305 if (ifp->if_mtu > RL_TSO_MTU &&
3306 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3307 ifp->if_capenable &= ~(IFCAP_TSO4 |
3308 IFCAP_VLAN_HWTSO);
3309 ifp->if_hwassist &= ~CSUM_TSO;
3310 }
3311 VLAN_CAPABILITIES(ifp);
3312 }
3313 RL_UNLOCK(sc);
3314 break;
3315 case SIOCSIFFLAGS:
3316 RL_LOCK(sc);
3317 if ((ifp->if_flags & IFF_UP) != 0) {
3318 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3319 if (((ifp->if_flags ^ sc->rl_if_flags)
3320 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3321 re_set_rxmode(sc);
3322 } else
3323 re_init_locked(sc);
3324 } else {
3325 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3326 re_stop(sc);
3327 }
3328 sc->rl_if_flags = ifp->if_flags;
3329 RL_UNLOCK(sc);
3330 break;
3331 case SIOCADDMULTI:
3332 case SIOCDELMULTI:
3333 RL_LOCK(sc);
3334 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3335 re_set_rxmode(sc);
3336 RL_UNLOCK(sc);
3337 break;
3338 case SIOCGIFMEDIA:
3339 case SIOCSIFMEDIA:
3340 mii = device_get_softc(sc->rl_miibus);
3341 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3342 break;
3343 case SIOCSIFCAP:
3344 {
3345 int mask, reinit;
3346
3347 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3348 reinit = 0;
3349 #ifdef DEVICE_POLLING
3350 if (mask & IFCAP_POLLING) {
3351 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3352 error = ether_poll_register(re_poll, ifp);
3353 if (error)
3354 return (error);
3355 RL_LOCK(sc);
3356 /* Disable interrupts */
3357 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3358 ifp->if_capenable |= IFCAP_POLLING;
3359 RL_UNLOCK(sc);
3360 } else {
3361 error = ether_poll_deregister(ifp);
3362 /* Enable interrupts. */
3363 RL_LOCK(sc);
3364 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3365 ifp->if_capenable &= ~IFCAP_POLLING;
3366 RL_UNLOCK(sc);
3367 }
3368 }
3369 #endif /* DEVICE_POLLING */
3370 RL_LOCK(sc);
3371 if ((mask & IFCAP_TXCSUM) != 0 &&
3372 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3373 ifp->if_capenable ^= IFCAP_TXCSUM;
3374 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
3375 rev = sc->rl_hwrev->rl_rev;
3376 if (rev == RL_HWREV_8168C ||
3377 rev == RL_HWREV_8168C_SPIN2)
3378 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
3379 else
3380 ifp->if_hwassist |= RE_CSUM_FEATURES;
3381 } else
3382 ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3383 reinit = 1;
3384 }
3385 if ((mask & IFCAP_RXCSUM) != 0 &&
3386 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3387 ifp->if_capenable ^= IFCAP_RXCSUM;
3388 reinit = 1;
3389 }
3390 if ((mask & IFCAP_TSO4) != 0 &&
3391 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3392 ifp->if_capenable ^= IFCAP_TSO4;
3393 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3394 ifp->if_hwassist |= CSUM_TSO;
3395 else
3396 ifp->if_hwassist &= ~CSUM_TSO;
3397 if (ifp->if_mtu > RL_TSO_MTU &&
3398 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3399 ifp->if_capenable &= ~IFCAP_TSO4;
3400 ifp->if_hwassist &= ~CSUM_TSO;
3401 }
3402 }
3403 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3404 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3405 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3406 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3407 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3408 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3409 /* TSO over VLAN requires VLAN hardware tagging. */
3410 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3411 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3412 reinit = 1;
3413 }
3414 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3415 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3416 IFCAP_VLAN_HWTSO)) != 0)
3417 reinit = 1;
3418 if ((mask & IFCAP_WOL) != 0 &&
3419 (ifp->if_capabilities & IFCAP_WOL) != 0) {
3420 if ((mask & IFCAP_WOL_UCAST) != 0)
3421 ifp->if_capenable ^= IFCAP_WOL_UCAST;
3422 if ((mask & IFCAP_WOL_MCAST) != 0)
3423 ifp->if_capenable ^= IFCAP_WOL_MCAST;
3424 if ((mask & IFCAP_WOL_MAGIC) != 0)
3425 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3426 }
3427 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3428 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3429 re_init_locked(sc);
3430 }
3431 RL_UNLOCK(sc);
3432 VLAN_CAPABILITIES(ifp);
3433 }
3434 break;
3435 default:
3436 error = ether_ioctl(ifp, command, data);
3437 break;
3438 }
3439
3440 return (error);
3441 }
3442
3443 static void
3444 re_watchdog(struct rl_softc *sc)
3445 {
3446 struct ifnet *ifp;
3447
3448 RL_LOCK_ASSERT(sc);
3449
3450 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3451 return;
3452
3453 ifp = sc->rl_ifp;
3454 re_txeof(sc);
3455 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3456 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3457 "-- recovering\n");
3458 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3459 re_start_locked(ifp);
3460 return;
3461 }
3462
3463 if_printf(ifp, "watchdog timeout\n");
3464 ifp->if_oerrors++;
3465
3466 re_rxeof(sc, NULL);
3467 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3468 re_init_locked(sc);
3469 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3470 re_start_locked(ifp);
3471 }
3472
3473 /*
3474 * Stop the adapter and free any mbufs allocated to the
3475 * RX and TX lists.
3476 */
3477 static void
3478 re_stop(struct rl_softc *sc)
3479 {
3480 int i;
3481 struct ifnet *ifp;
3482 struct rl_txdesc *txd;
3483 struct rl_rxdesc *rxd;
3484
3485 RL_LOCK_ASSERT(sc);
3486
3487 ifp = sc->rl_ifp;
3488
3489 sc->rl_watchdog_timer = 0;
3490 callout_stop(&sc->rl_stat_callout);
3491 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3492
3493 /*
3494 * Disable accepting frames to put RX MAC into idle state.
3495 	 * Otherwise it's possible to receive frames while the stop
3496 	 * command is executing, and the controller could DMA a frame
3497 	 * into an already freed RX buffer during that period.
3498 */
3499 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3500 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3501 RL_RXCFG_RX_BROAD));
3502
3503 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3504 for (i = RL_TIMEOUT; i > 0; i--) {
3505 if ((CSR_READ_1(sc, sc->rl_txstart) &
3506 RL_TXSTART_START) == 0)
3507 break;
3508 DELAY(20);
3509 }
3510 if (i == 0)
3511 device_printf(sc->rl_dev,
3512 "stopping TX poll timed out!\n");
3513 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3514 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3515 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3516 RL_CMD_RX_ENB);
3517 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3518 for (i = RL_TIMEOUT; i > 0; i--) {
3519 if ((CSR_READ_4(sc, RL_TXCFG) &
3520 RL_TXCFG_QUEUE_EMPTY) != 0)
3521 break;
3522 DELAY(100);
3523 }
3524 if (i == 0)
3525 device_printf(sc->rl_dev,
3526 "stopping TXQ timed out!\n");
3527 }
3528 } else
3529 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3530 DELAY(1000);
3531 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3532 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3533
3534 if (sc->rl_head != NULL) {
3535 m_freem(sc->rl_head);
3536 sc->rl_head = sc->rl_tail = NULL;
3537 }
3538
3539 /* Free the TX list buffers. */
3540 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3541 txd = &sc->rl_ldata.rl_tx_desc[i];
3542 if (txd->tx_m != NULL) {
3543 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3544 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3545 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3546 txd->tx_dmamap);
3547 m_freem(txd->tx_m);
3548 txd->tx_m = NULL;
3549 }
3550 }
3551
3552 /* Free the RX list buffers. */
3553 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3554 rxd = &sc->rl_ldata.rl_rx_desc[i];
3555 if (rxd->rx_m != NULL) {
3556 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3557 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3558 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3559 rxd->rx_dmamap);
3560 m_freem(rxd->rx_m);
3561 rxd->rx_m = NULL;
3562 }
3563 }
3564
3565 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3566 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3567 rxd = &sc->rl_ldata.rl_jrx_desc[i];
3568 if (rxd->rx_m != NULL) {
3569 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3570 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3571 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3572 rxd->rx_dmamap);
3573 m_freem(rxd->rx_m);
3574 rxd->rx_m = NULL;
3575 }
3576 }
3577 }
3578 }
3579
3580 /*
3581 * Device suspend routine. Stop the interface and save some PCI
3582 * settings in case the BIOS doesn't restore them properly on
3583 * resume.
3584 */
3585 static int
3586 re_suspend(device_t dev)
3587 {
3588 struct rl_softc *sc;
3589
3590 sc = device_get_softc(dev);
3591
3592 RL_LOCK(sc);
3593 re_stop(sc);
3594 re_setwol(sc);
3595 sc->suspended = 1;
3596 RL_UNLOCK(sc);
3597
3598 return (0);
3599 }
3600
3601 /*
3602 * Device resume routine. Restore some PCI settings in case the BIOS
3603 * doesn't, re-enable busmastering, and restart the interface if
3604 * appropriate.
3605 */
3606 static int
3607 re_resume(device_t dev)
3608 {
3609 struct rl_softc *sc;
3610 struct ifnet *ifp;
3611
3612 sc = device_get_softc(dev);
3613
3614 RL_LOCK(sc);
3615
3616 ifp = sc->rl_ifp;
3617 /* Take controller out of sleep mode. */
3618 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3619 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3620 CSR_WRITE_1(sc, RL_GPIO,
3621 CSR_READ_1(sc, RL_GPIO) | 0x01);
3622 }
3623
3624 /*
3625 * Clear WOL matching such that normal Rx filtering
3626 * wouldn't interfere with WOL patterns.
3627 */
3628 re_clrwol(sc);
3629
3630 	/* Reinitialize the interface if necessary. */
3631 if (ifp->if_flags & IFF_UP)
3632 re_init_locked(sc);
3633
3634 sc->suspended = 0;
3635 RL_UNLOCK(sc);
3636
3637 return (0);
3638 }
3639
3640 /*
3641 * Stop all chip I/O so that the kernel's probe routines don't
3642 * get confused by errant DMAs when rebooting.
3643 */
3644 static int
3645 re_shutdown(device_t dev)
3646 {
3647 struct rl_softc *sc;
3648
3649 sc = device_get_softc(dev);
3650
3651 RL_LOCK(sc);
3652 re_stop(sc);
3653 /*
3654 	 * Mark the interface as down, since otherwise we will panic
3655 	 * if an interrupt comes in later on, which can happen in some
3656 * cases.
3657 */
3658 sc->rl_ifp->if_flags &= ~IFF_UP;
3659 re_setwol(sc);
3660 RL_UNLOCK(sc);
3661
3662 return (0);
3663 }
3664
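/*
 * Force the PHY to a 10/100Mbps link for Wake On Lan.  Called from
 * re_setwol() when RL_FLAG_WOL_MANLINK is set; if the current link
 * is gigabit, renegotiate with the 1000BASE-T advertisement cleared.
 */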
3665 static void
3666 re_set_linkspeed(struct rl_softc *sc)
3667 {
3668 struct mii_softc *miisc;
3669 struct mii_data *mii;
3670 int aneg, i, phyno;
3671
3672 RL_LOCK_ASSERT(sc);
3673
3674 mii = device_get_softc(sc->rl_miibus);
3675 mii_pollstat(mii);
3676 aneg = 0;
3677 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3678 (IFM_ACTIVE | IFM_AVALID)) {
3679 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3680 case IFM_10_T:
3681 case IFM_100_TX:
3682 return;
3683 case IFM_1000_T:
3684 aneg++;
3685 break;
3686 default:
3687 break;
3688 }
3689 }
3690 miisc = LIST_FIRST(&mii->mii_phys);
3691 phyno = miisc->mii_phy;
3692 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3693 mii_phy_reset(miisc);
3694 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3695 re_miibus_writereg(sc->rl_dev, phyno,
3696 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3697 re_miibus_writereg(sc->rl_dev, phyno,
3698 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3699 DELAY(1000);
3700 if (aneg != 0) {
3701 /*
3702 		 * Poll link state until re(4) gets a 10/100Mbps link.
3703 */
3704 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3705 mii_pollstat(mii);
3706 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3707 == (IFM_ACTIVE | IFM_AVALID)) {
3708 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3709 case IFM_10_T:
3710 case IFM_100_TX:
3711 return;
3712 default:
3713 break;
3714 }
3715 }
3716 RL_UNLOCK(sc);
3717 pause("relnk", hz);
3718 RL_LOCK(sc);
3719 }
3720 if (i == MII_ANEGTICKS_GIGE)
3721 device_printf(sc->rl_dev,
3722 			    "establishing a link failed, WOL may not work!\n");
3723 }
3724 /*
3725 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3726 	 * The MAC does not require reprogramming on resolved speed/duplex,
3727 * so this is just for completeness.
3728 */
3729 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3730 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3731 }
3732
3733 static void
3734 re_setwol(struct rl_softc *sc)
3735 {
3736 struct ifnet *ifp;
3737 int pmc;
3738 uint16_t pmstat;
3739 uint8_t v;
3740
3741 RL_LOCK_ASSERT(sc);
3742
3743 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3744 return;
3745
3746 ifp = sc->rl_ifp;
3747 /* Put controller into sleep mode. */
3748 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3749 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3750 CSR_WRITE_1(sc, RL_GPIO,
3751 CSR_READ_1(sc, RL_GPIO) & ~0x01);
3752 }
3753 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3754 re_set_rxmode(sc);
3755 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3756 re_set_linkspeed(sc);
3757 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3758 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3759 }
3760 /* Enable config register write. */
3761 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3762
3763 /* Enable PME. */
3764 v = CSR_READ_1(sc, sc->rl_cfg1);
3765 v &= ~RL_CFG1_PME;
3766 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3767 v |= RL_CFG1_PME;
3768 CSR_WRITE_1(sc, sc->rl_cfg1, v);
3769
3770 v = CSR_READ_1(sc, sc->rl_cfg3);
3771 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3772 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3773 v |= RL_CFG3_WOL_MAGIC;
3774 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3775
3776 v = CSR_READ_1(sc, sc->rl_cfg5);
3777 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3778 RL_CFG5_WOL_LANWAKE);
3779 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3780 v |= RL_CFG5_WOL_UCAST;
3781 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3782 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3783 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3784 v |= RL_CFG5_WOL_LANWAKE;
3785 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3786
3787 /* Config register write done. */
3788 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3789
3790 if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3791 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3792 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3793 /*
3794 	 * It seems that the hardware resets its link speed to 100Mbps
3795 	 * in power-down mode, so switching to 100Mbps in the driver is
3796 	 * not needed.
3797 */
3798
3799 /* Request PME if WOL is requested. */
3800 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3801 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3802 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3803 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3804 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3805 }
3806
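/*
 * Note on re_setwol() above: the sequence is to (optionally) sleep the
 * MAC, reprogram the RX filter and force a 10/100 link where the chip
 * needs one for wake-up, unlock the config registers through RL_EECMD,
 * arm PME in CFG1 and the wake-event selects in CFG3/CFG5, relock, and
 * finally set PME/PME-enable in the PCI power-management capability.
 * The wake events map onto interface capabilities that can be toggled
 * from userland, e.g.:
 *
 *	# ifconfig re0 wol_magic	(magic-packet wake only)
 *	# ifconfig re0 wol		(all supported wake events)
 */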
3807 static void
3808 re_clrwol(struct rl_softc *sc)
3809 {
3810 int pmc;
3811 uint8_t v;
3812
3813 RL_LOCK_ASSERT(sc);
3814
3815 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3816 return;
3817
3818 /* Enable config register write. */
3819 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3820
3821 v = CSR_READ_1(sc, sc->rl_cfg3);
3822 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3823 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3824
3825 /* Config register write done. */
3826 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3827
3828 v = CSR_READ_1(sc, sc->rl_cfg5);
3829 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3830 v &= ~RL_CFG5_WOL_LANWAKE;
3831 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3832 }
3833
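/*
 * Note on re_clrwol() above: the inverse of re_setwol(), clearing the
 * link-change/magic-packet selects in CFG3 and the unicast/multicast/
 * broadcast/LANWAKE selects in CFG5 so a resumed interface does not
 * keep stale wake filters armed.  Only the CFG3 write sits inside the
 * RL_EECMD unlock window; CFG5 is apparently writable without it.
 */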
3834 static void
3835 re_add_sysctls(struct rl_softc *sc)
3836 {
3837 struct sysctl_ctx_list *ctx;
3838 struct sysctl_oid_list *children;
3839 int error;
3840
3841 ctx = device_get_sysctl_ctx(sc->rl_dev);
3842 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3843
3844 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3845 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
3846 "Statistics Information");
3847 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3848 return;
3849
3850 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3851 CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
3852 sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
3853 /* Pull in device tunables. */
3854 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3855 error = resource_int_value(device_get_name(sc->rl_dev),
3856 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3857 if (error == 0) {
3858 if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3859 sc->rl_int_rx_mod > RL_TIMER_MAX) {
3860 device_printf(sc->rl_dev, "int_rx_mod value out of "
3861 "range; using default: %d\n",
3862 RL_TIMER_DEFAULT);
3863 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3864 }
3865 }
3866
3867 }
3868
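/*
 * Note on re_add_sysctls() above: two knobs are exposed under the
 * device's sysctl tree.  "stats" triggers a hardware counter dump and
 * "int_rx_mod" tunes the RX interrupt moderation timer (registered
 * only when MSI/MSI-X is in use).  The latter is also read back as a
 * device hint at attach time, so for unit 0 either of these works
 * (the value 65 is illustrative only):
 *
 *	# sysctl dev.re.0.int_rx_mod=65
 *
 * or in /boot/device.hints:
 *
 *	hint.re.0.int_rx_mod="65"
 */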
3869 static int
3870 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3871 {
3872 struct rl_softc *sc;
3873 struct rl_stats *stats;
3874 int error, i, result;
3875
3876 result = -1;
3877 error = sysctl_handle_int(oidp, &result, 0, req);
3878 if (error || req->newptr == NULL)
3879 return (error);
3880
3881 if (result == 1) {
3882 sc = (struct rl_softc *)arg1;
3883 RL_LOCK(sc);
3884 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3885 RL_UNLOCK(sc);
3886 goto done;
3887 }
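		/*
		 * The dump handshake below programs the 64-bit bus
		 * address of the preallocated stats buffer into
		 * RL_DUMPSTATS_HI/LO and then rewrites the low word
		 * with RL_DUMPSTATS_START set.  The chip DMAs the
		 * counters to the buffer and clears the START bit when
		 * finished, which the loop polls for (up to RL_TIMEOUT
		 * milliseconds, one DELAY(1000) per iteration).
		 */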
3888 bus_dmamap_sync(sc->rl_ldata.rl_stag,
3889 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
3890 CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
3891 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
3892 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3893 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
3894 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3895 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
3896 RL_DUMPSTATS_START));
3897 for (i = RL_TIMEOUT; i > 0; i--) {
3898 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
3899 RL_DUMPSTATS_START) == 0)
3900 break;
3901 DELAY(1000);
3902 }
3903 bus_dmamap_sync(sc->rl_ldata.rl_stag,
3904 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
3905 RL_UNLOCK(sc);
3906 if (i == 0) {
3907 device_printf(sc->rl_dev,
3908 			    "DUMP statistics request timed out\n");
3909 return (ETIMEDOUT);
3910 }
3911 done:
3912 stats = sc->rl_ldata.rl_stats;
3913 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
3914 printf("Tx frames : %ju\n",
3915 (uintmax_t)le64toh(stats->rl_tx_pkts));
3916 printf("Rx frames : %ju\n",
3917 (uintmax_t)le64toh(stats->rl_rx_pkts));
3918 printf("Tx errors : %ju\n",
3919 (uintmax_t)le64toh(stats->rl_tx_errs));
3920 printf("Rx errors : %u\n",
3921 le32toh(stats->rl_rx_errs));
3922 printf("Rx missed frames : %u\n",
3923 (uint32_t)le16toh(stats->rl_missed_pkts));
3924 printf("Rx frame alignment errs : %u\n",
3925 (uint32_t)le16toh(stats->rl_rx_framealign_errs));
3926 printf("Tx single collisions : %u\n",
3927 le32toh(stats->rl_tx_onecoll));
3928 printf("Tx multiple collisions : %u\n",
3929 le32toh(stats->rl_tx_multicolls));
3930 printf("Rx unicast frames : %ju\n",
3931 (uintmax_t)le64toh(stats->rl_rx_ucasts));
3932 printf("Rx broadcast frames : %ju\n",
3933 (uintmax_t)le64toh(stats->rl_rx_bcasts));
3934 printf("Rx multicast frames : %u\n",
3935 le32toh(stats->rl_rx_mcasts));
3936 printf("Tx aborts : %u\n",
3937 (uint32_t)le16toh(stats->rl_tx_aborts));
3938 printf("Tx underruns : %u\n",
3939 (uint32_t)le16toh(stats->rl_rx_underruns));
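		/*
		 * The label above says "Tx underruns" while the struct
		 * field is named rl_rx_underruns; the counter is
		 * believed to count TX FIFO underruns despite the name.
		 */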
3940 }
3941
3942 return (error);
3943 }
3944
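/*
 * Note on re_sysctl_stats() above: the handler only acts when 1 is
 * written to the node; plain reads just return -1.  If the interface
 * is not running it skips the hardware dump and prints whatever the
 * stats buffer last held, and it returns ETIMEDOUT when the chip never
 * clears the START bit.
 */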
3945 static int
3946 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3947 {
3948 int error, value;
3949
3950 if (arg1 == NULL)
3951 return (EINVAL);
3952 value = *(int *)arg1;
3953 error = sysctl_handle_int(oidp, &value, 0, req);
3954 if (error || req->newptr == NULL)
3955 return (error);
3956 if (value < low || value > high)
3957 return (EINVAL);
3958 *(int *)arg1 = value;
3959
3960 return (0);
3961 }
3962
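/*
 * Note on sysctl_int_range() above: a small reusable wrapper that
 * copies the current value out, lets sysctl_handle_int() process any
 * new value, and commits the write back through arg1 only when the
 * value lies in [low, high].  sysctl_hw_re_int_mod() below simply
 * curries it with the RL_TIMER_MIN/RL_TIMER_MAX bounds.  A
 * hypothetical extra knob could reuse it the same way (illustrative
 * only; "int_tx_mod" does not exist in this driver):
 *
 *	static int
 *	sysctl_hw_re_int_tx_mod(SYSCTL_HANDLER_ARGS)
 *	{
 *
 *		return (sysctl_int_range(oidp, arg1, arg2, req,
 *		    RL_TIMER_MIN, RL_TIMER_MAX));
 *	}
 */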
3963 static int
3964 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
3965 {
3966
3967 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
3968 RL_TIMER_MAX));
3969 }