FreeBSD/Linux Kernel Cross Reference
sys/dev/re/if_re.c
1 /*-
2 * Copyright (c) 1997, 1998-2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 /*
37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
38 *
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
41 * Wind River Systems
42 */
43
44 /*
45 * This driver is designed to support RealTek's next generation of
46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47  * seven devices in this family: the RTL8139C+, RTL8169, RTL8169S,
48  * RTL8110S, RTL8168, RTL8111, and RTL8101E.
49 *
50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51  * with the older 8139 family; however, it also supports a special
52 * C+ mode of operation that provides several new performance enhancing
53 * features. These include:
54 *
55 * o Descriptor based DMA mechanism. Each descriptor represents
56 * a single packet fragment. Data buffers may be aligned on
57 * any byte boundary.
58 *
59 * o 64-bit DMA
60 *
61 * o TCP/IP checksum offload for both RX and TX
62 *
63 * o High and normal priority transmit DMA rings
64 *
65 * o VLAN tag insertion and extraction
66 *
67 * o TCP large send (segmentation offload)
68 *
69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70 * programming API is fairly straightforward. The RX filtering, EEPROM
71 * access and PHY access is the same as it is on the older 8139 series
72 * chips.
73 *
74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75 * same programming API and feature set as the 8139C+ with the following
76 * differences and additions:
77 *
78 * o 1000Mbps mode
79 *
80 * o Jumbo frames
81 *
82 * o GMII and TBI ports/registers for interfacing with copper
83 * or fiber PHYs
84 *
85 * o RX and TX DMA rings can have up to 1024 descriptors
86 * (the 8139C+ allows a maximum of 64)
87 *
88 * o Slight differences in register layout from the 8139C+
89 *
90 * The TX start and timer interrupt registers are at different locations
91 * on the 8169 than they are on the 8139C+. Also, the status word in the
92 * RX descriptor has a slightly different bit layout. The 8169 does not
93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94 * copper gigE PHY.
95 *
96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97 * (the 'S' stands for 'single-chip'). These devices have the same
98 * programming API as the older 8169, but also have some vendor-specific
99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101 *
102 * This driver takes advantage of the RX and TX checksum offload and
103 * VLAN tag insertion/extraction features. It also implements TX
104 * interrupt moderation using the timer interrupt registers, which
105 * significantly reduces TX interrupt load. There is also support
106  * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
107  * jumbo frames larger than 7440 bytes, so the max MTU possible with
108  * this driver is 7422 bytes.
109 */
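/*
 * For reference, the 7422 byte maximum MTU quoted above follows from the
 * 7440 byte hardware TX frame limit minus the Ethernet header and CRC,
 * assuming the standard <net/ethernet.h> constants:
 *
 *	7440 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 7422
 */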
110
111 #ifdef HAVE_KERNEL_OPTION_HEADERS
112 #include "opt_device_polling.h"
113 #endif
114
115 #include <sys/param.h>
116 #include <sys/endian.h>
117 #include <sys/systm.h>
118 #include <sys/sockio.h>
119 #include <sys/mbuf.h>
120 #include <sys/malloc.h>
121 #include <sys/module.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/lock.h>
125 #include <sys/mutex.h>
126 #include <sys/sysctl.h>
127 #include <sys/taskqueue.h>
128
129 #include <net/if.h>
130 #include <net/if_arp.h>
131 #include <net/ethernet.h>
132 #include <net/if_dl.h>
133 #include <net/if_media.h>
134 #include <net/if_types.h>
135 #include <net/if_vlan_var.h>
136
137 #include <net/bpf.h>
138
139 #include <machine/bus.h>
140 #include <machine/resource.h>
141 #include <sys/bus.h>
142 #include <sys/rman.h>
143
144 #include <dev/mii/mii.h>
145 #include <dev/mii/miivar.h>
146
147 #include <dev/pci/pcireg.h>
148 #include <dev/pci/pcivar.h>
149
150 #include <dev/rl/if_rlreg.h>
151
152 MODULE_DEPEND(re, pci, 1, 1, 1);
153 MODULE_DEPEND(re, ether, 1, 1, 1);
154 MODULE_DEPEND(re, miibus, 1, 1, 1);
155
156 /* "device miibus" required. See GENERIC if you get errors here. */
157 #include "miibus_if.h"
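/*
 * For context, a minimal kernel configuration fragment satisfying the
 * dependencies above would look like this (sketch; see GENERIC):
 *
 *	device	miibus		# MII bus support
 *	device	re		# RealTek 8139C+/8169/8169S/8110S
 *
 * Alternatively the driver can be loaded as a module, e.g. by setting
 * if_re_load="YES" in loader.conf(5).
 */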
158
159 /* Tunables. */
160 static int intr_filter = 0;
161 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
162 static int msi_disable = 0;
163 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
164 static int msix_disable = 0;
165 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
166 static int prefer_iomap = 0;
167 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
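/*
 * The tunables above are read from loader.conf(5) at boot; illustrative
 * (non-default) settings:
 *
 *	hw.re.msi_disable="1"		# fall back to INTx interrupts
 *	hw.re.prefer_iomap="1"		# map registers via I/O space
 */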
168
169 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
170
171 /*
172 * Various supported device vendors/types and their names.
173 */
174 static const struct rl_type re_devs[] = {
175 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
176 "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
177 { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
178 "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
179 { RT_VENDORID, RT_DEVICEID_8139, 0,
180 "RealTek 8139C+ 10/100BaseTX" },
181 { RT_VENDORID, RT_DEVICEID_8101E, 0,
182 "RealTek 810xE PCIe 10/100baseTX" },
183 { RT_VENDORID, RT_DEVICEID_8168, 0,
184 "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
185 { RT_VENDORID, RT_DEVICEID_8169, 0,
186 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
187 { RT_VENDORID, RT_DEVICEID_8169SC, 0,
188 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
189 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
190 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
191 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
192 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
193 { USR_VENDORID, USR_DEVICEID_997902, 0,
194 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
195 };
196
197 static const struct rl_hwrev re_hwrevs[] = {
198 { RL_HWREV_8139, RL_8139, "", RL_MTU },
199 { RL_HWREV_8139A, RL_8139, "A", RL_MTU },
200 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
201 { RL_HWREV_8139B, RL_8139, "B", RL_MTU },
202 { RL_HWREV_8130, RL_8139, "8130", RL_MTU },
203 { RL_HWREV_8139C, RL_8139, "C", RL_MTU },
204 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
205 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
206 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
207 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
208 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
209 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
210 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
211 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
212 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
213 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
214 { RL_HWREV_8100, RL_8139, "8100", RL_MTU },
215 { RL_HWREV_8101, RL_8139, "8101", RL_MTU },
216 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
217 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
218 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
219 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
220 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
221 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
222 { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
223 { RL_HWREV_8402, RL_8169, "8402", RL_MTU },
224 { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
225 { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
226 { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
227 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
228 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
229 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
230 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
231 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
232 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
233 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
234 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
235 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
236 { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
237 { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
238 { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
239 { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
240 { RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
241 { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
242 { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
243 { 0, 0, NULL, 0 }
244 };
245
246 static int re_probe (device_t);
247 static int re_attach (device_t);
248 static int re_detach (device_t);
249
250 static int re_encap (struct rl_softc *, struct mbuf **);
251
252 static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
253 static int re_allocmem (device_t, struct rl_softc *);
254 static __inline void re_discard_rxbuf
255 (struct rl_softc *, int);
256 static int re_newbuf (struct rl_softc *, int);
257 static int re_jumbo_newbuf (struct rl_softc *, int);
258 static int re_rx_list_init (struct rl_softc *);
259 static int re_jrx_list_init (struct rl_softc *);
260 static int re_tx_list_init (struct rl_softc *);
261 #ifdef RE_FIXUP_RX
262 static __inline void re_fixup_rx
263 (struct mbuf *);
264 #endif
265 static int re_rxeof (struct rl_softc *, int *);
266 static void re_txeof (struct rl_softc *);
267 #ifdef DEVICE_POLLING
268 static int re_poll (struct ifnet *, enum poll_cmd, int);
269 static int re_poll_locked (struct ifnet *, enum poll_cmd, int);
270 #endif
271 static int re_intr (void *);
272 static void re_intr_msi (void *);
273 static void re_tick (void *);
274 static void re_int_task (void *, int);
275 static void re_start (struct ifnet *);
276 static void re_start_locked (struct ifnet *);
277 static int re_ioctl (struct ifnet *, u_long, caddr_t);
278 static void re_init (void *);
279 static void re_init_locked (struct rl_softc *);
280 static void re_stop (struct rl_softc *);
281 static void re_watchdog (struct rl_softc *);
282 static int re_suspend (device_t);
283 static int re_resume (device_t);
284 static int re_shutdown (device_t);
285 static int re_ifmedia_upd (struct ifnet *);
286 static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
287
288 static void re_eeprom_putbyte (struct rl_softc *, int);
289 static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
290 static void re_read_eeprom (struct rl_softc *, caddr_t, int, int);
291 static int re_gmii_readreg (device_t, int, int);
292 static int re_gmii_writereg (device_t, int, int, int);
293
294 static int re_miibus_readreg (device_t, int, int);
295 static int re_miibus_writereg (device_t, int, int, int);
296 static void re_miibus_statchg (device_t);
297
298 static void re_set_jumbo (struct rl_softc *, int);
299 static void re_set_rxmode (struct rl_softc *);
300 static void re_reset (struct rl_softc *);
301 static void re_setwol (struct rl_softc *);
302 static void re_clrwol (struct rl_softc *);
303 static void re_set_linkspeed (struct rl_softc *);
304
305 #ifdef DEV_NETMAP /* see ixgbe.c for details */
306 #include <dev/netmap/if_re_netmap.h>
307 #endif /* DEV_NETMAP */
308
309 #ifdef RE_DIAG
310 static int re_diag (struct rl_softc *);
311 #endif
312
313 static void re_add_sysctls (struct rl_softc *);
314 static int re_sysctl_stats (SYSCTL_HANDLER_ARGS);
315 static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int);
316 static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS);
317
318 static device_method_t re_methods[] = {
319 /* Device interface */
320 DEVMETHOD(device_probe, re_probe),
321 DEVMETHOD(device_attach, re_attach),
322 DEVMETHOD(device_detach, re_detach),
323 DEVMETHOD(device_suspend, re_suspend),
324 DEVMETHOD(device_resume, re_resume),
325 DEVMETHOD(device_shutdown, re_shutdown),
326
327 /* MII interface */
328 DEVMETHOD(miibus_readreg, re_miibus_readreg),
329 DEVMETHOD(miibus_writereg, re_miibus_writereg),
330 DEVMETHOD(miibus_statchg, re_miibus_statchg),
331
332 DEVMETHOD_END
333 };
334
335 static driver_t re_driver = {
336 "re",
337 re_methods,
338 sizeof(struct rl_softc)
339 };
340
341 static devclass_t re_devclass;
342
343 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
344 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
345
346 #define EE_SET(x) \
347 CSR_WRITE_1(sc, RL_EECMD, \
348 CSR_READ_1(sc, RL_EECMD) | x)
349
350 #define EE_CLR(x) \
351 CSR_WRITE_1(sc, RL_EECMD, \
352 CSR_READ_1(sc, RL_EECMD) & ~x)
353
354 /*
355 * Send a read command and address to the EEPROM, check for ACK.
356 */
357 static void
358 re_eeprom_putbyte(struct rl_softc *sc, int addr)
359 {
360 int d, i;
361
362 d = addr | (RL_9346_READ << sc->rl_eewidth);
363
364 /*
365 * Feed in each bit and strobe the clock.
366 */
367
368 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
369 if (d & i) {
370 EE_SET(RL_EE_DATAIN);
371 } else {
372 EE_CLR(RL_EE_DATAIN);
373 }
374 DELAY(100);
375 EE_SET(RL_EE_CLK);
376 DELAY(150);
377 EE_CLR(RL_EE_CLK);
378 DELAY(100);
379 }
380 }
381
382 /*
383 * Read a word of data stored in the EEPROM at address 'addr.'
384 */
385 static void
386 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
387 {
388 int i;
389 u_int16_t word = 0;
390
391 /*
392 * Send address of word we want to read.
393 */
394 re_eeprom_putbyte(sc, addr);
395
396 /*
397 * Start reading bits from EEPROM.
398 */
399 for (i = 0x8000; i; i >>= 1) {
400 EE_SET(RL_EE_CLK);
401 DELAY(100);
402 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
403 word |= i;
404 EE_CLR(RL_EE_CLK);
405 DELAY(100);
406 }
407
408 *dest = word;
409 }
410
411 /*
412 * Read a sequence of words from the EEPROM.
413 */
414 static void
415 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
416 {
417 int i;
418 u_int16_t word = 0, *ptr;
419
420 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
421
422 DELAY(100);
423
424 for (i = 0; i < cnt; i++) {
425 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
426 re_eeprom_getword(sc, off + i, &word);
427 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
428 ptr = (u_int16_t *)(dest + (i * 2));
429 *ptr = word;
430 }
431
432 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
433 }
434
435 static int
436 re_gmii_readreg(device_t dev, int phy, int reg)
437 {
438 struct rl_softc *sc;
439 u_int32_t rval;
440 int i;
441
442 sc = device_get_softc(dev);
443
444 /* Let the rgephy driver read the GMEDIASTAT register */
445
446 if (reg == RL_GMEDIASTAT) {
447 rval = CSR_READ_1(sc, RL_GMEDIASTAT);
448 return (rval);
449 }
450
451 CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
452
453 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
454 rval = CSR_READ_4(sc, RL_PHYAR);
455 if (rval & RL_PHYAR_BUSY)
456 break;
457 DELAY(25);
458 }
459
460 if (i == RL_PHY_TIMEOUT) {
461 device_printf(sc->rl_dev, "PHY read failed\n");
462 return (0);
463 }
464
465 /*
466 * Controller requires a 20us delay to process next MDIO request.
467 */
468 DELAY(20);
469
470 return (rval & RL_PHYAR_PHYDATA);
471 }
472
473 static int
474 re_gmii_writereg(device_t dev, int phy, int reg, int data)
475 {
476 struct rl_softc *sc;
477 u_int32_t rval;
478 int i;
479
480 sc = device_get_softc(dev);
481
482 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
483 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
484
485 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
486 rval = CSR_READ_4(sc, RL_PHYAR);
487 if (!(rval & RL_PHYAR_BUSY))
488 break;
489 DELAY(25);
490 }
491
492 if (i == RL_PHY_TIMEOUT) {
493 device_printf(sc->rl_dev, "PHY write failed\n");
494 return (0);
495 }
496
497 /*
498 * Controller requires a 20us delay to process next MDIO request.
499 */
500 DELAY(20);
501
502 return (0);
503 }
504
505 static int
506 re_miibus_readreg(device_t dev, int phy, int reg)
507 {
508 struct rl_softc *sc;
509 u_int16_t rval = 0;
510 u_int16_t re8139_reg = 0;
511
512 sc = device_get_softc(dev);
513
514 if (sc->rl_type == RL_8169) {
515 rval = re_gmii_readreg(dev, phy, reg);
516 return (rval);
517 }
518
519 switch (reg) {
520 case MII_BMCR:
521 re8139_reg = RL_BMCR;
522 break;
523 case MII_BMSR:
524 re8139_reg = RL_BMSR;
525 break;
526 case MII_ANAR:
527 re8139_reg = RL_ANAR;
528 break;
529 case MII_ANER:
530 re8139_reg = RL_ANER;
531 break;
532 case MII_ANLPAR:
533 re8139_reg = RL_LPAR;
534 break;
535 case MII_PHYIDR1:
536 case MII_PHYIDR2:
537 return (0);
538 /*
539 * Allow the rlphy driver to read the media status
540 * register. If we have a link partner which does not
541 * support NWAY, this is the register which will tell
542 * us the results of parallel detection.
543 */
544 case RL_MEDIASTAT:
545 rval = CSR_READ_1(sc, RL_MEDIASTAT);
546 return (rval);
547 default:
548 device_printf(sc->rl_dev, "bad phy register\n");
549 return (0);
550 }
551 rval = CSR_READ_2(sc, re8139_reg);
552 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
553 /* 8139C+ has different bit layout. */
554 rval &= ~(BMCR_LOOP | BMCR_ISO);
555 }
556 return (rval);
557 }
558
559 static int
560 re_miibus_writereg(device_t dev, int phy, int reg, int data)
561 {
562 struct rl_softc *sc;
563 u_int16_t re8139_reg = 0;
564 int rval = 0;
565
566 sc = device_get_softc(dev);
567
568 if (sc->rl_type == RL_8169) {
569 rval = re_gmii_writereg(dev, phy, reg, data);
570 return (rval);
571 }
572
573 switch (reg) {
574 case MII_BMCR:
575 re8139_reg = RL_BMCR;
576 if (sc->rl_type == RL_8139CPLUS) {
577 /* 8139C+ has different bit layout. */
578 data &= ~(BMCR_LOOP | BMCR_ISO);
579 }
580 break;
581 case MII_BMSR:
582 re8139_reg = RL_BMSR;
583 break;
584 case MII_ANAR:
585 re8139_reg = RL_ANAR;
586 break;
587 case MII_ANER:
588 re8139_reg = RL_ANER;
589 break;
590 case MII_ANLPAR:
591 re8139_reg = RL_LPAR;
592 break;
593 case MII_PHYIDR1:
594 case MII_PHYIDR2:
595 return (0);
596 break;
597 default:
598 device_printf(sc->rl_dev, "bad phy register\n");
599 return (0);
600 }
601 CSR_WRITE_2(sc, re8139_reg, data);
602 return (0);
603 }
604
605 static void
606 re_miibus_statchg(device_t dev)
607 {
608 struct rl_softc *sc;
609 struct ifnet *ifp;
610 struct mii_data *mii;
611
612 sc = device_get_softc(dev);
613 mii = device_get_softc(sc->rl_miibus);
614 ifp = sc->rl_ifp;
615 if (mii == NULL || ifp == NULL ||
616 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
617 return;
618
619 sc->rl_flags &= ~RL_FLAG_LINK;
620 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
621 (IFM_ACTIVE | IFM_AVALID)) {
622 switch (IFM_SUBTYPE(mii->mii_media_active)) {
623 case IFM_10_T:
624 case IFM_100_TX:
625 sc->rl_flags |= RL_FLAG_LINK;
626 break;
627 case IFM_1000_T:
628 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
629 break;
630 sc->rl_flags |= RL_FLAG_LINK;
631 break;
632 default:
633 break;
634 }
635 }
636 /*
637 * RealTek controllers do not provide any interface to the RX/TX
638 * MACs for resolved speed, duplex and flow-control parameters.
639 */
640 }
641
642 /*
643 * Set the RX configuration and 64-bit multicast hash filter.
644 */
645 static void
646 re_set_rxmode(struct rl_softc *sc)
647 {
648 struct ifnet *ifp;
649 struct ifmultiaddr *ifma;
650 uint32_t hashes[2] = { 0, 0 };
651 uint32_t h, rxfilt;
652
653 RL_LOCK_ASSERT(sc);
654
655 ifp = sc->rl_ifp;
656
657 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
658 if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
659 rxfilt |= RL_RXCFG_EARLYOFF;
660 else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
661 rxfilt |= RL_RXCFG_EARLYOFFV2;
662
663 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
664 if (ifp->if_flags & IFF_PROMISC)
665 rxfilt |= RL_RXCFG_RX_ALLPHYS;
666 /*
667 		 * Unlike other hardware, we have to explicitly set
668 * RL_RXCFG_RX_MULTI to receive multicast frames in
669 * promiscuous mode.
670 */
671 rxfilt |= RL_RXCFG_RX_MULTI;
672 hashes[0] = hashes[1] = 0xffffffff;
673 goto done;
674 }
675
676 if_maddr_rlock(ifp);
677 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
678 if (ifma->ifma_addr->sa_family != AF_LINK)
679 continue;
680 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
681 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
682 if (h < 32)
683 hashes[0] |= (1 << h);
684 else
685 hashes[1] |= (1 << (h - 32));
686 }
687 if_maddr_runlock(ifp);
688
689 if (hashes[0] != 0 || hashes[1] != 0) {
690 /*
691 * For some unfathomable reason, RealTek decided to
692 * reverse the order of the multicast hash registers
693 * in the PCI Express parts. This means we have to
694 * write the hash pattern in reverse order for those
695 * devices.
696 */
697 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
698 h = bswap32(hashes[0]);
699 hashes[0] = bswap32(hashes[1]);
700 hashes[1] = h;
701 }
702 rxfilt |= RL_RXCFG_RX_MULTI;
703 }
704
705 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
706 /* Disable multicast filtering due to silicon bug. */
707 hashes[0] = 0xffffffff;
708 hashes[1] = 0xffffffff;
709 }
710
711 done:
712 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
713 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
714 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
715 }
716
717 static void
718 re_reset(struct rl_softc *sc)
719 {
720 int i;
721
722 RL_LOCK_ASSERT(sc);
723
724 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
725
726 for (i = 0; i < RL_TIMEOUT; i++) {
727 DELAY(10);
728 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
729 break;
730 }
731 if (i == RL_TIMEOUT)
732 device_printf(sc->rl_dev, "reset never completed!\n");
733
734 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
735 CSR_WRITE_1(sc, 0x82, 1);
736 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
737 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
738 }
739
740 #ifdef RE_DIAG
741
742 /*
743 * The following routine is designed to test for a defect on some
744 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
745 * lines connected to the bus, however for a 32-bit only card, they
746 * should be pulled high. The result of this defect is that the
747 * NIC will not work right if you plug it into a 64-bit slot: DMA
748 * operations will be done with 64-bit transfers, which will fail
749 * because the 64-bit data lines aren't connected.
750 *
751  * There's no way to work around this (short of taking a soldering
752  * iron to the board); however, we can detect it. The method we use
753 * here is to put the NIC into digital loopback mode, set the receiver
754 * to promiscuous mode, and then try to send a frame. We then compare
755 * the frame data we sent to what was received. If the data matches,
756 * then the NIC is working correctly, otherwise we know the user has
757 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
758 * slot. In the latter case, there's no way the NIC can work correctly,
759 * so we print out a message on the console and abort the device attach.
760 */
761
762 static int
763 re_diag(struct rl_softc *sc)
764 {
765 struct ifnet *ifp = sc->rl_ifp;
766 struct mbuf *m0;
767 struct ether_header *eh;
768 struct rl_desc *cur_rx;
769 u_int16_t status;
770 u_int32_t rxstat;
771 int total_len, i, error = 0, phyaddr;
772 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
773 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
774
775 /* Allocate a single mbuf */
776 MGETHDR(m0, M_NOWAIT, MT_DATA);
777 if (m0 == NULL)
778 return (ENOBUFS);
779
780 RL_LOCK(sc);
781
782 /*
783 * Initialize the NIC in test mode. This sets the chip up
784 * so that it can send and receive frames, but performs the
785 * following special functions:
786 * - Puts receiver in promiscuous mode
787 * - Enables digital loopback mode
788 * - Leaves interrupts turned off
789 */
790
791 ifp->if_flags |= IFF_PROMISC;
792 sc->rl_testmode = 1;
793 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
794 re_init_locked(sc);
795 sc->rl_flags |= RL_FLAG_LINK;
796 if (sc->rl_type == RL_8169)
797 phyaddr = 1;
798 else
799 phyaddr = 0;
800
801 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
802 for (i = 0; i < RL_TIMEOUT; i++) {
803 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
804 if (!(status & BMCR_RESET))
805 break;
806 }
807
808 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
809 CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
810
811 DELAY(100000);
812
813 /* Put some data in the mbuf */
814
815 eh = mtod(m0, struct ether_header *);
816 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
817 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
818 eh->ether_type = htons(ETHERTYPE_IP);
819 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
820
821 /*
822 * Queue the packet, start transmission.
823 * Note: IF_HANDOFF() ultimately calls re_start() for us.
824 */
825
826 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
827 RL_UNLOCK(sc);
828 /* XXX: re_diag must not be called when in ALTQ mode */
829 IF_HANDOFF(&ifp->if_snd, m0, ifp);
830 RL_LOCK(sc);
831 m0 = NULL;
832
833 /* Wait for it to propagate through the chip */
834
835 DELAY(100000);
836 for (i = 0; i < RL_TIMEOUT; i++) {
837 status = CSR_READ_2(sc, RL_ISR);
838 CSR_WRITE_2(sc, RL_ISR, status);
839 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
840 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
841 break;
842 DELAY(10);
843 }
844
845 if (i == RL_TIMEOUT) {
846 device_printf(sc->rl_dev,
847 "diagnostic failed, failed to receive packet in"
848 " loopback mode\n");
849 error = EIO;
850 goto done;
851 }
852
853 /*
854 * The packet should have been dumped into the first
855 * entry in the RX DMA ring. Grab it from there.
856 */
857
858 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
859 sc->rl_ldata.rl_rx_list_map,
860 BUS_DMASYNC_POSTREAD);
861 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
862 sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
863 BUS_DMASYNC_POSTREAD);
864 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
865 sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
866
867 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
868 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
869 eh = mtod(m0, struct ether_header *);
870
871 cur_rx = &sc->rl_ldata.rl_rx_list[0];
872 total_len = RL_RXBYTES(cur_rx);
873 rxstat = le32toh(cur_rx->rl_cmdstat);
874
875 if (total_len != ETHER_MIN_LEN) {
876 device_printf(sc->rl_dev,
877 "diagnostic failed, received short packet\n");
878 error = EIO;
879 goto done;
880 }
881
882 /* Test that the received packet data matches what we sent. */
883
884 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
885 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
886 ntohs(eh->ether_type) != ETHERTYPE_IP) {
887 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
888 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
889 dst, ":", src, ":", ETHERTYPE_IP);
890 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
891 eh->ether_dhost, ":", eh->ether_shost, ":",
892 ntohs(eh->ether_type));
893 device_printf(sc->rl_dev, "You may have a defective 32-bit "
894 "NIC plugged into a 64-bit PCI slot.\n");
895 device_printf(sc->rl_dev, "Please re-install the NIC in a "
896 "32-bit slot for proper operation.\n");
897 device_printf(sc->rl_dev, "Read the re(4) man page for more "
898 "details.\n");
899 error = EIO;
900 }
901
902 done:
903 /* Turn interface off, release resources */
904
905 sc->rl_testmode = 0;
906 sc->rl_flags &= ~RL_FLAG_LINK;
907 ifp->if_flags &= ~IFF_PROMISC;
908 re_stop(sc);
909 if (m0 != NULL)
910 m_freem(m0);
911
912 RL_UNLOCK(sc);
913
914 return (error);
915 }
916
917 #endif
918
919 /*
920 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
921 * IDs against our list and return a device name if we find a match.
922 */
923 static int
924 re_probe(device_t dev)
925 {
926 const struct rl_type *t;
927 uint16_t devid, vendor;
928 uint16_t revid, sdevid;
929 int i;
930
931 vendor = pci_get_vendor(dev);
932 devid = pci_get_device(dev);
933 revid = pci_get_revid(dev);
934 sdevid = pci_get_subdevice(dev);
935
936 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
937 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
938 /*
939 * Only attach to rev. 3 of the Linksys EG1032 adapter.
940 * Rev. 2 is supported by sk(4).
941 */
942 return (ENXIO);
943 }
944 }
945
946 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
947 if (revid != 0x20) {
948 /* 8139, let rl(4) take care of this device. */
949 return (ENXIO);
950 }
951 }
952
953 t = re_devs;
954 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
955 if (vendor == t->rl_vid && devid == t->rl_did) {
956 device_set_desc(dev, t->rl_name);
957 return (BUS_PROBE_DEFAULT);
958 }
959 }
960
961 return (ENXIO);
962 }
963
964 /*
965 * Map a single buffer address.
966 */
967
968 static void
969 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
970 {
971 bus_addr_t *addr;
972
973 if (error)
974 return;
975
976 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
977 addr = arg;
978 *addr = segs->ds_addr;
979 }
980
981 static int
982 re_allocmem(device_t dev, struct rl_softc *sc)
983 {
984 bus_addr_t lowaddr;
985 bus_size_t rx_list_size, tx_list_size;
986 int error;
987 int i;
988
989 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
990 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
991
992 /*
993 * Allocate the parent bus DMA tag appropriate for PCI.
994 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
995 * register should be set. However some RealTek chips are known
996 * to be buggy on DAC handling, therefore disable DAC by limiting
997 * DMA address space to 32bit. PCIe variants of RealTek chips
998 * may not have the limitation.
999 */
1000 lowaddr = BUS_SPACE_MAXADDR;
1001 if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
1002 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1003 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
1004 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
1005 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
1006 NULL, NULL, &sc->rl_parent_tag);
1007 if (error) {
1008 device_printf(dev, "could not allocate parent DMA tag\n");
1009 return (error);
1010 }
1011
1012 /*
1013 * Allocate map for TX mbufs.
1014 */
1015 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1016 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1017 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1018 NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1019 if (error) {
1020 device_printf(dev, "could not allocate TX DMA tag\n");
1021 return (error);
1022 }
1023
1024 /*
1025 * Allocate map for RX mbufs.
1026 */
1027
1028 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1029 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1030 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1031 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1032 &sc->rl_ldata.rl_jrx_mtag);
1033 if (error) {
1034 device_printf(dev,
1035 "could not allocate jumbo RX DMA tag\n");
1036 return (error);
1037 }
1038 }
1039 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1040 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1041 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1042 if (error) {
1043 device_printf(dev, "could not allocate RX DMA tag\n");
1044 return (error);
1045 }
1046
1047 /*
1048 * Allocate map for TX descriptor list.
1049 */
1050 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1051 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1052 NULL, tx_list_size, 1, tx_list_size, 0,
1053 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1054 if (error) {
1055 device_printf(dev, "could not allocate TX DMA ring tag\n");
1056 return (error);
1057 }
1058
1059 /* Allocate DMA'able memory for the TX ring */
1060
1061 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1062 (void **)&sc->rl_ldata.rl_tx_list,
1063 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1064 &sc->rl_ldata.rl_tx_list_map);
1065 if (error) {
1066 device_printf(dev, "could not allocate TX DMA ring\n");
1067 return (error);
1068 }
1069
1070 /* Load the map for the TX ring. */
1071
1072 sc->rl_ldata.rl_tx_list_addr = 0;
1073 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1074 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1075 tx_list_size, re_dma_map_addr,
1076 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1077 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1078 device_printf(dev, "could not load TX DMA ring\n");
1079 return (ENOMEM);
1080 }
1081
1082 /* Create DMA maps for TX buffers */
1083
1084 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1085 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1086 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1087 if (error) {
1088 device_printf(dev, "could not create DMA map for TX\n");
1089 return (error);
1090 }
1091 }
1092
1093 /*
1094 * Allocate map for RX descriptor list.
1095 */
1096 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1097 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1098 NULL, rx_list_size, 1, rx_list_size, 0,
1099 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1100 if (error) {
1101 device_printf(dev, "could not create RX DMA ring tag\n");
1102 return (error);
1103 }
1104
1105 /* Allocate DMA'able memory for the RX ring */
1106
1107 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1108 (void **)&sc->rl_ldata.rl_rx_list,
1109 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1110 &sc->rl_ldata.rl_rx_list_map);
1111 if (error) {
1112 device_printf(dev, "could not allocate RX DMA ring\n");
1113 return (error);
1114 }
1115
1116 /* Load the map for the RX ring. */
1117
1118 sc->rl_ldata.rl_rx_list_addr = 0;
1119 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1120 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1121 rx_list_size, re_dma_map_addr,
1122 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1123 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1124 device_printf(dev, "could not load RX DMA ring\n");
1125 return (ENOMEM);
1126 }
1127
1128 /* Create DMA maps for RX buffers */
1129
1130 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1131 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1132 &sc->rl_ldata.rl_jrx_sparemap);
1133 if (error) {
1134 device_printf(dev,
1135 "could not create spare DMA map for jumbo RX\n");
1136 return (error);
1137 }
1138 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1139 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1140 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1141 if (error) {
1142 device_printf(dev,
1143 "could not create DMA map for jumbo RX\n");
1144 return (error);
1145 }
1146 }
1147 }
1148 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1149 &sc->rl_ldata.rl_rx_sparemap);
1150 if (error) {
1151 device_printf(dev, "could not create spare DMA map for RX\n");
1152 return (error);
1153 }
1154 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1155 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1156 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1157 if (error) {
1158 device_printf(dev, "could not create DMA map for RX\n");
1159 return (error);
1160 }
1161 }
1162
1163 /* Create DMA map for statistics. */
1164 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1165 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1166 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1167 &sc->rl_ldata.rl_stag);
1168 if (error) {
1169 device_printf(dev, "could not create statistics DMA tag\n");
1170 return (error);
1171 }
1172 /* Allocate DMA'able memory for statistics. */
1173 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1174 (void **)&sc->rl_ldata.rl_stats,
1175 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1176 &sc->rl_ldata.rl_smap);
1177 if (error) {
1178 device_printf(dev,
1179 "could not allocate statistics DMA memory\n");
1180 return (error);
1181 }
1182 /* Load the map for statistics. */
1183 sc->rl_ldata.rl_stats_addr = 0;
1184 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1185 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1186 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1187 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1188 device_printf(dev, "could not load statistics DMA memory\n");
1189 return (ENOMEM);
1190 }
1191
1192 return (0);
1193 }
1194
1195 /*
1196 * Attach the interface. Allocate softc structures, do ifmedia
1197 * setup and ethernet/BPF attach.
1198 */
1199 static int
1200 re_attach(device_t dev)
1201 {
1202 u_char eaddr[ETHER_ADDR_LEN];
1203 u_int16_t as[ETHER_ADDR_LEN / 2];
1204 struct rl_softc *sc;
1205 struct ifnet *ifp;
1206 const struct rl_hwrev *hw_rev;
1207 int capmask, error = 0, hwrev, i, msic, msixc,
1208 phy, reg, rid;
1209 u_int32_t cap, ctl;
1210 u_int16_t devid, re_did = 0;
1211 uint8_t cfg;
1212
1213 sc = device_get_softc(dev);
1214 sc->rl_dev = dev;
1215
1216 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1217 MTX_DEF);
1218 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1219
1220 /*
1221 * Map control/status registers.
1222 */
1223 pci_enable_busmaster(dev);
1224
1225 devid = pci_get_device(dev);
1226 /*
1227 * Prefer memory space register mapping over IO space.
1228 	 * Because the RTL8169SC does not seem to work when memory mapping
1229 	 * is used, always activate I/O mapping for that device.
1230 */
1231 if (devid == RT_DEVICEID_8169SC)
1232 prefer_iomap = 1;
1233 if (prefer_iomap == 0) {
1234 sc->rl_res_id = PCIR_BAR(1);
1235 sc->rl_res_type = SYS_RES_MEMORY;
1236 /* RTL8168/8101E seems to use different BARs. */
1237 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1238 sc->rl_res_id = PCIR_BAR(2);
1239 } else {
1240 sc->rl_res_id = PCIR_BAR(0);
1241 sc->rl_res_type = SYS_RES_IOPORT;
1242 }
1243 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1244 &sc->rl_res_id, RF_ACTIVE);
1245 if (sc->rl_res == NULL && prefer_iomap == 0) {
1246 sc->rl_res_id = PCIR_BAR(0);
1247 sc->rl_res_type = SYS_RES_IOPORT;
1248 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1249 &sc->rl_res_id, RF_ACTIVE);
1250 }
1251 if (sc->rl_res == NULL) {
1252 device_printf(dev, "couldn't map ports/memory\n");
1253 error = ENXIO;
1254 goto fail;
1255 }
1256
1257 sc->rl_btag = rman_get_bustag(sc->rl_res);
1258 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1259
1260 msic = pci_msi_count(dev);
1261 msixc = pci_msix_count(dev);
1262 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
1263 sc->rl_flags |= RL_FLAG_PCIE;
1264 sc->rl_expcap = reg;
1265 }
1266 if (bootverbose) {
1267 device_printf(dev, "MSI count : %d\n", msic);
1268 device_printf(dev, "MSI-X count : %d\n", msixc);
1269 }
1270 if (msix_disable > 0)
1271 msixc = 0;
1272 if (msi_disable > 0)
1273 msic = 0;
1274 /* Prefer MSI-X to MSI. */
1275 if (msixc > 0) {
1276 msixc = RL_MSI_MESSAGES;
1277 rid = PCIR_BAR(4);
1278 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1279 &rid, RF_ACTIVE);
1280 if (sc->rl_res_pba == NULL) {
1281 device_printf(sc->rl_dev,
1282 "could not allocate MSI-X PBA resource\n");
1283 }
1284 if (sc->rl_res_pba != NULL &&
1285 pci_alloc_msix(dev, &msixc) == 0) {
1286 if (msixc == RL_MSI_MESSAGES) {
1287 device_printf(dev, "Using %d MSI-X message\n",
1288 msixc);
1289 sc->rl_flags |= RL_FLAG_MSIX;
1290 } else
1291 pci_release_msi(dev);
1292 }
1293 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1294 if (sc->rl_res_pba != NULL)
1295 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1296 sc->rl_res_pba);
1297 sc->rl_res_pba = NULL;
1298 msixc = 0;
1299 }
1300 }
1301 /* Prefer MSI to INTx. */
1302 if (msixc == 0 && msic > 0) {
1303 msic = RL_MSI_MESSAGES;
1304 if (pci_alloc_msi(dev, &msic) == 0) {
1305 if (msic == RL_MSI_MESSAGES) {
1306 device_printf(dev, "Using %d MSI message\n",
1307 msic);
1308 sc->rl_flags |= RL_FLAG_MSI;
1309 /* Explicitly set MSI enable bit. */
1310 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1311 cfg = CSR_READ_1(sc, RL_CFG2);
1312 cfg |= RL_CFG2_MSI;
1313 CSR_WRITE_1(sc, RL_CFG2, cfg);
1314 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1315 } else
1316 pci_release_msi(dev);
1317 }
1318 if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1319 msic = 0;
1320 }
1321
1322 /* Allocate interrupt */
1323 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1324 rid = 0;
1325 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1326 RF_SHAREABLE | RF_ACTIVE);
1327 if (sc->rl_irq[0] == NULL) {
1328 device_printf(dev, "couldn't allocate IRQ resources\n");
1329 error = ENXIO;
1330 goto fail;
1331 }
1332 } else {
1333 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1334 sc->rl_irq[i] = bus_alloc_resource_any(dev,
1335 SYS_RES_IRQ, &rid, RF_ACTIVE);
1336 if (sc->rl_irq[i] == NULL) {
1337 device_printf(dev,
1338 				    "couldn't allocate IRQ resources for "
1339 "message %d\n", rid);
1340 error = ENXIO;
1341 goto fail;
1342 }
1343 }
1344 }
1345
1346 if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1347 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1348 cfg = CSR_READ_1(sc, RL_CFG2);
1349 if ((cfg & RL_CFG2_MSI) != 0) {
1350 device_printf(dev, "turning off MSI enable bit.\n");
1351 cfg &= ~RL_CFG2_MSI;
1352 CSR_WRITE_1(sc, RL_CFG2, cfg);
1353 }
1354 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1355 }
1356
1357 /* Disable ASPM L0S/L1. */
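	/*
	 * Note: the 0x0003 mask used below corresponds to the ASPM L0s/L1
	 * enable bits of the PCIe Link Control register (PCIEM_LINK_CTL_ASPMC
	 * in pcireg.h), so clearing it disables both power states.
	 */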
1358 if (sc->rl_expcap != 0) {
1359 cap = pci_read_config(dev, sc->rl_expcap +
1360 PCIER_LINK_CAP, 2);
1361 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1362 ctl = pci_read_config(dev, sc->rl_expcap +
1363 PCIER_LINK_CTL, 2);
1364 if ((ctl & 0x0003) != 0) {
1365 ctl &= ~0x0003;
1366 pci_write_config(dev, sc->rl_expcap +
1367 PCIER_LINK_CTL, ctl, 2);
1368 device_printf(dev, "ASPM disabled\n");
1369 }
1370 } else
1371 device_printf(dev, "no ASPM capability\n");
1372 }
1373
1374 hw_rev = re_hwrevs;
1375 hwrev = CSR_READ_4(sc, RL_TXCFG);
1376 switch (hwrev & 0x70000000) {
1377 case 0x00000000:
1378 case 0x10000000:
1379 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1380 hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1381 break;
1382 default:
1383 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1384 sc->rl_macrev = hwrev & 0x00700000;
1385 hwrev &= RL_TXCFG_HWREV;
1386 break;
1387 }
1388 device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
1389 while (hw_rev->rl_desc != NULL) {
1390 if (hw_rev->rl_rev == hwrev) {
1391 sc->rl_type = hw_rev->rl_type;
1392 sc->rl_hwrev = hw_rev;
1393 break;
1394 }
1395 hw_rev++;
1396 }
1397 if (hw_rev->rl_desc == NULL) {
1398 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1399 error = ENXIO;
1400 goto fail;
1401 }
1402
1403 switch (hw_rev->rl_rev) {
1404 case RL_HWREV_8139CPLUS:
1405 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1406 break;
1407 case RL_HWREV_8100E:
1408 case RL_HWREV_8101E:
1409 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1410 break;
1411 case RL_HWREV_8102E:
1412 case RL_HWREV_8102EL:
1413 case RL_HWREV_8102EL_SPIN1:
1414 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1415 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1416 RL_FLAG_AUTOPAD;
1417 break;
1418 case RL_HWREV_8103E:
1419 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1420 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1421 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1422 break;
1423 case RL_HWREV_8401E:
1424 case RL_HWREV_8105E:
1425 case RL_HWREV_8105E_SPIN1:
1426 case RL_HWREV_8106E:
1427 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1428 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1429 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1430 break;
1431 case RL_HWREV_8402:
1432 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1433 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1434 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1435 RL_FLAG_CMDSTOP_WAIT_TXQ;
1436 break;
1437 case RL_HWREV_8168B_SPIN1:
1438 case RL_HWREV_8168B_SPIN2:
1439 sc->rl_flags |= RL_FLAG_WOLRXENB;
1440 /* FALLTHROUGH */
1441 case RL_HWREV_8168B_SPIN3:
1442 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1443 break;
1444 case RL_HWREV_8168C_SPIN2:
1445 sc->rl_flags |= RL_FLAG_MACSLEEP;
1446 /* FALLTHROUGH */
1447 case RL_HWREV_8168C:
1448 if (sc->rl_macrev == 0x00200000)
1449 sc->rl_flags |= RL_FLAG_MACSLEEP;
1450 /* FALLTHROUGH */
1451 case RL_HWREV_8168CP:
1452 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1453 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1454 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1455 break;
1456 case RL_HWREV_8168D:
1457 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1458 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1459 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1460 RL_FLAG_WOL_MANLINK;
1461 break;
1462 case RL_HWREV_8168DP:
1463 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1464 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1465 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1466 break;
1467 case RL_HWREV_8168E:
1468 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1469 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1470 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1471 RL_FLAG_WOL_MANLINK;
1472 break;
1473 case RL_HWREV_8168E_VL:
1474 case RL_HWREV_8168F:
1475 sc->rl_flags |= RL_FLAG_EARLYOFF;
1476 /* FALLTHROUGH */
1477 case RL_HWREV_8411:
1478 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1479 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1480 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1481 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1482 break;
1483 case RL_HWREV_8168EP:
1484 case RL_HWREV_8168G:
1485 case RL_HWREV_8411B:
1486 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1487 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1488 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1489 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
1490 RL_FLAG_8168G_PLUS;
1491 break;
1492 case RL_HWREV_8168GU:
1493 case RL_HWREV_8168H:
1494 if (pci_get_device(dev) == RT_DEVICEID_8101E) {
1495 /* RTL8106E(US), RTL8107E */
1496 sc->rl_flags |= RL_FLAG_FASTETHER;
1497 } else
1498 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1499
1500 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1501 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1502 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
1503 RL_FLAG_8168G_PLUS;
1504 break;
1505 case RL_HWREV_8169_8110SB:
1506 case RL_HWREV_8169_8110SBL:
1507 case RL_HWREV_8169_8110SC:
1508 case RL_HWREV_8169_8110SCE:
1509 sc->rl_flags |= RL_FLAG_PHYWAKE;
1510 /* FALLTHROUGH */
1511 case RL_HWREV_8169:
1512 case RL_HWREV_8169S:
1513 case RL_HWREV_8110S:
1514 sc->rl_flags |= RL_FLAG_MACRESET;
1515 break;
1516 default:
1517 break;
1518 }
1519
1520 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1521 sc->rl_cfg0 = RL_8139_CFG0;
1522 sc->rl_cfg1 = RL_8139_CFG1;
1523 sc->rl_cfg2 = 0;
1524 sc->rl_cfg3 = RL_8139_CFG3;
1525 sc->rl_cfg4 = RL_8139_CFG4;
1526 sc->rl_cfg5 = RL_8139_CFG5;
1527 } else {
1528 sc->rl_cfg0 = RL_CFG0;
1529 sc->rl_cfg1 = RL_CFG1;
1530 sc->rl_cfg2 = RL_CFG2;
1531 sc->rl_cfg3 = RL_CFG3;
1532 sc->rl_cfg4 = RL_CFG4;
1533 sc->rl_cfg5 = RL_CFG5;
1534 }
1535
1536 /* Reset the adapter. */
1537 RL_LOCK(sc);
1538 re_reset(sc);
1539 RL_UNLOCK(sc);
1540
1541 /* Enable PME. */
1542 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1543 cfg = CSR_READ_1(sc, sc->rl_cfg1);
1544 cfg |= RL_CFG1_PME;
1545 CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1546 cfg = CSR_READ_1(sc, sc->rl_cfg5);
1547 cfg &= RL_CFG5_PME_STS;
1548 CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1549 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1550
1551 if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1552 /*
1553 * XXX Should have a better way to extract station
1554 * address from EEPROM.
1555 */
1556 for (i = 0; i < ETHER_ADDR_LEN; i++)
1557 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1558 } else {
1559 sc->rl_eewidth = RL_9356_ADDR_LEN;
1560 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1561 if (re_did != 0x8129)
1562 sc->rl_eewidth = RL_9346_ADDR_LEN;
1563
1564 /*
1565 * Get station address from the EEPROM.
1566 */
1567 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1568 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1569 as[i] = le16toh(as[i]);
1570 bcopy(as, eaddr, ETHER_ADDR_LEN);
1571 }
1572
1573 if (sc->rl_type == RL_8169) {
1574 /* Set RX length mask and number of descriptors. */
1575 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1576 sc->rl_txstart = RL_GTXSTART;
1577 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1578 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1579 } else {
1580 /* Set RX length mask and number of descriptors. */
1581 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1582 sc->rl_txstart = RL_TXSTART;
1583 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1584 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1585 }
1586
1587 error = re_allocmem(dev, sc);
1588 if (error)
1589 goto fail;
1590 re_add_sysctls(sc);
1591
1592 ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1593 if (ifp == NULL) {
1594 device_printf(dev, "can not if_alloc()\n");
1595 error = ENOSPC;
1596 goto fail;
1597 }
1598
1599 /* Take controller out of deep sleep mode. */
1600 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1601 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1602 CSR_WRITE_1(sc, RL_GPIO,
1603 CSR_READ_1(sc, RL_GPIO) | 0x01);
1604 else
1605 CSR_WRITE_1(sc, RL_GPIO,
1606 CSR_READ_1(sc, RL_GPIO) & ~0x01);
1607 }
1608
1609 /* Take PHY out of power down mode. */
1610 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1611 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1612 if (hw_rev->rl_rev == RL_HWREV_8401E)
1613 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1614 }
1615 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1616 re_gmii_writereg(dev, 1, 0x1f, 0);
1617 re_gmii_writereg(dev, 1, 0x0e, 0);
1618 }
1619
1620 ifp->if_softc = sc;
1621 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1622 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1623 ifp->if_ioctl = re_ioctl;
1624 ifp->if_start = re_start;
1625 /*
1626 	 * The RTL8168/8111C generates frames with a bad IP checksum if the
1627 	 * packet has IP options, so disable TX checksum offloading.
1628 */
1629 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1630 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1631 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
1632 ifp->if_hwassist = 0;
1633 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4;
1634 } else {
1635 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1636 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1637 }
1638 ifp->if_hwassist |= CSUM_TSO;
1639 ifp->if_capenable = ifp->if_capabilities;
1640 ifp->if_init = re_init;
1641 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1642 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1643 IFQ_SET_READY(&ifp->if_snd);
1644
1645 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1646
1647 #define RE_PHYAD_INTERNAL 0
1648
1649 /* Do MII setup. */
1650 phy = RE_PHYAD_INTERNAL;
1651 if (sc->rl_type == RL_8169)
1652 phy = 1;
1653 capmask = BMSR_DEFCAPMASK;
1654 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
1655 capmask &= ~BMSR_EXTSTAT;
1656 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1657 re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1658 if (error != 0) {
1659 device_printf(dev, "attaching PHYs failed\n");
1660 goto fail;
1661 }
1662
1663 /*
1664 * Call MI attach routine.
1665 */
1666 ether_ifattach(ifp, eaddr);
1667
1668 /* VLAN capability setup */
1669 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1670 if (ifp->if_capabilities & IFCAP_HWCSUM)
1671 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1672 /* Enable WOL if PM is supported. */
1673 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
1674 ifp->if_capabilities |= IFCAP_WOL;
1675 ifp->if_capenable = ifp->if_capabilities;
1676 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1677 /*
1678 * Don't enable TSO by default. It is known to generate
1679 	 * corrupted TCP segments (bad TCP options) under certain
1680 	 * circumstances.
1681 */
1682 ifp->if_hwassist &= ~CSUM_TSO;
1683 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
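	/*
	 * TSO can still be enabled administratively at run time if desired,
	 * e.g. (sketch) "ifconfig re0 tso"; the request is handled through
	 * the SIOCSIFCAP path in re_ioctl().
	 */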
1684 #ifdef DEVICE_POLLING
1685 ifp->if_capabilities |= IFCAP_POLLING;
1686 #endif
1687 /*
1688 * Tell the upper layer(s) we support long frames.
1689 * Must appear after the call to ether_ifattach() because
1690 * ether_ifattach() sets ifi_hdrlen to the default value.
1691 */
1692 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1693
1694 #ifdef DEV_NETMAP
1695 re_netmap_attach(sc);
1696 #endif /* DEV_NETMAP */
1697
1698 #ifdef RE_DIAG
1699 /*
1700 * Perform hardware diagnostic on the original RTL8169.
1701 * Some 32-bit cards were incorrectly wired and would
1702 * malfunction if plugged into a 64-bit slot.
1703 */
1704 if (hwrev == RL_HWREV_8169) {
1705 error = re_diag(sc);
1706 if (error) {
1707 device_printf(dev,
1708 "attach aborted due to hardware diag failure\n");
1709 ether_ifdetach(ifp);
1710 goto fail;
1711 }
1712 }
1713 #endif
1714
1715 #ifdef RE_TX_MODERATION
1716 intr_filter = 1;
1717 #endif
1718 /* Hook interrupt last to avoid having to lock softc */
1719 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1720 intr_filter == 0) {
1721 error = bus_setup_intr(dev, sc->rl_irq[0],
1722 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1723 &sc->rl_intrhand[0]);
1724 } else {
1725 error = bus_setup_intr(dev, sc->rl_irq[0],
1726 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1727 &sc->rl_intrhand[0]);
1728 }
1729 if (error) {
1730 device_printf(dev, "couldn't set up irq\n");
1731 ether_ifdetach(ifp);
1732 }
1733
1734 fail:
1735 if (error)
1736 re_detach(dev);
1737
1738 return (error);
1739 }
1740
1741 /*
1742 * Shutdown hardware and free up resources. This can be called any
1743 * time after the mutex has been initialized. It is called in both
1744 * the error case in attach and the normal detach case so it needs
1745 * to be careful about only freeing resources that have actually been
1746 * allocated.
1747 */
1748 static int
1749 re_detach(device_t dev)
1750 {
1751 struct rl_softc *sc;
1752 struct ifnet *ifp;
1753 int i, rid;
1754
1755 sc = device_get_softc(dev);
1756 ifp = sc->rl_ifp;
1757 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1758
1759 /* These should only be active if attach succeeded */
1760 if (device_is_attached(dev)) {
1761 #ifdef DEVICE_POLLING
1762 if (ifp->if_capenable & IFCAP_POLLING)
1763 ether_poll_deregister(ifp);
1764 #endif
1765 RL_LOCK(sc);
1766 #if 0
1767 sc->suspended = 1;
1768 #endif
1769 re_stop(sc);
1770 RL_UNLOCK(sc);
1771 callout_drain(&sc->rl_stat_callout);
1772 taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1773 /*
1774 * Force off the IFF_UP flag here, in case someone
1775 * still had a BPF descriptor attached to this
1776 * interface. If they do, ether_ifdetach() will cause
1777 * the BPF code to try and clear the promisc mode
1778 * flag, which will bubble down to re_ioctl(),
1779 * which will try to call re_init() again. This will
1780 * turn the NIC back on and restart the MII ticker,
1781 * which will panic the system when the kernel tries
1782 * to invoke the re_tick() function that isn't there
1783 * anymore.
1784 */
1785 ifp->if_flags &= ~IFF_UP;
1786 ether_ifdetach(ifp);
1787 }
1788 if (sc->rl_miibus)
1789 device_delete_child(dev, sc->rl_miibus);
1790 bus_generic_detach(dev);
1791
1792 /*
1793 * The rest is resource deallocation, so we should already be
1794 * stopped here.
1795 */
1796
1797 if (sc->rl_intrhand[0] != NULL) {
1798 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1799 sc->rl_intrhand[0] = NULL;
1800 }
1801 if (ifp != NULL) {
1802 #ifdef DEV_NETMAP
1803 netmap_detach(ifp);
1804 #endif /* DEV_NETMAP */
1805 if_free(ifp);
1806 }
1807 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1808 rid = 0;
1809 else
1810 rid = 1;
1811 if (sc->rl_irq[0] != NULL) {
1812 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1813 sc->rl_irq[0] = NULL;
1814 }
1815 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1816 pci_release_msi(dev);
1817 if (sc->rl_res_pba) {
1818 rid = PCIR_BAR(4);
1819 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1820 }
1821 if (sc->rl_res)
1822 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1823 sc->rl_res);
1824
1825 /* Unload and free the RX DMA ring memory and map */
1826
1827 if (sc->rl_ldata.rl_rx_list_tag) {
1828 if (sc->rl_ldata.rl_rx_list_map)
1829 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1830 sc->rl_ldata.rl_rx_list_map);
1831 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
1832 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1833 sc->rl_ldata.rl_rx_list,
1834 sc->rl_ldata.rl_rx_list_map);
1835 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1836 }
1837
1838 /* Unload and free the TX DMA ring memory and map */
1839
1840 if (sc->rl_ldata.rl_tx_list_tag) {
1841 if (sc->rl_ldata.rl_tx_list_map)
1842 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1843 sc->rl_ldata.rl_tx_list_map);
1844 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
1845 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1846 sc->rl_ldata.rl_tx_list,
1847 sc->rl_ldata.rl_tx_list_map);
1848 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1849 }
1850
1851 /* Destroy all the RX and TX buffer maps */
1852
1853 if (sc->rl_ldata.rl_tx_mtag) {
1854 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1855 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1856 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1857 sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1858 }
1859 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1860 }
1861 if (sc->rl_ldata.rl_rx_mtag) {
1862 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1863 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1864 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1865 sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1866 }
1867 if (sc->rl_ldata.rl_rx_sparemap)
1868 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1869 sc->rl_ldata.rl_rx_sparemap);
1870 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1871 }
1872 if (sc->rl_ldata.rl_jrx_mtag) {
1873 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1874 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1875 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1876 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1877 }
1878 if (sc->rl_ldata.rl_jrx_sparemap)
1879 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1880 sc->rl_ldata.rl_jrx_sparemap);
1881 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1882 }
1883 /* Unload and free the stats buffer and map */
1884
1885 if (sc->rl_ldata.rl_stag) {
1886 if (sc->rl_ldata.rl_smap)
1887 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1888 sc->rl_ldata.rl_smap);
1889 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
1890 bus_dmamem_free(sc->rl_ldata.rl_stag,
1891 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1892 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1893 }
1894
1895 if (sc->rl_parent_tag)
1896 bus_dma_tag_destroy(sc->rl_parent_tag);
1897
1898 mtx_destroy(&sc->rl_mtx);
1899
1900 return (0);
1901 }
1902
1903 static __inline void
1904 re_discard_rxbuf(struct rl_softc *sc, int idx)
1905 {
1906 struct rl_desc *desc;
1907 struct rl_rxdesc *rxd;
1908 uint32_t cmdstat;
1909
1910 if (sc->rl_ifp->if_mtu > RL_MTU &&
1911 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1912 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1913 else
1914 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1915 desc = &sc->rl_ldata.rl_rx_list[idx];
1916 desc->rl_vlanctl = 0;
1917 cmdstat = rxd->rx_size;
1918 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1919 cmdstat |= RL_RDESC_CMD_EOR;
1920 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1921 }
1922
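/*
 * Illustrative sketch, not part of the driver: the RX descriptor
 * handoff used by re_discard_rxbuf() above (and by re_newbuf() below),
 * reduced to its essentials.  It assumes a ring of "count" descriptors;
 * the last entry carries the EOR (end-of-ring) bit so the chip wraps
 * back to slot 0, and setting OWN is what hands the descriptor to the
 * NIC, so it is written last.
 */
#if 0
static void
example_give_rxdesc_to_nic(struct rl_desc *ring, int idx, int count,
    uint32_t buflen)
{
	uint32_t cmdstat;

	cmdstat = buflen;			/* buffer size for the DMA engine */
	if (idx == count - 1)
		cmdstat |= RL_RDESC_CMD_EOR;	/* last slot: wrap the ring */
	ring[idx].rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
}
#endif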
1923 static int
1924 re_newbuf(struct rl_softc *sc, int idx)
1925 {
1926 struct mbuf *m;
1927 struct rl_rxdesc *rxd;
1928 bus_dma_segment_t segs[1];
1929 bus_dmamap_t map;
1930 struct rl_desc *desc;
1931 uint32_t cmdstat;
1932 int error, nsegs;
1933
1934 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1935 if (m == NULL)
1936 return (ENOBUFS);
1937
1938 m->m_len = m->m_pkthdr.len = MCLBYTES;
1939 #ifdef RE_FIXUP_RX
1940 /*
1941 * This is part of an evil trick to deal with non-x86 platforms.
1942 * The RealTek chip requires RX buffers to be aligned on 64-bit
1943 * boundaries, but that will hose non-x86 machines. To get around
1944 * this, we leave some empty space at the start of each buffer
1945 * and for non-x86 hosts, we copy the buffer back six bytes
1946 * to achieve word alignment. This is slightly more efficient
1947 * than allocating a new buffer, copying the contents, and
1948 * discarding the old buffer.
1949 */
1950 m_adj(m, RE_ETHER_ALIGN);
1951 #endif
1952 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1953 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1954 if (error != 0) {
1955 m_freem(m);
1956 return (ENOBUFS);
1957 }
1958 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1959
1960 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1961 if (rxd->rx_m != NULL) {
1962 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1963 BUS_DMASYNC_POSTREAD);
1964 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1965 }
1966
1967 rxd->rx_m = m;
1968 map = rxd->rx_dmamap;
1969 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1970 rxd->rx_size = segs[0].ds_len;
1971 sc->rl_ldata.rl_rx_sparemap = map;
1972 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1973 BUS_DMASYNC_PREREAD);
1974
1975 desc = &sc->rl_ldata.rl_rx_list[idx];
1976 desc->rl_vlanctl = 0;
1977 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1978 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1979 cmdstat = segs[0].ds_len;
1980 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1981 cmdstat |= RL_RDESC_CMD_EOR;
1982 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1983
1984 return (0);
1985 }
1986
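/*
 * Illustrative sketch, not part of the driver: the spare-dmamap swap
 * used by re_newbuf() above.  A new mbuf is loaded into the spare map
 * first; only when that succeeds is the old buffer unloaded and the
 * maps swapped, so a failed allocation never leaves the ring slot
 * without a mapped buffer.
 */
#if 0
static void
example_swap_spare_map(struct rl_rxdesc *rxd, bus_dmamap_t *sparemap)
{
	bus_dmamap_t map;

	map = rxd->rx_dmamap;		/* old, now unloaded, map */
	rxd->rx_dmamap = *sparemap;	/* slot takes the freshly loaded map */
	*sparemap = map;		/* old map becomes the new spare */
}
#endif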
1987 static int
1988 re_jumbo_newbuf(struct rl_softc *sc, int idx)
1989 {
1990 struct mbuf *m;
1991 struct rl_rxdesc *rxd;
1992 bus_dma_segment_t segs[1];
1993 bus_dmamap_t map;
1994 struct rl_desc *desc;
1995 uint32_t cmdstat;
1996 int error, nsegs;
1997
1998 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1999 if (m == NULL)
2000 return (ENOBUFS);
2001 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
2002 #ifdef RE_FIXUP_RX
2003 m_adj(m, RE_ETHER_ALIGN);
2004 #endif
2005 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
2006 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
2007 if (error != 0) {
2008 m_freem(m);
2009 return (ENOBUFS);
2010 }
2011 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2012
2013 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
2014 if (rxd->rx_m != NULL) {
2015 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2016 BUS_DMASYNC_POSTREAD);
2017 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
2018 }
2019
2020 rxd->rx_m = m;
2021 map = rxd->rx_dmamap;
2022 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2023 rxd->rx_size = segs[0].ds_len;
2024 sc->rl_ldata.rl_jrx_sparemap = map;
2025 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2026 BUS_DMASYNC_PREREAD);
2027
2028 desc = &sc->rl_ldata.rl_rx_list[idx];
2029 desc->rl_vlanctl = 0;
2030 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2031 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2032 cmdstat = segs[0].ds_len;
2033 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2034 cmdstat |= RL_RDESC_CMD_EOR;
2035 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2036
2037 return (0);
2038 }
2039
2040 #ifdef RE_FIXUP_RX
2041 static __inline void
2042 re_fixup_rx(struct mbuf *m)
2043 {
2044 int i;
2045 uint16_t *src, *dst;
2046
2047 src = mtod(m, uint16_t *);
2048 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2049
2050 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2051 *dst++ = *src++;
2052
2053 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2054 }
2055 #endif
2056
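/*
 * Illustrative sketch, not part of the driver: the arithmetic behind
 * RE_FIXUP_RX above.  Assuming RE_ETHER_ALIGN is 8 and ETHER_ALIGN is
 * 2 (their usual values), the payload is received 8 bytes into the
 * cluster and re_fixup_rx() then copies it back 8 - 2 = 6 bytes, which
 * leaves the IP header on a 4-byte boundary without reallocating the
 * buffer.
 */
#if 0
static void
example_fixup_arithmetic(void)
{
	int rx_offset = 8;			/* assumed RE_ETHER_ALIGN */
	int wanted_offset = 2;			/* assumed ETHER_ALIGN */
	int copy_back = rx_offset - wanted_offset;

	printf("payload is copied back %d bytes\n", copy_back);
}
#endif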
2057 static int
2058 re_tx_list_init(struct rl_softc *sc)
2059 {
2060 struct rl_desc *desc;
2061 int i;
2062
2063 RL_LOCK_ASSERT(sc);
2064
2065 bzero(sc->rl_ldata.rl_tx_list,
2066 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2067 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2068 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2069 #ifdef DEV_NETMAP
2070 re_netmap_tx_init(sc);
2071 #endif /* DEV_NETMAP */
2072 /* Set EOR. */
2073 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2074 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2075
2076 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2077 sc->rl_ldata.rl_tx_list_map,
2078 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2079
2080 sc->rl_ldata.rl_tx_prodidx = 0;
2081 sc->rl_ldata.rl_tx_considx = 0;
2082 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2083
2084 return (0);
2085 }
2086
2087 static int
2088 re_rx_list_init(struct rl_softc *sc)
2089 {
2090 int error, i;
2091
2092 bzero(sc->rl_ldata.rl_rx_list,
2093 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2094 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2095 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2096 if ((error = re_newbuf(sc, i)) != 0)
2097 return (error);
2098 }
2099 #ifdef DEV_NETMAP
2100 re_netmap_rx_init(sc);
2101 #endif /* DEV_NETMAP */
2102
2103 /* Flush the RX descriptors */
2104
2105 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2106 sc->rl_ldata.rl_rx_list_map,
2107 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2108
2109 sc->rl_ldata.rl_rx_prodidx = 0;
2110 sc->rl_head = sc->rl_tail = NULL;
2111 sc->rl_int_rx_act = 0;
2112
2113 return (0);
2114 }
2115
2116 static int
2117 re_jrx_list_init(struct rl_softc *sc)
2118 {
2119 int error, i;
2120
2121 bzero(sc->rl_ldata.rl_rx_list,
2122 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2123 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2124 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2125 if ((error = re_jumbo_newbuf(sc, i)) != 0)
2126 return (error);
2127 }
2128
2129 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2130 sc->rl_ldata.rl_rx_list_map,
2131 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2132
2133 sc->rl_ldata.rl_rx_prodidx = 0;
2134 sc->rl_head = sc->rl_tail = NULL;
2135 sc->rl_int_rx_act = 0;
2136
2137 return (0);
2138 }
2139
2140 /*
2141 * RX handler for C+ and 8169. For the gigE chips, we support
2142 * the reception of jumbo frames that have been fragmented
2143 * across multiple 2K mbuf cluster buffers.
2144 */
2145 static int
2146 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2147 {
2148 struct mbuf *m;
2149 struct ifnet *ifp;
2150 int i, rxerr, total_len;
2151 struct rl_desc *cur_rx;
2152 u_int32_t rxstat, rxvlan;
2153 int jumbo, maxpkt = 16, rx_npkts = 0;
2154
2155 RL_LOCK_ASSERT(sc);
2156
2157 ifp = sc->rl_ifp;
2158 #ifdef DEV_NETMAP
2159 if (netmap_rx_irq(ifp, 0, &rx_npkts))
2160 		return (0);
2161 #endif /* DEV_NETMAP */
2162 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2163 jumbo = 1;
2164 else
2165 jumbo = 0;
2166
2167 /* Invalidate the descriptor memory */
2168
2169 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2170 sc->rl_ldata.rl_rx_list_map,
2171 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2172
2173 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2174 i = RL_RX_DESC_NXT(sc, i)) {
2175 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2176 break;
2177 cur_rx = &sc->rl_ldata.rl_rx_list[i];
2178 rxstat = le32toh(cur_rx->rl_cmdstat);
2179 if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2180 break;
2181 total_len = rxstat & sc->rl_rxlenmask;
2182 rxvlan = le32toh(cur_rx->rl_vlanctl);
2183 if (jumbo != 0)
2184 m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2185 else
2186 m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2187
2188 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2189 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2190 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2191 /*
2192 * RTL8168C or later controllers do not
2193 * support multi-fragment packet.
2194 			 * support multi-fragment packets.
2195 re_discard_rxbuf(sc, i);
2196 continue;
2197 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2198 if (re_newbuf(sc, i) != 0) {
2199 /*
2200 * If this is part of a multi-fragment packet,
2201 * discard all the pieces.
2202 */
2203 if (sc->rl_head != NULL) {
2204 m_freem(sc->rl_head);
2205 sc->rl_head = sc->rl_tail = NULL;
2206 }
2207 re_discard_rxbuf(sc, i);
2208 continue;
2209 }
2210 m->m_len = RE_RX_DESC_BUFLEN;
2211 if (sc->rl_head == NULL)
2212 sc->rl_head = sc->rl_tail = m;
2213 else {
2214 m->m_flags &= ~M_PKTHDR;
2215 sc->rl_tail->m_next = m;
2216 sc->rl_tail = m;
2217 }
2218 continue;
2219 }
2220
2221 /*
2222 * NOTE: for the 8139C+, the frame length field
2223 * is always 12 bits in size, but for the gigE chips,
2224 * it is 13 bits (since the max RX frame length is 16K).
2225 * Unfortunately, all 32 bits in the status word
2226 * were already used, so to make room for the extra
2227 * length bit, RealTek took out the 'frame alignment
2228 * error' bit and shifted the other status bits
2229 * over one slot. The OWN, EOR, FS and LS bits are
2230 * still in the same places. We have already extracted
2231 * the frame length and checked the OWN bit, so rather
2232 * than using an alternate bit mapping, we shift the
2233 * status bits one space to the right so we can evaluate
2234 		 * them using the 8169 status as though it were in the
2235 * same format as that of the 8139C+.
2236 */
2237 if (sc->rl_type == RL_8169)
2238 rxstat >>= 1;
2239
2240 /*
2241 		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2242 		 * set, but if the CRC error bit is clear, it is still a valid frame.
2243 */
2244 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2245 rxerr = 1;
2246 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2247 total_len > 8191 &&
2248 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2249 rxerr = 0;
2250 if (rxerr != 0) {
2251 ifp->if_ierrors++;
2252 /*
2253 * If this is part of a multi-fragment packet,
2254 * discard all the pieces.
2255 */
2256 if (sc->rl_head != NULL) {
2257 m_freem(sc->rl_head);
2258 sc->rl_head = sc->rl_tail = NULL;
2259 }
2260 re_discard_rxbuf(sc, i);
2261 continue;
2262 }
2263 }
2264
2265 /*
2266 * If allocating a replacement mbuf fails,
2267 * reload the current one.
2268 */
2269 if (jumbo != 0)
2270 rxerr = re_jumbo_newbuf(sc, i);
2271 else
2272 rxerr = re_newbuf(sc, i);
2273 if (rxerr != 0) {
2274 ifp->if_iqdrops++;
2275 if (sc->rl_head != NULL) {
2276 m_freem(sc->rl_head);
2277 sc->rl_head = sc->rl_tail = NULL;
2278 }
2279 re_discard_rxbuf(sc, i);
2280 continue;
2281 }
2282
2283 if (sc->rl_head != NULL) {
2284 if (jumbo != 0)
2285 m->m_len = total_len;
2286 else {
2287 m->m_len = total_len % RE_RX_DESC_BUFLEN;
2288 if (m->m_len == 0)
2289 m->m_len = RE_RX_DESC_BUFLEN;
2290 }
2291 /*
2292 			 * Special case: if there are 4 bytes or fewer
2293 			 * in this buffer, the mbuf can be discarded:
2294 			 * the last 4 bytes are the CRC, which we don't
2295 			 * care about anyway.
2296 */
2297 if (m->m_len <= ETHER_CRC_LEN) {
2298 sc->rl_tail->m_len -=
2299 (ETHER_CRC_LEN - m->m_len);
2300 m_freem(m);
2301 } else {
2302 m->m_len -= ETHER_CRC_LEN;
2303 m->m_flags &= ~M_PKTHDR;
2304 sc->rl_tail->m_next = m;
2305 }
2306 m = sc->rl_head;
2307 sc->rl_head = sc->rl_tail = NULL;
2308 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2309 } else
2310 m->m_pkthdr.len = m->m_len =
2311 (total_len - ETHER_CRC_LEN);
2312
2313 #ifdef RE_FIXUP_RX
2314 re_fixup_rx(m);
2315 #endif
2316 ifp->if_ipackets++;
2317 m->m_pkthdr.rcvif = ifp;
2318
2319 /* Do RX checksumming if enabled */
2320
2321 if (ifp->if_capenable & IFCAP_RXCSUM) {
2322 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2323 /* Check IP header checksum */
2324 if (rxstat & RL_RDESC_STAT_PROTOID)
2325 m->m_pkthdr.csum_flags |=
2326 CSUM_IP_CHECKED;
2327 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2328 m->m_pkthdr.csum_flags |=
2329 CSUM_IP_VALID;
2330
2331 /* Check TCP/UDP checksum */
2332 if ((RL_TCPPKT(rxstat) &&
2333 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2334 (RL_UDPPKT(rxstat) &&
2335 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2336 m->m_pkthdr.csum_flags |=
2337 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2338 m->m_pkthdr.csum_data = 0xffff;
2339 }
2340 } else {
2341 /*
2342 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2343 */
2344 if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2345 (rxvlan & RL_RDESC_IPV4))
2346 m->m_pkthdr.csum_flags |=
2347 CSUM_IP_CHECKED;
2348 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2349 (rxvlan & RL_RDESC_IPV4))
2350 m->m_pkthdr.csum_flags |=
2351 CSUM_IP_VALID;
2352 if (((rxstat & RL_RDESC_STAT_TCP) &&
2353 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2354 ((rxstat & RL_RDESC_STAT_UDP) &&
2355 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2356 m->m_pkthdr.csum_flags |=
2357 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2358 m->m_pkthdr.csum_data = 0xffff;
2359 }
2360 }
2361 }
2362 maxpkt--;
2363 if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2364 m->m_pkthdr.ether_vtag =
2365 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2366 m->m_flags |= M_VLANTAG;
2367 }
2368 RL_UNLOCK(sc);
2369 (*ifp->if_input)(ifp, m);
2370 RL_LOCK(sc);
2371 rx_npkts++;
2372 }
2373
2374 /* Flush the RX DMA ring */
2375
2376 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2377 sc->rl_ldata.rl_rx_list_map,
2378 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2379
2380 sc->rl_ldata.rl_rx_prodidx = i;
2381
2382 if (rx_npktsp != NULL)
2383 *rx_npktsp = rx_npkts;
2384 if (maxpkt)
2385 return (EAGAIN);
2386
2387 return (0);
2388 }
2389
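/*
 * Illustrative sketch, not part of the driver: the rl_head/rl_tail
 * chaining that re_rxeof() above uses to reassemble a frame split
 * across several RX buffers.  Fragments are linked through m_next and
 * only the head keeps its packet header flag.
 */
#if 0
static void
example_append_fragment(struct rl_softc *sc, struct mbuf *m)
{
	if (sc->rl_head == NULL)
		sc->rl_head = sc->rl_tail = m;	/* first fragment */
	else {
		m->m_flags &= ~M_PKTHDR;	/* only the head is a pkthdr */
		sc->rl_tail->m_next = m;	/* link and advance the tail */
		sc->rl_tail = m;
	}
}
#endif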
2390 static void
2391 re_txeof(struct rl_softc *sc)
2392 {
2393 struct ifnet *ifp;
2394 struct rl_txdesc *txd;
2395 u_int32_t txstat;
2396 int cons;
2397
2398 cons = sc->rl_ldata.rl_tx_considx;
2399 if (cons == sc->rl_ldata.rl_tx_prodidx)
2400 return;
2401
2402 ifp = sc->rl_ifp;
2403 #ifdef DEV_NETMAP
2404 if (netmap_tx_irq(ifp, 0))
2405 return;
2406 #endif /* DEV_NETMAP */
2407 /* Invalidate the TX descriptor list */
2408 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2409 sc->rl_ldata.rl_tx_list_map,
2410 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2411
2412 for (; cons != sc->rl_ldata.rl_tx_prodidx;
2413 cons = RL_TX_DESC_NXT(sc, cons)) {
2414 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2415 if (txstat & RL_TDESC_STAT_OWN)
2416 break;
2417 /*
2418 * We only stash mbufs in the last descriptor
2419 * in a fragment chain, which also happens to
2420 * be the only place where the TX status bits
2421 * are valid.
2422 */
2423 if (txstat & RL_TDESC_CMD_EOF) {
2424 txd = &sc->rl_ldata.rl_tx_desc[cons];
2425 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2426 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2427 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2428 txd->tx_dmamap);
2429 KASSERT(txd->tx_m != NULL,
2430 ("%s: freeing NULL mbufs!", __func__));
2431 m_freem(txd->tx_m);
2432 txd->tx_m = NULL;
2433 if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2434 RL_TDESC_STAT_COLCNT))
2435 ifp->if_collisions++;
2436 if (txstat & RL_TDESC_STAT_TXERRSUM)
2437 ifp->if_oerrors++;
2438 else
2439 ifp->if_opackets++;
2440 }
2441 sc->rl_ldata.rl_tx_free++;
2442 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2443 }
2444 sc->rl_ldata.rl_tx_considx = cons;
2445
2446 /* No changes made to the TX ring, so no flush needed */
2447
2448 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2449 #ifdef RE_TX_MODERATION
2450 /*
2451 * If not all descriptors have been reaped yet, reload
2452 * the timer so that we will eventually get another
2453 * interrupt that will cause us to re-enter this routine.
2454 * This is done in case the transmitter has gone idle.
2455 */
2456 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2457 #endif
2458 } else
2459 sc->rl_watchdog_timer = 0;
2460 }
2461
2462 static void
2463 re_tick(void *xsc)
2464 {
2465 struct rl_softc *sc;
2466 struct mii_data *mii;
2467
2468 sc = xsc;
2469
2470 RL_LOCK_ASSERT(sc);
2471
2472 mii = device_get_softc(sc->rl_miibus);
2473 mii_tick(mii);
2474 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2475 re_miibus_statchg(sc->rl_dev);
2476 /*
2477 	 * Reclaim transmitted frames here. Strictly speaking this is not
2478 	 * necessary here, but it ensures periodic reclamation even when
2479 	 * Tx completion interrupts are lost, which seems to happen on
2480 	 * PCIe based controllers under certain situations.
2481 */
2482 re_txeof(sc);
2483 re_watchdog(sc);
2484 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2485 }
2486
2487 #ifdef DEVICE_POLLING
2488 static int
2489 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2490 {
2491 struct rl_softc *sc = ifp->if_softc;
2492 int rx_npkts = 0;
2493
2494 RL_LOCK(sc);
2495 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2496 rx_npkts = re_poll_locked(ifp, cmd, count);
2497 RL_UNLOCK(sc);
2498 return (rx_npkts);
2499 }
2500
2501 static int
2502 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2503 {
2504 struct rl_softc *sc = ifp->if_softc;
2505 int rx_npkts;
2506
2507 RL_LOCK_ASSERT(sc);
2508
2509 sc->rxcycles = count;
2510 re_rxeof(sc, &rx_npkts);
2511 re_txeof(sc);
2512
2513 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2514 re_start_locked(ifp);
2515
2516 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2517 u_int16_t status;
2518
2519 status = CSR_READ_2(sc, RL_ISR);
2520 if (status == 0xffff)
2521 return (rx_npkts);
2522 if (status)
2523 CSR_WRITE_2(sc, RL_ISR, status);
2524 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2525 (sc->rl_flags & RL_FLAG_PCIE))
2526 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2527
2528 /*
2529 * XXX check behaviour on receiver stalls.
2530 */
2531
2532 if (status & RL_ISR_SYSTEM_ERR) {
2533 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2534 re_init_locked(sc);
2535 }
2536 }
2537 return (rx_npkts);
2538 }
2539 #endif /* DEVICE_POLLING */
2540
2541 static int
2542 re_intr(void *arg)
2543 {
2544 struct rl_softc *sc;
2545 uint16_t status;
2546
2547 sc = arg;
2548
2549 status = CSR_READ_2(sc, RL_ISR);
2550 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2551 return (FILTER_STRAY);
2552 CSR_WRITE_2(sc, RL_IMR, 0);
2553
2554 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2555
2556 return (FILTER_HANDLED);
2557 }
2558
2559 static void
2560 re_int_task(void *arg, int npending)
2561 {
2562 struct rl_softc *sc;
2563 struct ifnet *ifp;
2564 u_int16_t status;
2565 int rval = 0;
2566
2567 sc = arg;
2568 ifp = sc->rl_ifp;
2569
2570 RL_LOCK(sc);
2571
2572 status = CSR_READ_2(sc, RL_ISR);
2573 CSR_WRITE_2(sc, RL_ISR, status);
2574
2575 if (sc->suspended ||
2576 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2577 RL_UNLOCK(sc);
2578 return;
2579 }
2580
2581 #ifdef DEVICE_POLLING
2582 if (ifp->if_capenable & IFCAP_POLLING) {
2583 RL_UNLOCK(sc);
2584 return;
2585 }
2586 #endif
2587
2588 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2589 rval = re_rxeof(sc, NULL);
2590
2591 /*
2592 * Some chips will ignore a second TX request issued
2593 * while an existing transmission is in progress. If
2594 * the transmitter goes idle but there are still
2595 * packets waiting to be sent, we need to restart the
2596 * channel here to flush them out. This only seems to
2597 * be required with the PCIe devices.
2598 */
2599 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2600 (sc->rl_flags & RL_FLAG_PCIE))
2601 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2602 if (status & (
2603 #ifdef RE_TX_MODERATION
2604 RL_ISR_TIMEOUT_EXPIRED|
2605 #else
2606 RL_ISR_TX_OK|
2607 #endif
2608 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2609 re_txeof(sc);
2610
2611 if (status & RL_ISR_SYSTEM_ERR) {
2612 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2613 re_init_locked(sc);
2614 }
2615
2616 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2617 re_start_locked(ifp);
2618
2619 RL_UNLOCK(sc);
2620
2621 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2622 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2623 return;
2624 }
2625
2626 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2627 }
2628
2629 static void
2630 re_intr_msi(void *xsc)
2631 {
2632 struct rl_softc *sc;
2633 struct ifnet *ifp;
2634 uint16_t intrs, status;
2635
2636 sc = xsc;
2637 RL_LOCK(sc);
2638
2639 ifp = sc->rl_ifp;
2640 #ifdef DEVICE_POLLING
2641 if (ifp->if_capenable & IFCAP_POLLING) {
2642 RL_UNLOCK(sc);
2643 return;
2644 }
2645 #endif
2646 /* Disable interrupts. */
2647 CSR_WRITE_2(sc, RL_IMR, 0);
2648 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2649 RL_UNLOCK(sc);
2650 return;
2651 }
2652
2653 intrs = RL_INTRS_CPLUS;
2654 status = CSR_READ_2(sc, RL_ISR);
2655 CSR_WRITE_2(sc, RL_ISR, status);
2656 if (sc->rl_int_rx_act > 0) {
2657 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2658 RL_ISR_RX_OVERRUN);
2659 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2660 RL_ISR_RX_OVERRUN);
2661 }
2662
2663 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2664 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2665 re_rxeof(sc, NULL);
2666 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2667 if (sc->rl_int_rx_mod != 0 &&
2668 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2669 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2670 /* Rearm one-shot timer. */
2671 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2672 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2673 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2674 sc->rl_int_rx_act = 1;
2675 } else {
2676 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2677 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2678 sc->rl_int_rx_act = 0;
2679 }
2680 }
2681 }
2682
2683 /*
2684 * Some chips will ignore a second TX request issued
2685 * while an existing transmission is in progress. If
2686 * the transmitter goes idle but there are still
2687 * packets waiting to be sent, we need to restart the
2688 * channel here to flush them out. This only seems to
2689 * be required with the PCIe devices.
2690 */
2691 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2692 (sc->rl_flags & RL_FLAG_PCIE))
2693 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2694 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2695 re_txeof(sc);
2696
2697 if (status & RL_ISR_SYSTEM_ERR) {
2698 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2699 re_init_locked(sc);
2700 }
2701
2702 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2703 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2704 re_start_locked(ifp);
2705 CSR_WRITE_2(sc, RL_IMR, intrs);
2706 }
2707 RL_UNLOCK(sc);
2708 }
2709
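/*
 * Illustrative sketch, not part of the driver: the one-shot RX
 * moderation used by re_intr_msi() above.  After RX work has been
 * serviced, the RX interrupt sources are masked and the hardware
 * timer is armed, so further RX completions are batched until the
 * RL_ISR_TIMEOUT_EXPIRED interrupt fires.
 */
#if 0
static uint16_t
example_rearm_rx_timer(struct rl_softc *sc, uint16_t intrs)
{
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);	/* arm the one-shot timer */
	intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
	sc->rl_int_rx_act = 1;			/* remember RX intrs are masked */
	return (intrs);				/* caller writes this to RL_IMR */
}
#endif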
2710 static int
2711 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2712 {
2713 struct rl_txdesc *txd, *txd_last;
2714 bus_dma_segment_t segs[RL_NTXSEGS];
2715 bus_dmamap_t map;
2716 struct mbuf *m_new;
2717 struct rl_desc *desc;
2718 int nsegs, prod;
2719 int i, error, ei, si;
2720 int padlen;
2721 uint32_t cmdstat, csum_flags, vlanctl;
2722
2723 RL_LOCK_ASSERT(sc);
2724 M_ASSERTPKTHDR((*m_head));
2725
2726 /*
2727 * With some of the RealTek chips, using the checksum offload
2728 * support in conjunction with the autopadding feature results
2729 * in the transmission of corrupt frames. For example, if we
2730 * need to send a really small IP fragment that's less than 60
2731 * bytes in size, and IP header checksumming is enabled, the
2732 * resulting ethernet frame that appears on the wire will
2733 	 * have a garbled payload. To work around this, if TX IP checksum
2734 * offload is enabled, we always manually pad short frames out
2735 * to the minimum ethernet frame size.
2736 */
2737 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2738 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2739 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2740 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2741 if (M_WRITABLE(*m_head) == 0) {
2742 /* Get a writable copy. */
2743 m_new = m_dup(*m_head, M_NOWAIT);
2744 m_freem(*m_head);
2745 if (m_new == NULL) {
2746 *m_head = NULL;
2747 return (ENOBUFS);
2748 }
2749 *m_head = m_new;
2750 }
2751 if ((*m_head)->m_next != NULL ||
2752 M_TRAILINGSPACE(*m_head) < padlen) {
2753 m_new = m_defrag(*m_head, M_NOWAIT);
2754 if (m_new == NULL) {
2755 m_freem(*m_head);
2756 *m_head = NULL;
2757 return (ENOBUFS);
2758 }
2759 } else
2760 m_new = *m_head;
2761
2762 /*
2763 * Manually pad short frames, and zero the pad space
2764 * to avoid leaking data.
2765 */
2766 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2767 m_new->m_pkthdr.len += padlen;
2768 m_new->m_len = m_new->m_pkthdr.len;
2769 *m_head = m_new;
2770 }
2771
2772 prod = sc->rl_ldata.rl_tx_prodidx;
2773 txd = &sc->rl_ldata.rl_tx_desc[prod];
2774 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2775 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2776 if (error == EFBIG) {
2777 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2778 if (m_new == NULL) {
2779 m_freem(*m_head);
2780 *m_head = NULL;
2781 return (ENOBUFS);
2782 }
2783 *m_head = m_new;
2784 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2785 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2786 if (error != 0) {
2787 m_freem(*m_head);
2788 *m_head = NULL;
2789 return (error);
2790 }
2791 } else if (error != 0)
2792 return (error);
2793 if (nsegs == 0) {
2794 m_freem(*m_head);
2795 *m_head = NULL;
2796 return (EIO);
2797 }
2798
2799 /* Check for number of available descriptors. */
2800 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2801 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2802 return (ENOBUFS);
2803 }
2804
2805 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2806 BUS_DMASYNC_PREWRITE);
2807
2808 /*
2809 * Set up checksum offload. Note: checksum offload bits must
2810 * appear in all descriptors of a multi-descriptor transmit
2811 	 * attempt; testing done with an 8169 chip showed this to be
2812 	 * a hard requirement.
2813 */
2814 vlanctl = 0;
2815 csum_flags = 0;
2816 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2817 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2818 csum_flags |= RL_TDESC_CMD_LGSEND;
2819 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2820 RL_TDESC_CMD_MSSVALV2_SHIFT);
2821 } else {
2822 csum_flags |= RL_TDESC_CMD_LGSEND |
2823 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2824 RL_TDESC_CMD_MSSVAL_SHIFT);
2825 }
2826 } else {
2827 /*
2828 		 * Unconditionally enable IP checksumming if a TCP or UDP
2829 		 * checksum is required. Otherwise, requesting a TCP/UDP
2830 		 * checksum alone has no effect.
2831 */
2832 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2833 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2834 csum_flags |= RL_TDESC_CMD_IPCSUM;
2835 if (((*m_head)->m_pkthdr.csum_flags &
2836 CSUM_TCP) != 0)
2837 csum_flags |= RL_TDESC_CMD_TCPCSUM;
2838 if (((*m_head)->m_pkthdr.csum_flags &
2839 CSUM_UDP) != 0)
2840 csum_flags |= RL_TDESC_CMD_UDPCSUM;
2841 } else {
2842 vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2843 if (((*m_head)->m_pkthdr.csum_flags &
2844 CSUM_TCP) != 0)
2845 vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2846 if (((*m_head)->m_pkthdr.csum_flags &
2847 CSUM_UDP) != 0)
2848 vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2849 }
2850 }
2851 }
2852
2853 /*
2854 * Set up hardware VLAN tagging. Note: vlan tag info must
2855 * appear in all descriptors of a multi-descriptor
2856 * transmission attempt.
2857 */
2858 if ((*m_head)->m_flags & M_VLANTAG)
2859 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2860 RL_TDESC_VLANCTL_TAG;
2861
2862 si = prod;
2863 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2864 desc = &sc->rl_ldata.rl_tx_list[prod];
2865 desc->rl_vlanctl = htole32(vlanctl);
2866 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2867 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2868 cmdstat = segs[i].ds_len;
2869 if (i != 0)
2870 cmdstat |= RL_TDESC_CMD_OWN;
2871 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2872 cmdstat |= RL_TDESC_CMD_EOR;
2873 desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2874 sc->rl_ldata.rl_tx_free--;
2875 }
2876 /* Update producer index. */
2877 sc->rl_ldata.rl_tx_prodidx = prod;
2878
2879 /* Set EOF on the last descriptor. */
2880 ei = RL_TX_DESC_PRV(sc, prod);
2881 desc = &sc->rl_ldata.rl_tx_list[ei];
2882 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2883
2884 desc = &sc->rl_ldata.rl_tx_list[si];
2885 /* Set SOF and transfer ownership of packet to the chip. */
2886 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2887
2888 /*
2889 	 * Ensure that the map for this transmission
2890 * is placed at the array index of the last descriptor
2891 * in this chain. (Swap last and first dmamaps.)
2892 */
2893 txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2894 map = txd->tx_dmamap;
2895 txd->tx_dmamap = txd_last->tx_dmamap;
2896 txd_last->tx_dmamap = map;
2897 txd_last->tx_m = *m_head;
2898
2899 return (0);
2900 }
2901
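/*
 * Illustrative sketch, not part of the driver: how re_encap() above
 * marks up a multi-segment transmit.  Every descriptor carries the
 * checksum/VLAN bits, the first gets SOF, the last gets EOF, and OWN
 * is written on the first descriptor last of all so the chip never
 * sees a half-built chain.
 */
#if 0
static void
example_mark_tx_chain(struct rl_desc *ring, int first, int last, int count)
{
	int i;

	for (i = first; ; i = (i + 1) % count) {
		if (i != first)
			ring[i].rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
		if (i == last)
			break;
	}
	ring[last].rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	ring[first].rl_cmdstat |= htole32(RL_TDESC_CMD_SOF | RL_TDESC_CMD_OWN);
}
#endif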
2902 static void
2903 re_start(struct ifnet *ifp)
2904 {
2905 struct rl_softc *sc;
2906
2907 sc = ifp->if_softc;
2908 RL_LOCK(sc);
2909 re_start_locked(ifp);
2910 RL_UNLOCK(sc);
2911 }
2912
2913 /*
2914 * Main transmit routine for C+ and gigE NICs.
2915 */
2916 static void
2917 re_start_locked(struct ifnet *ifp)
2918 {
2919 struct rl_softc *sc;
2920 struct mbuf *m_head;
2921 int queued;
2922
2923 sc = ifp->if_softc;
2924
2925 #ifdef DEV_NETMAP
2926 /* XXX is this necessary ? */
2927 if (ifp->if_capenable & IFCAP_NETMAP) {
2928 struct netmap_kring *kring = &NA(ifp)->tx_rings[0];
2929 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2930 /* kick the tx unit */
2931 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2932 #ifdef RE_TX_MODERATION
2933 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2934 #endif
2935 sc->rl_watchdog_timer = 5;
2936 }
2937 return;
2938 }
2939 #endif /* DEV_NETMAP */
2940
2941 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2942 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2943 return;
2944
2945 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2946 sc->rl_ldata.rl_tx_free > 1;) {
2947 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2948 if (m_head == NULL)
2949 break;
2950
2951 if (re_encap(sc, &m_head) != 0) {
2952 if (m_head == NULL)
2953 break;
2954 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2955 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2956 break;
2957 }
2958
2959 /*
2960 * If there's a BPF listener, bounce a copy of this frame
2961 		 * to it.
2962 */
2963 ETHER_BPF_MTAP(ifp, m_head);
2964
2965 queued++;
2966 }
2967
2968 if (queued == 0) {
2969 #ifdef RE_TX_MODERATION
2970 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2971 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2972 #endif
2973 return;
2974 }
2975
2976 /* Flush the TX descriptors */
2977
2978 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2979 sc->rl_ldata.rl_tx_list_map,
2980 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2981
2982 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2983
2984 #ifdef RE_TX_MODERATION
2985 /*
2986 * Use the countdown timer for interrupt moderation.
2987 * 'TX done' interrupts are disabled. Instead, we reset the
2988 * countdown timer, which will begin counting until it hits
2989 * the value in the TIMERINT register, and then trigger an
2990 * interrupt. Each time we write to the TIMERCNT register,
2991 * the timer count is reset to 0.
2992 */
2993 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2994 #endif
2995
2996 /*
2997 * Set a timeout in case the chip goes out to lunch.
2998 */
2999 sc->rl_watchdog_timer = 5;
3000 }
3001
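/*
 * Illustrative sketch, not part of the driver: the countdown-timer
 * moderation used by re_start_locked() above when RE_TX_MODERATION is
 * defined.  'TX done' interrupts stay masked; each transmit instead
 * rearms the one-shot timer and the RL_ISR_TIMEOUT_EXPIRED interrupt
 * triggers the eventual re_txeof() pass.
 */
#if 0
static void
example_kick_tx_moderated(struct rl_softc *sc)
{
	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);	/* start TX DMA */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);	/* reset/arm the countdown */
	sc->rl_watchdog_timer = 5;		/* recover if everything stalls */
}
#endif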
3002 static void
3003 re_set_jumbo(struct rl_softc *sc, int jumbo)
3004 {
3005
3006 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
3007 pci_set_max_read_req(sc->rl_dev, 4096);
3008 return;
3009 }
3010
3011 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3012 if (jumbo != 0) {
3013 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
3014 RL_CFG3_JUMBO_EN0);
3015 switch (sc->rl_hwrev->rl_rev) {
3016 case RL_HWREV_8168DP:
3017 break;
3018 case RL_HWREV_8168E:
3019 CSR_WRITE_1(sc, sc->rl_cfg4,
3020 CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3021 break;
3022 default:
3023 CSR_WRITE_1(sc, sc->rl_cfg4,
3024 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3025 }
3026 } else {
3027 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3028 ~RL_CFG3_JUMBO_EN0);
3029 switch (sc->rl_hwrev->rl_rev) {
3030 case RL_HWREV_8168DP:
3031 break;
3032 case RL_HWREV_8168E:
3033 CSR_WRITE_1(sc, sc->rl_cfg4,
3034 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3035 break;
3036 default:
3037 CSR_WRITE_1(sc, sc->rl_cfg4,
3038 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3039 }
3040 }
3041 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3042
3043 switch (sc->rl_hwrev->rl_rev) {
3044 case RL_HWREV_8168DP:
3045 pci_set_max_read_req(sc->rl_dev, 4096);
3046 break;
3047 default:
3048 if (jumbo != 0)
3049 pci_set_max_read_req(sc->rl_dev, 512);
3050 else
3051 pci_set_max_read_req(sc->rl_dev, 4096);
3052 }
3053 }
3054
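/*
 * Illustrative sketch, not part of the driver: the config-write
 * bracket used by re_set_jumbo() above and elsewhere.  The CFG
 * registers only accept writes while the EEPROM command register is
 * in "write config" mode, so every modification is wrapped in a
 * WRITECFG/OFF pair.
 */
#if 0
static void
example_cfg_write(struct rl_softc *sc, int reg, uint8_t bits)
{
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);	/* unlock config regs */
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | bits);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);	/* lock them again */
}
#endif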
3055 static void
3056 re_init(void *xsc)
3057 {
3058 struct rl_softc *sc = xsc;
3059
3060 RL_LOCK(sc);
3061 re_init_locked(sc);
3062 RL_UNLOCK(sc);
3063 }
3064
3065 static void
3066 re_init_locked(struct rl_softc *sc)
3067 {
3068 struct ifnet *ifp = sc->rl_ifp;
3069 struct mii_data *mii;
3070 uint32_t reg;
3071 uint16_t cfg;
3072 union {
3073 uint32_t align_dummy;
3074 u_char eaddr[ETHER_ADDR_LEN];
3075 } eaddr;
3076
3077 RL_LOCK_ASSERT(sc);
3078
3079 mii = device_get_softc(sc->rl_miibus);
3080
3081 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3082 return;
3083
3084 /*
3085 * Cancel pending I/O and free all RX/TX buffers.
3086 */
3087 re_stop(sc);
3088
3089 /* Put controller into known state. */
3090 re_reset(sc);
3091
3092 /*
3093 * For C+ mode, initialize the RX descriptors and mbufs.
3094 */
3095 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3096 if (ifp->if_mtu > RL_MTU) {
3097 if (re_jrx_list_init(sc) != 0) {
3098 device_printf(sc->rl_dev,
3099 "no memory for jumbo RX buffers\n");
3100 re_stop(sc);
3101 return;
3102 }
3103 /* Disable checksum offloading for jumbo frames. */
3104 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3105 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3106 } else {
3107 if (re_rx_list_init(sc) != 0) {
3108 device_printf(sc->rl_dev,
3109 "no memory for RX buffers\n");
3110 re_stop(sc);
3111 return;
3112 }
3113 }
3114 re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3115 } else {
3116 if (re_rx_list_init(sc) != 0) {
3117 device_printf(sc->rl_dev, "no memory for RX buffers\n");
3118 re_stop(sc);
3119 return;
3120 }
3121 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3122 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3123 if (ifp->if_mtu > RL_MTU)
3124 pci_set_max_read_req(sc->rl_dev, 512);
3125 else
3126 pci_set_max_read_req(sc->rl_dev, 4096);
3127 }
3128 }
3129 re_tx_list_init(sc);
3130
3131 /*
3132 * Enable C+ RX and TX mode, as well as VLAN stripping and
3133 * RX checksum offload. We must configure the C+ register
3134 * before all others.
3135 */
3136 cfg = RL_CPLUSCMD_PCI_MRW;
3137 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3138 cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3139 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3140 cfg |= RL_CPLUSCMD_VLANSTRIP;
3141 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3142 cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3143 /* XXX magic. */
3144 cfg |= 0x0001;
3145 } else
3146 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3147 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3148 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3149 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3150 reg = 0x000fff00;
3151 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3152 reg |= 0x000000ff;
3153 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3154 reg |= 0x00f00000;
3155 CSR_WRITE_4(sc, 0x7c, reg);
3156 /* Disable interrupt mitigation. */
3157 CSR_WRITE_2(sc, 0xe2, 0);
3158 }
3159 /*
3160 	 * Disable TSO if the interface MTU is greater than the MSS
3161 	 * allowed by the controller.
3162 */
3163 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3164 ifp->if_capenable &= ~IFCAP_TSO4;
3165 ifp->if_hwassist &= ~CSUM_TSO;
3166 }
3167
3168 /*
3169 * Init our MAC address. Even though the chipset
3170 * documentation doesn't mention it, we need to enter "Config
3171 * register write enable" mode to modify the ID registers.
3172 */
3173 /* Copy MAC address on stack to align. */
3174 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3175 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3176 CSR_WRITE_4(sc, RL_IDR0,
3177 htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3178 CSR_WRITE_4(sc, RL_IDR4,
3179 htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3180 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3181
3182 /*
3183 * Load the addresses of the RX and TX lists into the chip.
3184 */
3185
3186 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3187 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3188 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3189 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3190
3191 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3192 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3193 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3194 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3195
3196 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3197 /* Disable RXDV gate. */
3198 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3199 ~0x00080000);
3200 }
3201
3202 /*
3203 * Enable transmit and receive for pre-RTL8168G controllers.
3204 * RX/TX MACs should be enabled before RX/TX configuration.
3205 */
3206 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
3207 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3208
3209 /*
3210 * Set the initial TX configuration.
3211 */
3212 if (sc->rl_testmode) {
3213 if (sc->rl_type == RL_8169)
3214 CSR_WRITE_4(sc, RL_TXCFG,
3215 RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3216 else
3217 CSR_WRITE_4(sc, RL_TXCFG,
3218 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3219 } else
3220 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3221
3222 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3223
3224 /*
3225 * Set the initial RX configuration.
3226 */
3227 re_set_rxmode(sc);
3228
3229 /* Configure interrupt moderation. */
3230 if (sc->rl_type == RL_8169) {
3231 /* Magic from vendor. */
3232 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3233 }
3234
3235 /*
3236 * Enable transmit and receive for RTL8168G and later controllers.
3237 * RX/TX MACs should be enabled after RX/TX configuration.
3238 */
3239 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
3240 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3241
3242 #ifdef DEVICE_POLLING
3243 /*
3244 * Disable interrupts if we are polling.
3245 */
3246 if (ifp->if_capenable & IFCAP_POLLING)
3247 CSR_WRITE_2(sc, RL_IMR, 0);
3248 else /* otherwise ... */
3249 #endif
3250
3251 /*
3252 * Enable interrupts.
3253 */
3254 if (sc->rl_testmode)
3255 CSR_WRITE_2(sc, RL_IMR, 0);
3256 else
3257 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3258 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3259
3260 /* Set initial TX threshold */
3261 sc->rl_txthresh = RL_TX_THRESH_INIT;
3262
3263 /* Start RX/TX process. */
3264 CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3265
3266 /*
3267 * Initialize the timer interrupt register so that
3268 * a timer interrupt will be generated once the timer
3269 * reaches a certain number of ticks. The timer is
3270 * reloaded on each transmit.
3271 */
3272 #ifdef RE_TX_MODERATION
3273 /*
3274 	 * Use the timer interrupt register to moderate TX interrupts,
3275 	 * which dramatically improves the TX frame rate.
3276 */
3277 if (sc->rl_type == RL_8169)
3278 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3279 else
3280 CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3281 #else
3282 /*
3283 	 * Use the timer interrupt register to moderate RX
3284 	 * interrupts.
3285 */
3286 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3287 intr_filter == 0) {
3288 if (sc->rl_type == RL_8169)
3289 CSR_WRITE_4(sc, RL_TIMERINT_8169,
3290 RL_USECS(sc->rl_int_rx_mod));
3291 } else {
3292 if (sc->rl_type == RL_8169)
3293 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3294 }
3295 #endif
3296
3297 /*
3298 * For 8169 gigE NICs, set the max allowed RX packet
3299 * size so we can receive jumbo frames.
3300 */
3301 if (sc->rl_type == RL_8169) {
3302 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3303 /*
3304 			 * For controllers that use the new jumbo frame
3305 			 * scheme, set the maximum jumbo frame size
3306 			 * depending on the controller revision.
3307 */
3308 if (ifp->if_mtu > RL_MTU)
3309 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3310 sc->rl_hwrev->rl_max_mtu +
3311 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3312 ETHER_CRC_LEN);
3313 else
3314 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3315 RE_RX_DESC_BUFLEN);
3316 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3317 sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3318 /* RTL810x has no jumbo frame support. */
3319 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3320 } else
3321 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3322 }
3323
3324 if (sc->rl_testmode)
3325 return;
3326
3327 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3328 RL_CFG1_DRVLOAD);
3329
3330 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3331 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3332
3333 sc->rl_flags &= ~RL_FLAG_LINK;
3334 mii_mediachg(mii);
3335
3336 sc->rl_watchdog_timer = 0;
3337 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3338 }
3339
3340 /*
3341 * Set media options.
3342 */
3343 static int
3344 re_ifmedia_upd(struct ifnet *ifp)
3345 {
3346 struct rl_softc *sc;
3347 struct mii_data *mii;
3348 int error;
3349
3350 sc = ifp->if_softc;
3351 mii = device_get_softc(sc->rl_miibus);
3352 RL_LOCK(sc);
3353 error = mii_mediachg(mii);
3354 RL_UNLOCK(sc);
3355
3356 return (error);
3357 }
3358
3359 /*
3360 * Report current media status.
3361 */
3362 static void
3363 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3364 {
3365 struct rl_softc *sc;
3366 struct mii_data *mii;
3367
3368 sc = ifp->if_softc;
3369 mii = device_get_softc(sc->rl_miibus);
3370
3371 RL_LOCK(sc);
3372 mii_pollstat(mii);
3373 ifmr->ifm_active = mii->mii_media_active;
3374 ifmr->ifm_status = mii->mii_media_status;
3375 RL_UNLOCK(sc);
3376 }
3377
3378 static int
3379 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3380 {
3381 struct rl_softc *sc = ifp->if_softc;
3382 struct ifreq *ifr = (struct ifreq *) data;
3383 struct mii_data *mii;
3384 int error = 0;
3385
3386 switch (command) {
3387 case SIOCSIFMTU:
3388 if (ifr->ifr_mtu < ETHERMIN ||
3389 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3390 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3391 ifr->ifr_mtu > RL_MTU)) {
3392 error = EINVAL;
3393 break;
3394 }
3395 RL_LOCK(sc);
3396 if (ifp->if_mtu != ifr->ifr_mtu) {
3397 ifp->if_mtu = ifr->ifr_mtu;
3398 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3399 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3400 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3401 re_init_locked(sc);
3402 }
3403 if (ifp->if_mtu > RL_TSO_MTU &&
3404 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3405 ifp->if_capenable &= ~(IFCAP_TSO4 |
3406 IFCAP_VLAN_HWTSO);
3407 ifp->if_hwassist &= ~CSUM_TSO;
3408 }
3409 VLAN_CAPABILITIES(ifp);
3410 }
3411 RL_UNLOCK(sc);
3412 break;
3413 case SIOCSIFFLAGS:
3414 RL_LOCK(sc);
3415 if ((ifp->if_flags & IFF_UP) != 0) {
3416 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3417 if (((ifp->if_flags ^ sc->rl_if_flags)
3418 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3419 re_set_rxmode(sc);
3420 } else
3421 re_init_locked(sc);
3422 } else {
3423 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3424 re_stop(sc);
3425 }
3426 sc->rl_if_flags = ifp->if_flags;
3427 RL_UNLOCK(sc);
3428 break;
3429 case SIOCADDMULTI:
3430 case SIOCDELMULTI:
3431 RL_LOCK(sc);
3432 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3433 re_set_rxmode(sc);
3434 RL_UNLOCK(sc);
3435 break;
3436 case SIOCGIFMEDIA:
3437 case SIOCSIFMEDIA:
3438 mii = device_get_softc(sc->rl_miibus);
3439 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3440 break;
3441 case SIOCSIFCAP:
3442 {
3443 int mask, reinit;
3444
3445 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3446 reinit = 0;
3447 #ifdef DEVICE_POLLING
3448 if (mask & IFCAP_POLLING) {
3449 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3450 error = ether_poll_register(re_poll, ifp);
3451 if (error)
3452 return (error);
3453 RL_LOCK(sc);
3454 /* Disable interrupts */
3455 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3456 ifp->if_capenable |= IFCAP_POLLING;
3457 RL_UNLOCK(sc);
3458 } else {
3459 error = ether_poll_deregister(ifp);
3460 /* Enable interrupts. */
3461 RL_LOCK(sc);
3462 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3463 ifp->if_capenable &= ~IFCAP_POLLING;
3464 RL_UNLOCK(sc);
3465 }
3466 }
3467 #endif /* DEVICE_POLLING */
3468 RL_LOCK(sc);
3469 if ((mask & IFCAP_TXCSUM) != 0 &&
3470 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3471 ifp->if_capenable ^= IFCAP_TXCSUM;
3472 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3473 ifp->if_hwassist |= RE_CSUM_FEATURES;
3474 else
3475 ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3476 reinit = 1;
3477 }
3478 if ((mask & IFCAP_RXCSUM) != 0 &&
3479 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3480 ifp->if_capenable ^= IFCAP_RXCSUM;
3481 reinit = 1;
3482 }
3483 if ((mask & IFCAP_TSO4) != 0 &&
3484 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3485 ifp->if_capenable ^= IFCAP_TSO4;
3486 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3487 ifp->if_hwassist |= CSUM_TSO;
3488 else
3489 ifp->if_hwassist &= ~CSUM_TSO;
3490 if (ifp->if_mtu > RL_TSO_MTU &&
3491 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3492 ifp->if_capenable &= ~IFCAP_TSO4;
3493 ifp->if_hwassist &= ~CSUM_TSO;
3494 }
3495 }
3496 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3497 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3498 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3499 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3500 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3501 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3502 /* TSO over VLAN requires VLAN hardware tagging. */
3503 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3504 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3505 reinit = 1;
3506 }
3507 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3508 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3509 IFCAP_VLAN_HWTSO)) != 0)
3510 reinit = 1;
3511 if ((mask & IFCAP_WOL) != 0 &&
3512 (ifp->if_capabilities & IFCAP_WOL) != 0) {
3513 if ((mask & IFCAP_WOL_UCAST) != 0)
3514 ifp->if_capenable ^= IFCAP_WOL_UCAST;
3515 if ((mask & IFCAP_WOL_MCAST) != 0)
3516 ifp->if_capenable ^= IFCAP_WOL_MCAST;
3517 if ((mask & IFCAP_WOL_MAGIC) != 0)
3518 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3519 }
3520 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3521 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3522 re_init_locked(sc);
3523 }
3524 RL_UNLOCK(sc);
3525 VLAN_CAPABILITIES(ifp);
3526 }
3527 break;
3528 default:
3529 error = ether_ioctl(ifp, command, data);
3530 break;
3531 }
3532
3533 return (error);
3534 }
3535
3536 static void
3537 re_watchdog(struct rl_softc *sc)
3538 {
3539 struct ifnet *ifp;
3540
3541 RL_LOCK_ASSERT(sc);
3542
3543 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3544 return;
3545
3546 ifp = sc->rl_ifp;
3547 re_txeof(sc);
3548 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3549 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3550 "-- recovering\n");
3551 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3552 re_start_locked(ifp);
3553 return;
3554 }
3555
3556 if_printf(ifp, "watchdog timeout\n");
3557 ifp->if_oerrors++;
3558
3559 re_rxeof(sc, NULL);
3560 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3561 re_init_locked(sc);
3562 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3563 re_start_locked(ifp);
3564 }
3565
3566 /*
3567 * Stop the adapter and free any mbufs allocated to the
3568 * RX and TX lists.
3569 */
3570 static void
3571 re_stop(struct rl_softc *sc)
3572 {
3573 int i;
3574 struct ifnet *ifp;
3575 struct rl_txdesc *txd;
3576 struct rl_rxdesc *rxd;
3577
3578 RL_LOCK_ASSERT(sc);
3579
3580 ifp = sc->rl_ifp;
3581
3582 sc->rl_watchdog_timer = 0;
3583 callout_stop(&sc->rl_stat_callout);
3584 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3585
3586 /*
3587 	 * Disable accepting frames to put the RX MAC into an idle state.
3588 	 * Otherwise it is possible to receive frames while the stop
3589 	 * command is executing, and the controller could DMA a frame
3590 	 * into an already freed RX buffer during that window.
3591 */
3592 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3593 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3594 RL_RXCFG_RX_BROAD));
3595
3596 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3597 /* Enable RXDV gate. */
3598 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
3599 0x00080000);
3600 }
3601
3602 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3603 for (i = RL_TIMEOUT; i > 0; i--) {
3604 if ((CSR_READ_1(sc, sc->rl_txstart) &
3605 RL_TXSTART_START) == 0)
3606 break;
3607 DELAY(20);
3608 }
3609 if (i == 0)
3610 device_printf(sc->rl_dev,
3611 "stopping TX poll timed out!\n");
3612 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3613 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3614 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3615 RL_CMD_RX_ENB);
3616 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3617 for (i = RL_TIMEOUT; i > 0; i--) {
3618 if ((CSR_READ_4(sc, RL_TXCFG) &
3619 RL_TXCFG_QUEUE_EMPTY) != 0)
3620 break;
3621 DELAY(100);
3622 }
3623 if (i == 0)
3624 device_printf(sc->rl_dev,
3625 "stopping TXQ timed out!\n");
3626 }
3627 } else
3628 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3629 DELAY(1000);
3630 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3631 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3632
3633 if (sc->rl_head != NULL) {
3634 m_freem(sc->rl_head);
3635 sc->rl_head = sc->rl_tail = NULL;
3636 }
3637
3638 /* Free the TX list buffers. */
3639 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3640 txd = &sc->rl_ldata.rl_tx_desc[i];
3641 if (txd->tx_m != NULL) {
3642 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3643 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3644 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3645 txd->tx_dmamap);
3646 m_freem(txd->tx_m);
3647 txd->tx_m = NULL;
3648 }
3649 }
3650
3651 /* Free the RX list buffers. */
3652 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3653 rxd = &sc->rl_ldata.rl_rx_desc[i];
3654 if (rxd->rx_m != NULL) {
3655 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3656 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3657 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3658 rxd->rx_dmamap);
3659 m_freem(rxd->rx_m);
3660 rxd->rx_m = NULL;
3661 }
3662 }
3663
3664 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3665 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3666 rxd = &sc->rl_ldata.rl_jrx_desc[i];
3667 if (rxd->rx_m != NULL) {
3668 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3669 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3670 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3671 rxd->rx_dmamap);
3672 m_freem(rxd->rx_m);
3673 rxd->rx_m = NULL;
3674 }
3675 }
3676 }
3677 }
3678
3679 /*
3680 * Device suspend routine. Stop the interface and save some PCI
3681 * settings in case the BIOS doesn't restore them properly on
3682 * resume.
3683 */
3684 static int
3685 re_suspend(device_t dev)
3686 {
3687 struct rl_softc *sc;
3688
3689 sc = device_get_softc(dev);
3690
3691 RL_LOCK(sc);
3692 re_stop(sc);
3693 re_setwol(sc);
3694 sc->suspended = 1;
3695 RL_UNLOCK(sc);
3696
3697 return (0);
3698 }
3699
3700 /*
3701 * Device resume routine. Restore some PCI settings in case the BIOS
3702 * doesn't, re-enable busmastering, and restart the interface if
3703 * appropriate.
3704 */
3705 static int
3706 re_resume(device_t dev)
3707 {
3708 struct rl_softc *sc;
3709 struct ifnet *ifp;
3710
3711 sc = device_get_softc(dev);
3712
3713 RL_LOCK(sc);
3714
3715 ifp = sc->rl_ifp;
3716 /* Take controller out of sleep mode. */
3717 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3718 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3719 CSR_WRITE_1(sc, RL_GPIO,
3720 CSR_READ_1(sc, RL_GPIO) | 0x01);
3721 }
3722
3723 /*
3724 	 * Clear WOL matching so that normal Rx filtering
3725 	 * does not interfere with WOL patterns.
3726 */
3727 re_clrwol(sc);
3728
3729 /* reinitialize interface if necessary */
3730 if (ifp->if_flags & IFF_UP)
3731 re_init_locked(sc);
3732
3733 sc->suspended = 0;
3734 RL_UNLOCK(sc);
3735
3736 return (0);
3737 }
3738
3739 /*
3740 * Stop all chip I/O so that the kernel's probe routines don't
3741 * get confused by errant DMAs when rebooting.
3742 */
3743 static int
3744 re_shutdown(device_t dev)
3745 {
3746 struct rl_softc *sc;
3747
3748 sc = device_get_softc(dev);
3749
3750 RL_LOCK(sc);
3751 re_stop(sc);
3752 /*
3753 	 * Mark the interface as down; otherwise we will panic if an
3754 	 * interrupt comes in later on, which can happen in some
3755 	 * cases.
3756 */
3757 sc->rl_ifp->if_flags &= ~IFF_UP;
3758 re_setwol(sc);
3759 RL_UNLOCK(sc);
3760
3761 return (0);
3762 }
3763
3764 static void
3765 re_set_linkspeed(struct rl_softc *sc)
3766 {
3767 struct mii_softc *miisc;
3768 struct mii_data *mii;
3769 int aneg, i, phyno;
3770
3771 RL_LOCK_ASSERT(sc);
3772
3773 mii = device_get_softc(sc->rl_miibus);
3774 mii_pollstat(mii);
3775 aneg = 0;
3776 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3777 (IFM_ACTIVE | IFM_AVALID)) {
3778 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3779 case IFM_10_T:
3780 case IFM_100_TX:
3781 return;
3782 case IFM_1000_T:
3783 aneg++;
3784 break;
3785 default:
3786 break;
3787 }
3788 }
3789 miisc = LIST_FIRST(&mii->mii_phys);
3790 phyno = miisc->mii_phy;
3791 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3792 PHY_RESET(miisc);
3793 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3794 re_miibus_writereg(sc->rl_dev, phyno,
3795 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3796 re_miibus_writereg(sc->rl_dev, phyno,
3797 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3798 DELAY(1000);
3799 if (aneg != 0) {
3800 /*
3801 * Poll the link state until re(4) gets a 10/100Mbps link.
3802 */
3803 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3804 mii_pollstat(mii);
3805 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3806 == (IFM_ACTIVE | IFM_AVALID)) {
3807 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3808 case IFM_10_T:
3809 case IFM_100_TX:
3810 return;
3811 default:
3812 break;
3813 }
3814 }
3815 RL_UNLOCK(sc);
3816 pause("relnk", hz);
3817 RL_LOCK(sc);
3818 }
3819 if (i == MII_ANEGTICKS_GIGE)
3820 device_printf(sc->rl_dev,
3821 "establishing a link failed, WOL may not work!");
3822 }
3823 /*
3824 * No link; force the MAC to report a 100Mbps, full-duplex link.
3825 * The MAC does not require reprogramming for the resolved
3826 * speed/duplex, so this is just for completeness.
3827 */
3828 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3829 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3830 }
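/*
 * The net effect of re_set_linkspeed() is to leave the PHY advertising
 * only 10/100 modes before the chip is powered down: controllers flagged
 * RL_FLAG_WOL_MANLINK apparently cannot keep a gigabit link alive in the
 * low-power states used for Wake On LAN, so an existing 1000baseT link is
 * renegotiated down to 10/100, and if no link can be established at all
 * the MAC is simply told to assume a 100Mbps, full-duplex link.
 */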
3831
3832 static void
3833 re_setwol(struct rl_softc *sc)
3834 {
3835 struct ifnet *ifp;
3836 int pmc;
3837 uint16_t pmstat;
3838 uint8_t v;
3839
3840 RL_LOCK_ASSERT(sc);
3841
3842 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3843 return;
3844
3845 ifp = sc->rl_ifp;
3846 /* Put controller into sleep mode. */
3847 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3848 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3849 CSR_WRITE_1(sc, RL_GPIO,
3850 CSR_READ_1(sc, RL_GPIO) & ~0x01);
3851 }
3852 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3853 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3854 /* Disable RXDV gate. */
3855 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3856 ~0x00080000);
3857 }
3858 re_set_rxmode(sc);
3859 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3860 re_set_linkspeed(sc);
3861 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3862 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3863 }
3864 /* Enable config register write. */
3865 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3866
3867 /* Enable PME. */
3868 v = CSR_READ_1(sc, sc->rl_cfg1);
3869 v &= ~RL_CFG1_PME;
3870 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3871 v |= RL_CFG1_PME;
3872 CSR_WRITE_1(sc, sc->rl_cfg1, v);
3873
3874 v = CSR_READ_1(sc, sc->rl_cfg3);
3875 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3876 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3877 v |= RL_CFG3_WOL_MAGIC;
3878 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3879
3880 v = CSR_READ_1(sc, sc->rl_cfg5);
3881 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3882 RL_CFG5_WOL_LANWAKE);
3883 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3884 v |= RL_CFG5_WOL_UCAST;
3885 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3886 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3887 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3888 v |= RL_CFG5_WOL_LANWAKE;
3889 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3890
3891 /* Config register write done. */
3892 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3893
3894 if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3895 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3896 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3897 /*
3898 * It seems that the hardware resets its link speed to 100Mbps in
3899 * power-down mode, so switching to 100Mbps in the driver is not
3900 * needed.
3901 */
3902
3903 /* Request PME if WOL is requested. */
3904 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3905 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3906 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3907 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3908 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3909 }
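/*
 * The IFCAP_WOL* bits tested above are normally toggled from userland
 * with the standard ifconfig(8) WOL capability options, e.g. (assuming
 * unit 0):
 *
 *	ifconfig re0 wol_magic		# wake on magic packet only
 *	ifconfig re0 wol		# wake on unicast/multicast/magic
 *	ifconfig re0 -wol		# disable Wake On LAN
 *
 * re_setwol() then translates whatever is enabled at suspend/shutdown
 * time into the CFG1/CFG3/CFG5 bits and the PCI PME request above.
 */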
3910
3911 static void
3912 re_clrwol(struct rl_softc *sc)
3913 {
3914 int pmc;
3915 uint8_t v;
3916
3917 RL_LOCK_ASSERT(sc);
3918
3919 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3920 return;
3921
3922 /* Enable config register write. */
3923 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3924
3925 v = CSR_READ_1(sc, sc->rl_cfg3);
3926 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3927 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3928
3929 /* Config register write done. */
3930 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3931
3932 v = CSR_READ_1(sc, sc->rl_cfg5);
3933 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3934 v &= ~RL_CFG5_WOL_LANWAKE;
3935 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3936 }
3937
3938 static void
3939 re_add_sysctls(struct rl_softc *sc)
3940 {
3941 struct sysctl_ctx_list *ctx;
3942 struct sysctl_oid_list *children;
3943 int error;
3944
3945 ctx = device_get_sysctl_ctx(sc->rl_dev);
3946 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3947
3948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3949 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
3950 "Statistics Information");
3951 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3952 return;
3953
3954 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3955 CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
3956 sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
3957 /* Pull in device tunables. */
3958 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3959 error = resource_int_value(device_get_name(sc->rl_dev),
3960 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3961 if (error == 0) {
3962 if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3963 sc->rl_int_rx_mod > RL_TIMER_MAX) {
3964 device_printf(sc->rl_dev, "int_rx_mod value out of "
3965 "range; using default: %d\n",
3966 RL_TIMER_DEFAULT);
3967 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3968 }
3969 }
3970 }
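/*
 * Since the tunable is fetched with resource_int_value(), the interrupt
 * moderation value can be seeded from the device hints namespace, e.g. in
 * /boot/loader.conf (assuming unit 0, with a value between RL_TIMER_MIN
 * and RL_TIMER_MAX):
 *
 *	hint.re.0.int_rx_mod="<value>"
 *
 * and changed at runtime through the sysctl registered above:
 *
 *	sysctl dev.re.0.int_rx_mod=<value>
 *
 * Out-of-range hint values are reset to RL_TIMER_DEFAULT by the check
 * above; runtime values are range-checked by sysctl_hw_re_int_mod().
 */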
3971
3972 static int
3973 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3974 {
3975 struct rl_softc *sc;
3976 struct rl_stats *stats;
3977 int error, i, result;
3978
3979 result = -1;
3980 error = sysctl_handle_int(oidp, &result, 0, req);
3981 if (error || req->newptr == NULL)
3982 return (error);
3983
3984 if (result == 1) {
3985 sc = (struct rl_softc *)arg1;
3986 RL_LOCK(sc);
3987 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3988 RL_UNLOCK(sc);
3989 goto done;
3990 }
3991 bus_dmamap_sync(sc->rl_ldata.rl_stag,
3992 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
3993 CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
3994 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
3995 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3996 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
3997 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3998 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
3999 RL_DUMPSTATS_START));
4000 for (i = RL_TIMEOUT; i > 0; i--) {
4001 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
4002 RL_DUMPSTATS_START) == 0)
4003 break;
4004 DELAY(1000);
4005 }
4006 bus_dmamap_sync(sc->rl_ldata.rl_stag,
4007 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
4008 RL_UNLOCK(sc);
4009 if (i == 0) {
4010 device_printf(sc->rl_dev,
4011 "DUMP statistics request timedout\n");
4012 return (ETIMEDOUT);
4013 }
4014 done:
4015 stats = sc->rl_ldata.rl_stats;
4016 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
4017 printf("Tx frames : %ju\n",
4018 (uintmax_t)le64toh(stats->rl_tx_pkts));
4019 printf("Rx frames : %ju\n",
4020 (uintmax_t)le64toh(stats->rl_rx_pkts));
4021 printf("Tx errors : %ju\n",
4022 (uintmax_t)le64toh(stats->rl_tx_errs));
4023 printf("Rx errors : %u\n",
4024 le32toh(stats->rl_rx_errs));
4025 printf("Rx missed frames : %u\n",
4026 (uint32_t)le16toh(stats->rl_missed_pkts));
4027 printf("Rx frame alignment errs : %u\n",
4028 (uint32_t)le16toh(stats->rl_rx_framealign_errs));
4029 printf("Tx single collisions : %u\n",
4030 le32toh(stats->rl_tx_onecoll));
4031 printf("Tx multiple collisions : %u\n",
4032 le32toh(stats->rl_tx_multicolls));
4033 printf("Rx unicast frames : %ju\n",
4034 (uintmax_t)le64toh(stats->rl_rx_ucasts));
4035 printf("Rx broadcast frames : %ju\n",
4036 (uintmax_t)le64toh(stats->rl_rx_bcasts));
4037 printf("Rx multicast frames : %u\n",
4038 le32toh(stats->rl_rx_mcasts));
4039 printf("Tx aborts : %u\n",
4040 (uint32_t)le16toh(stats->rl_tx_aborts));
4041 printf("Tx underruns : %u\n",
4042 (uint32_t)le16toh(stats->rl_rx_underruns));
4043 }
4044
4045 return (error);
4046 }
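/*
 * Writing 1 to the "stats" node registered in re_add_sysctls() triggers
 * the hardware counter dump above and prints the result on the console,
 * e.g. (assuming unit 0):
 *
 *	sysctl dev.re.0.stats=1
 *
 * Note that the counters are fetched from the chip only while the
 * interface is running; otherwise the most recently dumped values are
 * printed.
 */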
4047
4048 static int
4049 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4050 {
4051 int error, value;
4052
4053 if (arg1 == NULL)
4054 return (EINVAL);
4055 value = *(int *)arg1;
4056 error = sysctl_handle_int(oidp, &value, 0, req);
4057 if (error || req->newptr == NULL)
4058 return (error);
4059 if (value < low || value > high)
4060 return (EINVAL);
4061 *(int *)arg1 = value;
4062
4063 return (0);
4064 }
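/*
 * sysctl_int_range() is a small generic helper: it accepts a new integer
 * value only if it falls within [low, high], leaving the backing variable
 * untouched otherwise.  sysctl_hw_re_int_mod() below simply binds it to
 * the RL_TIMER_MIN..RL_TIMER_MAX interrupt moderation range.
 */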
4065
4066 static int
4067 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4068 {
4069
4070 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4071 RL_TIMER_MAX));
4072 }