1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
2
3 /*
4 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com> and
8 * Matthew Dillon <dillon@apollo.backplane.com>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
40 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
41 *
42 * Permission to use, copy, modify, and distribute this software for any
43 * purpose with or without fee is hereby granted, provided that the above
44 * copyright notice and this permission notice appear in all copies.
45 *
46 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
47 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
48 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
49 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
50 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
51 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
52 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
53 */
54
55 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
56
57 #include "opt_ifpoll.h"
58
59 #include <sys/param.h>
60 #include <sys/endian.h>
61 #include <sys/kernel.h>
62 #include <sys/bus.h>
63 #include <sys/interrupt.h>
64 #include <sys/proc.h>
65 #include <sys/rman.h>
66 #include <sys/serialize.h>
67 #include <sys/socket.h>
68 #include <sys/sockio.h>
69 #include <sys/sysctl.h>
70
71 #include <net/ethernet.h>
72 #include <net/if.h>
73 #include <net/bpf.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_poll.h>
78 #include <net/ifq_var.h>
79 #include <net/if_types.h>
80 #include <net/if_var.h>
81 #include <net/vlan/if_vlan_var.h>
82 #include <net/vlan/if_vlan_ether.h>
83
84 #include <bus/pci/pcireg.h>
85 #include <bus/pci/pcivar.h>
86 #include "pcidevs.h"
87
88 #include <dev/netif/mii_layer/mii.h>
89 #include <dev/netif/mii_layer/miivar.h>
90
91 #include "miibus_if.h"
92
93 #include <dev/netif/nfe/if_nfereg.h>
94 #include <dev/netif/nfe/if_nfevar.h>
95
96 #define NFE_CSUM
97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
98
99 static int nfe_probe(device_t);
100 static int nfe_attach(device_t);
101 static int nfe_detach(device_t);
102 static void nfe_shutdown(device_t);
103 static int nfe_resume(device_t);
104 static int nfe_suspend(device_t);
105
106 static int nfe_miibus_readreg(device_t, int, int);
107 static void nfe_miibus_writereg(device_t, int, int, int);
108 static void nfe_miibus_statchg(device_t);
109
110 #ifdef IFPOLL_ENABLE
111 static void nfe_npoll(struct ifnet *, struct ifpoll_info *);
112 static void nfe_npoll_compat(struct ifnet *, void *, int);
113 static void nfe_disable_intrs(struct nfe_softc *);
114 #endif
115 static void nfe_intr(void *);
116 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
117 static int nfe_rxeof(struct nfe_softc *);
118 static int nfe_txeof(struct nfe_softc *, int);
119 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
120 struct mbuf *);
121 static void nfe_start(struct ifnet *, struct ifaltq_subque *);
122 static void nfe_watchdog(struct ifnet *);
123 static void nfe_init(void *);
124 static void nfe_stop(struct nfe_softc *);
125 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
126 static void nfe_jfree(void *);
127 static void nfe_jref(void *);
128 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
129 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
130 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
131 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
132 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
133 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
134 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
135 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
136 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
137 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
138 static int nfe_ifmedia_upd(struct ifnet *);
139 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
140 static void nfe_setmulti(struct nfe_softc *);
141 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
142 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
143 static void nfe_powerup(device_t);
144 static void nfe_mac_reset(struct nfe_softc *);
145 static void nfe_tick(void *);
146 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
147 int, bus_addr_t);
148 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
149 int);
150 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
151 int);
152 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
153 int);
154 static void nfe_enable_intrs(struct nfe_softc *);
155
156 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);
157
158 #define NFE_DEBUG
159 #ifdef NFE_DEBUG
160
161 static int nfe_debug = 0;
162 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
163 static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
164 /*
165 * hw timer simulated interrupt moderation at 4000Hz. Negative values
166 * disable the timer when the discrete interrupt rate falls below
167 * the moderation rate.
168 *
169 * XXX 8000Hz might be better, but if the interrupt is shared it can
170 * blow out the CPU.
171 */
172 static int nfe_imtime = -250; /* uS */
173
174 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
175 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
176 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
177 TUNABLE_INT("hw.nfe.debug", &nfe_debug);
178
179 #define DPRINTF(sc, fmt, ...) do { \
180 if ((sc)->sc_debug) { \
181 if_printf(&(sc)->arpcom.ac_if, \
182 fmt, __VA_ARGS__); \
183 } \
184 } while (0)
185
186 #define DPRINTFN(sc, lv, fmt, ...) do { \
187 if ((sc)->sc_debug >= (lv)) { \
188 if_printf(&(sc)->arpcom.ac_if, \
189 fmt, __VA_ARGS__); \
190 } \
191 } while (0)
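/*
 * Example: DPRINTFN(sc, 2, "mii read phy %d reg 0x%x\n", phy, reg)
 * prints only when sc_debug (the hw.nfe.debug tunable) is >= 2.
 */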
192
193 #else /* !NFE_DEBUG */
194
195 #define DPRINTF(sc, fmt, ...)
196 #define DPRINTFN(sc, lv, fmt, ...)
197
198 #endif /* NFE_DEBUG */
199
200 static const struct nfe_dev {
201 uint16_t vid;
202 uint16_t did;
203 const char *desc;
204 } nfe_devices[] = {
205 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
206 "NVIDIA nForce Fast Ethernet" },
207
208 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
209 "NVIDIA nForce2 Fast Ethernet" },
210
211 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
212 "NVIDIA nForce3 Gigabit Ethernet" },
213
214 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
215 chipset, and possibly also the 400R; it might be that both nForce2-
216 and nForce3-based boards can use the same MCPs (= southbridges) */
217 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
218 "NVIDIA nForce3 Gigabit Ethernet" },
219
220 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
221 "NVIDIA nForce3 Gigabit Ethernet" },
222
223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
224 "NVIDIA nForce3 Gigabit Ethernet" },
225
226 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
227 "NVIDIA nForce3 Gigabit Ethernet" },
228
229 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
230 "NVIDIA CK804 Gigabit Ethernet" },
231
232 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
233 "NVIDIA CK804 Gigabit Ethernet" },
234
235 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
236 "NVIDIA MCP04 Gigabit Ethernet" },
237
238 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
239 "NVIDIA MCP04 Gigabit Ethernet" },
240
241 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
242 "NVIDIA MCP51 Gigabit Ethernet" },
243
244 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
245 "NVIDIA MCP51 Gigabit Ethernet" },
246
247 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
248 "NVIDIA MCP55 Gigabit Ethernet" },
249
250 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
251 "NVIDIA MCP55 Gigabit Ethernet" },
252
253 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
254 "NVIDIA MCP61 Gigabit Ethernet" },
255
256 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
257 "NVIDIA MCP61 Gigabit Ethernet" },
258
259 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
260 "NVIDIA MCP61 Gigabit Ethernet" },
261
262 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
263 "NVIDIA MCP61 Gigabit Ethernet" },
264
265 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
266 "NVIDIA MCP65 Gigabit Ethernet" },
267
268 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
269 "NVIDIA MCP65 Gigabit Ethernet" },
270
271 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
272 "NVIDIA MCP65 Gigabit Ethernet" },
273
274 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
275 "NVIDIA MCP65 Gigabit Ethernet" },
276
277 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
278 "NVIDIA MCP67 Gigabit Ethernet" },
279
280 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
281 "NVIDIA MCP67 Gigabit Ethernet" },
282
283 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
284 "NVIDIA MCP67 Gigabit Ethernet" },
285
286 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
287 "NVIDIA MCP67 Gigabit Ethernet" },
288
289 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
290 "NVIDIA MCP73 Gigabit Ethernet" },
291
292 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
293 "NVIDIA MCP73 Gigabit Ethernet" },
294
295 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
296 "NVIDIA MCP73 Gigabit Ethernet" },
297
298 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
299 "NVIDIA MCP73 Gigabit Ethernet" },
300
301 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
302 "NVIDIA MCP77 Gigabit Ethernet" },
303
304 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
305 "NVIDIA MCP77 Gigabit Ethernet" },
306
307 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
308 "NVIDIA MCP77 Gigabit Ethernet" },
309
310 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
311 "NVIDIA MCP77 Gigabit Ethernet" },
312
313 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
314 "NVIDIA MCP79 Gigabit Ethernet" },
315
316 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
317 "NVIDIA MCP79 Gigabit Ethernet" },
318
319 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
320 "NVIDIA MCP79 Gigabit Ethernet" },
321
322 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
323 "NVIDIA MCP79 Gigabit Ethernet" },
324
325 { 0, 0, NULL }
326 };
327
328 static device_method_t nfe_methods[] = {
329 /* Device interface */
330 DEVMETHOD(device_probe, nfe_probe),
331 DEVMETHOD(device_attach, nfe_attach),
332 DEVMETHOD(device_detach, nfe_detach),
333 DEVMETHOD(device_suspend, nfe_suspend),
334 DEVMETHOD(device_resume, nfe_resume),
335 DEVMETHOD(device_shutdown, nfe_shutdown),
336
337 /* Bus interface */
338 DEVMETHOD(bus_print_child, bus_generic_print_child),
339 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
340
341 /* MII interface */
342 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
343 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
344 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
345
346 DEVMETHOD_END
347 };
348
349 static driver_t nfe_driver = {
350 "nfe",
351 nfe_methods,
352 sizeof(struct nfe_softc)
353 };
354
355 static devclass_t nfe_devclass;
356
357 DECLARE_DUMMY_MODULE(if_nfe);
358 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
359 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL);
360 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL);
361
362 /*
363 * NOTE: NFE_WORDALIGN support is guesswork right now.
364 */
365 static int
366 nfe_probe(device_t dev)
367 {
368 const struct nfe_dev *n;
369 uint16_t vid, did;
370
371 vid = pci_get_vendor(dev);
372 did = pci_get_device(dev);
373 for (n = nfe_devices; n->desc != NULL; ++n) {
374 if (vid == n->vid && did == n->did) {
375 struct nfe_softc *sc = device_get_softc(dev);
376
377 switch (did) {
378 case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
379 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
381 sc->sc_caps = NFE_NO_PWRCTL |
382 NFE_FIX_EADDR;
383 break;
384 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
385 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
386 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
387 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
388 sc->sc_caps = NFE_JUMBO_SUP |
389 NFE_HW_CSUM |
390 NFE_NO_PWRCTL |
391 NFE_FIX_EADDR;
392 break;
393 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
394 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
395 sc->sc_caps = NFE_FIX_EADDR;
396 /* FALL THROUGH */
397 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
398 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
399 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
400 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
401 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
402 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
403 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
404 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
405 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
406 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
407 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
408 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
409 sc->sc_caps |= NFE_40BIT_ADDR;
410 break;
411 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
412 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
413 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
414 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
415 sc->sc_caps = NFE_JUMBO_SUP |
416 NFE_40BIT_ADDR |
417 NFE_HW_CSUM |
418 NFE_NO_PWRCTL |
419 NFE_FIX_EADDR;
420 break;
421 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
422 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
423 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
424 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
425 sc->sc_caps = NFE_JUMBO_SUP |
426 NFE_40BIT_ADDR;
427 break;
428 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
429 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
430 sc->sc_caps = NFE_JUMBO_SUP |
431 NFE_40BIT_ADDR |
432 NFE_HW_CSUM |
433 NFE_HW_VLAN |
434 NFE_FIX_EADDR;
435 break;
436 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
437 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
438 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
439 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
440 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
441 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
442 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
443 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
444 sc->sc_caps = NFE_40BIT_ADDR |
445 NFE_HW_CSUM |
446 NFE_WORDALIGN;
447 break;
448 }
449
450 device_set_desc(dev, n->desc);
451 device_set_async_attach(dev, TRUE);
452 return 0;
453 }
454 }
455 return ENXIO;
456 }
457
458 static int
459 nfe_attach(device_t dev)
460 {
461 struct nfe_softc *sc = device_get_softc(dev);
462 struct ifnet *ifp = &sc->arpcom.ac_if;
463 uint8_t eaddr[ETHER_ADDR_LEN];
464 bus_addr_t lowaddr;
465 int error;
466
467 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
468 lwkt_serialize_init(&sc->sc_jbuf_serializer);
469
470 /*
471 * Initialize sysctl variables
472 */
473 sc->sc_rx_ring_count = nfe_rx_ring_count;
474 sc->sc_tx_ring_count = nfe_tx_ring_count;
475 sc->sc_debug = nfe_debug;
476 if (nfe_imtime < 0) {
477 sc->sc_flags |= NFE_F_DYN_IM;
478 sc->sc_imtime = -nfe_imtime;
479 } else {
480 sc->sc_imtime = nfe_imtime;
481 }
482 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);
483
484 sc->sc_mem_rid = PCIR_BAR(0);
485
486 if (sc->sc_caps & NFE_40BIT_ADDR)
487 sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
488 else if (sc->sc_caps & NFE_JUMBO_SUP)
489 sc->rxtxctl_desc = NFE_RXTX_DESC_V2;
490
491 #ifndef BURN_BRIDGES
492 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
493 uint32_t mem, irq;
494
495 mem = pci_read_config(dev, sc->sc_mem_rid, 4);
496 irq = pci_read_config(dev, PCIR_INTLINE, 4);
497
498 device_printf(dev, "chip is in D%d power mode "
499 "-- setting to D0\n", pci_get_powerstate(dev));
500
501 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
502
503 pci_write_config(dev, sc->sc_mem_rid, mem, 4);
504 pci_write_config(dev, PCIR_INTLINE, irq, 4);
505 }
506 #endif /* !BURN_BRIDGES */
507
508 /* Enable bus mastering */
509 pci_enable_busmaster(dev);
510
511 /* Allocate IO memory */
512 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
513 &sc->sc_mem_rid, RF_ACTIVE);
514 if (sc->sc_mem_res == NULL) {
515 device_printf(dev, "could not allocate io memory\n");
516 return ENXIO;
517 }
518 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
519 sc->sc_memt = rman_get_bustag(sc->sc_mem_res);
520
521 /* Allocate IRQ */
522 sc->sc_irq_rid = 0;
523 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
524 &sc->sc_irq_rid,
525 RF_SHAREABLE | RF_ACTIVE);
526 if (sc->sc_irq_res == NULL) {
527 device_printf(dev, "could not allocate irq\n");
528 error = ENXIO;
529 goto fail;
530 }
531
532 /* Disable WOL */
533 NFE_WRITE(sc, NFE_WOL_CTL, 0);
534
535 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
536 nfe_powerup(dev);
537
538 nfe_get_macaddr(sc, eaddr);
539
540 /*
541 * Allocate top level DMA tag
542 */
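/* NFE_40BIT_ADDR parts can DMA above 4GB; others are capped at 32-bit */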
543 if (sc->sc_caps & NFE_40BIT_ADDR)
544 lowaddr = NFE_BUS_SPACE_MAXADDR;
545 else
546 lowaddr = BUS_SPACE_MAXADDR_32BIT;
547 error = bus_dma_tag_create(NULL, /* parent */
548 1, 0, /* alignment, boundary */
549 lowaddr, /* lowaddr */
550 BUS_SPACE_MAXADDR, /* highaddr */
551 NULL, NULL, /* filter, filterarg */
552 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
553 0, /* nsegments */
554 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
555 0, /* flags */
556 &sc->sc_dtag);
557 if (error) {
558 device_printf(dev, "could not allocate parent dma tag\n");
559 goto fail;
560 }
561
562 /*
563 * Allocate Tx and Rx rings.
564 */
565 error = nfe_alloc_tx_ring(sc, &sc->txq);
566 if (error) {
567 device_printf(dev, "could not allocate Tx ring\n");
568 goto fail;
569 }
570
571 error = nfe_alloc_rx_ring(sc, &sc->rxq);
572 if (error) {
573 device_printf(dev, "could not allocate Rx ring\n");
574 goto fail;
575 }
576
577 /*
578 * Create sysctl tree
579 */
580 sysctl_ctx_init(&sc->sc_sysctl_ctx);
581 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
582 SYSCTL_STATIC_CHILDREN(_hw),
583 OID_AUTO,
584 device_get_nameunit(dev),
585 CTLFLAG_RD, 0, "");
586 if (sc->sc_sysctl_tree == NULL) {
587 device_printf(dev, "can't add sysctl node\n");
588 error = ENXIO;
589 goto fail;
590 }
591 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
592 SYSCTL_CHILDREN(sc->sc_sysctl_tree),
593 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
594 sc, 0, nfe_sysctl_imtime, "I",
595 "Interrupt moderation time (usec). "
596 "0 to disable interrupt moderation.");
597 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
598 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
599 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
600 0, "RX ring count");
601 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
602 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
603 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
604 0, "TX ring count");
605 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
606 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
607 "debug", CTLFLAG_RW, &sc->sc_debug,
608 0, "control debugging printfs");
609
610 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
611 nfe_ifmedia_sts);
612 if (error) {
613 device_printf(dev, "MII without any phy\n");
614 goto fail;
615 }
616
617 ifp->if_softc = sc;
618 ifp->if_mtu = ETHERMTU;
619 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
620 ifp->if_ioctl = nfe_ioctl;
621 ifp->if_start = nfe_start;
622 #ifdef IFPOLL_ENABLE
623 ifp->if_npoll = nfe_npoll;
624 #endif
625 ifp->if_watchdog = nfe_watchdog;
626 ifp->if_init = nfe_init;
627 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
628 ifq_set_ready(&ifp->if_snd);
629
630 ifp->if_capabilities = IFCAP_VLAN_MTU;
631
632 if (sc->sc_caps & NFE_HW_VLAN)
633 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
634
635 #ifdef NFE_CSUM
636 if (sc->sc_caps & NFE_HW_CSUM) {
637 ifp->if_capabilities |= IFCAP_HWCSUM;
638 ifp->if_hwassist = NFE_CSUM_FEATURES;
639 }
640 #else
641 sc->sc_caps &= ~NFE_HW_CSUM;
642 #endif
643 ifp->if_capenable = ifp->if_capabilities;
644
645 callout_init(&sc->sc_tick_ch);
646
647 ether_ifattach(ifp, eaddr, NULL);
648
649 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));
650
651 #ifdef IFPOLL_ENABLE
652 ifpoll_compat_setup(&sc->sc_npoll,
653 &sc->sc_sysctl_ctx, sc->sc_sysctl_tree, device_get_unit(dev),
654 ifp->if_serializer);
655 #endif
656
657 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
658 &sc->sc_ih, ifp->if_serializer);
659 if (error) {
660 device_printf(dev, "could not setup intr\n");
661 ether_ifdetach(ifp);
662 goto fail;
663 }
664
665 return 0;
666 fail:
667 nfe_detach(dev);
668 return error;
669 }
670
671 static int
672 nfe_detach(device_t dev)
673 {
674 struct nfe_softc *sc = device_get_softc(dev);
675
676 if (device_is_attached(dev)) {
677 struct ifnet *ifp = &sc->arpcom.ac_if;
678
679 lwkt_serialize_enter(ifp->if_serializer);
680 nfe_stop(sc);
681 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
682 lwkt_serialize_exit(ifp->if_serializer);
683
684 ether_ifdetach(ifp);
685 }
686
687 if (sc->sc_miibus != NULL)
688 device_delete_child(dev, sc->sc_miibus);
689 bus_generic_detach(dev);
690
691 if (sc->sc_sysctl_tree != NULL)
692 sysctl_ctx_free(&sc->sc_sysctl_ctx);
693
694 if (sc->sc_irq_res != NULL) {
695 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
696 sc->sc_irq_res);
697 }
698
699 if (sc->sc_mem_res != NULL) {
700 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
701 sc->sc_mem_res);
702 }
703
704 nfe_free_tx_ring(sc, &sc->txq);
705 nfe_free_rx_ring(sc, &sc->rxq);
706 if (sc->sc_dtag != NULL)
707 bus_dma_tag_destroy(sc->sc_dtag);
708
709 return 0;
710 }
711
712 static void
713 nfe_shutdown(device_t dev)
714 {
715 struct nfe_softc *sc = device_get_softc(dev);
716 struct ifnet *ifp = &sc->arpcom.ac_if;
717
718 lwkt_serialize_enter(ifp->if_serializer);
719 nfe_stop(sc);
720 lwkt_serialize_exit(ifp->if_serializer);
721 }
722
723 static int
724 nfe_suspend(device_t dev)
725 {
726 struct nfe_softc *sc = device_get_softc(dev);
727 struct ifnet *ifp = &sc->arpcom.ac_if;
728
729 lwkt_serialize_enter(ifp->if_serializer);
730 nfe_stop(sc);
731 lwkt_serialize_exit(ifp->if_serializer);
732
733 return 0;
734 }
735
736 static int
737 nfe_resume(device_t dev)
738 {
739 struct nfe_softc *sc = device_get_softc(dev);
740 struct ifnet *ifp = &sc->arpcom.ac_if;
741
742 lwkt_serialize_enter(ifp->if_serializer);
743 if (ifp->if_flags & IFF_UP)
744 nfe_init(sc);
745 lwkt_serialize_exit(ifp->if_serializer);
746
747 return 0;
748 }
749
750 static void
751 nfe_miibus_statchg(device_t dev)
752 {
753 struct nfe_softc *sc = device_get_softc(dev);
754 struct mii_data *mii = device_get_softc(sc->sc_miibus);
755 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
756
757 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);
758
759 phy = NFE_READ(sc, NFE_PHY_IFACE);
760 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
761
762 seed = NFE_READ(sc, NFE_RNDSEED);
763 seed &= ~NFE_SEED_MASK;
764
765 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
766 phy |= NFE_PHY_HDX; /* half-duplex */
767 misc |= NFE_MISC1_HDX;
768 }
769
770 switch (IFM_SUBTYPE(mii->mii_media_active)) {
771 case IFM_1000_T: /* full-duplex only */
772 link |= NFE_MEDIA_1000T;
773 seed |= NFE_SEED_1000T;
774 phy |= NFE_PHY_1000T;
775 break;
776 case IFM_100_TX:
777 link |= NFE_MEDIA_100TX;
778 seed |= NFE_SEED_100TX;
779 phy |= NFE_PHY_100TX;
780 break;
781 case IFM_10_T:
782 link |= NFE_MEDIA_10T;
783 seed |= NFE_SEED_10T;
784 break;
785 }
786
787 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
788
789 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
790 NFE_WRITE(sc, NFE_MISC1, misc);
791 NFE_WRITE(sc, NFE_LINKSPEED, link);
792 }
793
794 static int
795 nfe_miibus_readreg(device_t dev, int phy, int reg)
796 {
797 struct nfe_softc *sc = device_get_softc(dev);
798 uint32_t val;
799 int ntries;
800
801 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
802
803 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
804 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
805 DELAY(100);
806 }
807
808 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
809
810 for (ntries = 0; ntries < 1000; ntries++) {
811 DELAY(100);
812 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
813 break;
814 }
815 if (ntries == 1000) {
816 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
817 return 0;
818 }
819
820 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
821 DPRINTFN(sc, 2, "could not read PHY %s\n", "");
822 return 0;
823 }
824
825 val = NFE_READ(sc, NFE_PHY_DATA);
826 if (val != 0xffffffff && val != 0)
827 sc->mii_phyaddr = phy;
828
829 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
830
831 return val;
832 }
833
834 static void
835 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
836 {
837 struct nfe_softc *sc = device_get_softc(dev);
838 uint32_t ctl;
839 int ntries;
840
841 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
842
843 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
844 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
845 DELAY(100);
846 }
847
848 NFE_WRITE(sc, NFE_PHY_DATA, val);
849 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
850 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
851
852 for (ntries = 0; ntries < 1000; ntries++) {
853 DELAY(100);
854 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
855 break;
856 }
857
858 #ifdef NFE_DEBUG
859 if (ntries == 1000)
860 DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
861 #endif
862 }
863
864 #ifdef IFPOLL_ENABLE
865
866 static void
867 nfe_npoll_compat(struct ifnet *ifp, void *arg __unused, int count __unused)
868 {
869 struct nfe_softc *sc = ifp->if_softc;
870
871 ASSERT_SERIALIZED(ifp->if_serializer);
872
873 nfe_rxeof(sc);
874 nfe_txeof(sc, 1);
875 }
876
877 static void
878 nfe_disable_intrs(struct nfe_softc *sc)
879 {
880 /* Disable interrupts */
881 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
882 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
883 sc->sc_npoll.ifpc_stcount = 0;
884 }
885
886 static void
887 nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info)
888 {
889 struct nfe_softc *sc = ifp->if_softc;
890
891 ASSERT_SERIALIZED(ifp->if_serializer);
892
893 if (info != NULL) {
894 int cpuid = sc->sc_npoll.ifpc_cpuid;
895
896 info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat;
897 info->ifpi_rx[cpuid].arg = NULL;
898 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
899
900 if (ifp->if_flags & IFF_RUNNING)
901 nfe_disable_intrs(sc);
902 ifq_set_cpuid(&ifp->if_snd, cpuid);
903 } else {
904 if (ifp->if_flags & IFF_RUNNING)
905 nfe_enable_intrs(sc);
906 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));
907 }
908 }
909
910 #endif /* IFPOLL_ENABLE */
911
912 static void
913 nfe_intr(void *arg)
914 {
915 struct nfe_softc *sc = arg;
916 struct ifnet *ifp = &sc->arpcom.ac_if;
917 uint32_t r;
918
919 r = NFE_READ(sc, NFE_IRQ_STATUS);
920 if (r == 0)
921 return; /* not for us */
922 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
923
924 if (sc->sc_rate_second != time_uptime) {
925 /*
926 * Calculate sc_rate_avg - interrupts per second.
927 */
928 sc->sc_rate_second = time_uptime;
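/*
 * Decay: new_avg = (3 * old_avg + acc) / 4, e.g. old_avg = 1000
 * and acc = 200 interrupts/s yields (3000 + 200) / 4 = 800.
 */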
929 if (sc->sc_rate_avg < sc->sc_rate_acc)
930 sc->sc_rate_avg = sc->sc_rate_acc;
931 else
932 sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
933 sc->sc_rate_acc) / 4;
934 sc->sc_rate_acc = 0;
935 } else if (sc->sc_rate_avg < sc->sc_rate_acc) {
936 /*
937 * Don't wait for a tick to roll over if we are taking
938 * a lot of interrupts.
939 */
940 sc->sc_rate_avg = sc->sc_rate_acc;
941 }
942
943 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);
944
945 if (r & NFE_IRQ_LINK) {
946 NFE_READ(sc, NFE_PHY_STATUS);
947 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
948 DPRINTF(sc, "link state changed %s\n", "");
949 }
950
951 if (ifp->if_flags & IFF_RUNNING) {
952 int ret;
953 int rate;
954
955 /* check Rx ring */
956 ret = nfe_rxeof(sc);
957
958 /* check Tx ring */
959 ret |= nfe_txeof(sc, 1);
960
961 /* update the rate accumulator */
962 if (ret)
963 ++sc->sc_rate_acc;
964
965 if (sc->sc_flags & NFE_F_DYN_IM) {
966 rate = 1000000 / sc->sc_imtime;
967 if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
968 sc->sc_rate_avg > rate) {
969 /*
970 * Use the hardware timer to reduce the
971 * interrupt rate if the discrete interrupt
972 * rate has exceeded our threshold.
973 */
974 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
975 sc->sc_flags |= NFE_F_IRQ_TIMER;
976 } else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
977 sc->sc_rate_avg <= rate) {
978 /*
979 * Use discrete TX/RX interrupts if the rate
980 * has fallen below our threshold.
981 */
982 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
983 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
984
985 /*
986 * Recollect, mainly to avoid the possible race
987 * introduced by changing interrupt masks.
988 */
989 nfe_rxeof(sc);
990 nfe_txeof(sc, 1);
991 }
992 }
993 }
994 }
995
996 static int
997 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
998 {
999 struct nfe_softc *sc = ifp->if_softc;
1000 struct ifreq *ifr = (struct ifreq *)data;
1001 struct mii_data *mii;
1002 int error = 0, mask, jumbo_cap;
1003
1004 ASSERT_SERIALIZED(ifp->if_serializer);
1005
1006 switch (cmd) {
1007 case SIOCSIFMTU:
1008 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
1009 jumbo_cap = 1;
1010 else
1011 jumbo_cap = 0;
1012
1013 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
1014 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
1015 return EINVAL;
1016 } else if (ifp->if_mtu != ifr->ifr_mtu) {
1017 ifp->if_mtu = ifr->ifr_mtu;
1018 if (ifp->if_flags & IFF_RUNNING)
1019 nfe_init(sc);
1020 }
1021 break;
1022 case SIOCSIFFLAGS:
1023 if (ifp->if_flags & IFF_UP) {
1024 /*
1025 * If only the PROMISC or ALLMULTI flag changes, then
1026 * don't do a full re-init of the chip, just update
1027 * the Rx filter.
1028 */
1029 if ((ifp->if_flags & IFF_RUNNING) &&
1030 ((ifp->if_flags ^ sc->sc_if_flags) &
1031 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1032 nfe_setmulti(sc);
1033 } else {
1034 if (!(ifp->if_flags & IFF_RUNNING))
1035 nfe_init(sc);
1036 }
1037 } else {
1038 if (ifp->if_flags & IFF_RUNNING)
1039 nfe_stop(sc);
1040 }
1041 sc->sc_if_flags = ifp->if_flags;
1042 break;
1043 case SIOCADDMULTI:
1044 case SIOCDELMULTI:
1045 if (ifp->if_flags & IFF_RUNNING)
1046 nfe_setmulti(sc);
1047 break;
1048 case SIOCSIFMEDIA:
1049 case SIOCGIFMEDIA:
1050 mii = device_get_softc(sc->sc_miibus);
1051 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1052 break;
1053 case SIOCSIFCAP:
1054 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
1055 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
1056 ifp->if_capenable ^= mask;
1057 if (IFCAP_TXCSUM & ifp->if_capenable)
1058 ifp->if_hwassist = NFE_CSUM_FEATURES;
1059 else
1060 ifp->if_hwassist = 0;
1061
1062 if (ifp->if_flags & IFF_RUNNING)
1063 nfe_init(sc);
1064 }
1065 break;
1066 default:
1067 error = ether_ioctl(ifp, cmd, data);
1068 break;
1069 }
1070 return error;
1071 }
1072
1073 static int
1074 nfe_rxeof(struct nfe_softc *sc)
1075 {
1076 struct ifnet *ifp = &sc->arpcom.ac_if;
1077 struct nfe_rx_ring *ring = &sc->rxq;
1078 int reap;
1079
1080 reap = 0;
1081 for (;;) {
1082 struct nfe_rx_data *data = &ring->data[ring->cur];
1083 struct mbuf *m;
1084 uint16_t flags;
1085 int len, error;
1086
1087 if (sc->sc_caps & NFE_40BIT_ADDR) {
1088 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];
1089
1090 flags = le16toh(desc64->flags);
1091 len = le16toh(desc64->length) & 0x3fff;
1092 } else {
1093 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];
1094
1095 flags = le16toh(desc32->flags);
1096 len = le16toh(desc32->length) & 0x3fff;
1097 }
1098
1099 if (flags & NFE_RX_READY)
1100 break;
1101
1102 reap = 1;
1103
1104 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1105 if (!(flags & NFE_RX_VALID_V1))
1106 goto skip;
1107
1108 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1109 flags &= ~NFE_RX_ERROR;
1110 len--; /* fix buffer length */
1111 }
1112 } else {
1113 if (!(flags & NFE_RX_VALID_V2))
1114 goto skip;
1115
1116 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1117 flags &= ~NFE_RX_ERROR;
1118 len--; /* fix buffer length */
1119 }
1120 }
1121
1122 if (flags & NFE_RX_ERROR) {
1123 IFNET_STAT_INC(ifp, ierrors, 1);
1124 goto skip;
1125 }
1126
1127 m = data->m;
1128
1129 if (sc->sc_flags & NFE_F_USE_JUMBO)
1130 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
1131 else
1132 error = nfe_newbuf_std(sc, ring, ring->cur, 0);
1133 if (error) {
1134 IFNET_STAT_INC(ifp, ierrors, 1);
1135 goto skip;
1136 }
1137
1138 /* finalize mbuf */
1139 m->m_pkthdr.len = m->m_len = len;
1140 m->m_pkthdr.rcvif = ifp;
1141
1142 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
1143 (flags & NFE_RX_CSUMOK)) {
1144 if (flags & NFE_RX_IP_CSUMOK_V2) {
1145 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1146 CSUM_IP_VALID;
1147 }
1148
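/*
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff tells
 * the stack that the TCP/UDP checksum was verified in hardware and
 * need not be recomputed.
 */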
1149 if (flags &
1150 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
1151 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1152 CSUM_PSEUDO_HDR |
1153 CSUM_FRAG_NOT_CHECKED;
1154 m->m_pkthdr.csum_data = 0xffff;
1155 }
1156 }
1157
1158 IFNET_STAT_INC(ifp, ipackets, 1);
1159 ifp->if_input(ifp, m);
1160 skip:
1161 nfe_set_ready_rxdesc(sc, ring, ring->cur);
1162 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
1163 }
1164 return reap;
1165 }
1166
1167 static int
1168 nfe_txeof(struct nfe_softc *sc, int start)
1169 {
1170 struct ifnet *ifp = &sc->arpcom.ac_if;
1171 struct nfe_tx_ring *ring = &sc->txq;
1172 struct nfe_tx_data *data = NULL;
1173
1174 while (ring->next != ring->cur) {
1175 uint16_t flags;
1176
1177 if (sc->sc_caps & NFE_40BIT_ADDR)
1178 flags = le16toh(ring->desc64[ring->next].flags);
1179 else
1180 flags = le16toh(ring->desc32[ring->next].flags);
1181
1182 if (flags & NFE_TX_VALID)
1183 break;
1184
1185 data = &ring->data[ring->next];
1186
1187 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1188 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1189 goto skip;
1190
1191 if ((flags & NFE_TX_ERROR_V1) != 0) {
1192 if_printf(ifp, "tx v1 error 0x%4b\n", flags,
1193 NFE_V1_TXERR);
1194 IFNET_STAT_INC(ifp, oerrors, 1);
1195 } else {
1196 IFNET_STAT_INC(ifp, opackets, 1);
1197 }
1198 } else {
1199 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1200 goto skip;
1201
1202 if ((flags & NFE_TX_ERROR_V2) != 0) {
1203 if_printf(ifp, "tx v2 error 0x%4b\n", flags,
1204 NFE_V2_TXERR);
1205 IFNET_STAT_INC(ifp, oerrors, 1);
1206 } else {
1207 IFNET_STAT_INC(ifp, opackets, 1);
1208 }
1209 }
1210
1211 if (data->m == NULL) { /* should not get here */
1212 if_printf(ifp,
1213 "last fragment bit w/o associated mbuf!\n");
1214 goto skip;
1215 }
1216
1217 /* last fragment of the mbuf chain transmitted */
1218 bus_dmamap_unload(ring->data_tag, data->map);
1219 m_freem(data->m);
1220 data->m = NULL;
1221 skip:
1222 ring->queued--;
1223 KKASSERT(ring->queued >= 0);
1224 ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
1225 }
1226
1227 if (sc->sc_tx_ring_count - ring->queued >=
1228 sc->sc_tx_spare + NFE_NSEG_RSVD)
1229 ifq_clr_oactive(&ifp->if_snd);
1230
1231 if (ring->queued == 0)
1232 ifp->if_timer = 0;
1233
1234 if (start && !ifq_is_empty(&ifp->if_snd))
1235 if_devstart(ifp);
1236
1237 if (data != NULL)
1238 return 1;
1239 else
1240 return 0;
1241 }
1242
1243 static int
1244 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
1245 {
1246 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1247 struct nfe_tx_data *data, *data_map;
1248 bus_dmamap_t map;
1249 struct nfe_desc64 *desc64 = NULL;
1250 struct nfe_desc32 *desc32 = NULL;
1251 uint16_t flags = 0;
1252 uint32_t vtag = 0;
1253 int error, i, j, maxsegs, nsegs;
1254
1255 data = &ring->data[ring->cur];
1256 map = data->map;
1257 data_map = data; /* Remember who owns the DMA map */
1258
1259 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
1260 if (maxsegs > NFE_MAX_SCATTER)
1261 maxsegs = NFE_MAX_SCATTER;
1262 KASSERT(maxsegs >= sc->sc_tx_spare,
1263 ("not enough segments %d,%d", maxsegs, sc->sc_tx_spare));
1264
1265 error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
1266 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1267 if (error)
1268 goto back;
1269 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
1270
1271 error = 0;
1272
1273 /* setup h/w VLAN tagging */
1274 if (m0->m_flags & M_VLANTAG)
1275 vtag = m0->m_pkthdr.ether_vlantag;
1276
1277 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
1278 if (m0->m_pkthdr.csum_flags & CSUM_IP)
1279 flags |= NFE_TX_IP_CSUM;
1280 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
1281 flags |= NFE_TX_TCP_CSUM;
1282 }
1283
1284 /*
1285 * XXX urm. somebody is unaware of how hardware works. You
1286 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1287 * the ring until the entire chain is actually *VALID*. Otherwise
1288 * the hardware may encounter a partially initialized chain that
1289 * is marked as being ready to go when it in fact is not ready to
1290 * go.
1291 */
1292
1293 for (i = 0; i < nsegs; i++) {
1294 j = (ring->cur + i) % sc->sc_tx_ring_count;
1295 data = &ring->data[j];
1296
1297 if (sc->sc_caps & NFE_40BIT_ADDR) {
1298 desc64 = &ring->desc64[j];
1299 desc64->physaddr[0] =
1300 htole32(NFE_ADDR_HI(segs[i].ds_addr));
1301 desc64->physaddr[1] =
1302 htole32(NFE_ADDR_LO(segs[i].ds_addr));
1303 desc64->length = htole16(segs[i].ds_len - 1);
1304 desc64->vtag = htole32(vtag);
1305 desc64->flags = htole16(flags);
1306 } else {
1307 desc32 = &ring->desc32[j];
1308 desc32->physaddr = htole32(segs[i].ds_addr);
1309 desc32->length = htole16(segs[i].ds_len - 1);
1310 desc32->flags = htole16(flags);
1311 }
1312
1313 /* csum flags and vtag belong to the first fragment only */
1314 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1315 vtag = 0;
1316
1317 ring->queued++;
1318 KKASSERT(ring->queued <= sc->sc_tx_ring_count);
1319 }
1320
1321 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1322 if (sc->sc_caps & NFE_40BIT_ADDR) {
1323 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
1324 } else {
1325 if (sc->sc_caps & NFE_JUMBO_SUP)
1326 flags = NFE_TX_LASTFRAG_V2;
1327 else
1328 flags = NFE_TX_LASTFRAG_V1;
1329 desc32->flags |= htole16(flags);
1330 }
1331
1332 /*
1333 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1334 * whole mess until the first descriptor in the map is flagged.
1335 */
1336 for (i = nsegs - 1; i >= 0; --i) {
1337 j = (ring->cur + i) % sc->sc_tx_ring_count;
1338 if (sc->sc_caps & NFE_40BIT_ADDR) {
1339 desc64 = &ring->desc64[j];
1340 desc64->flags |= htole16(NFE_TX_VALID);
1341 } else {
1342 desc32 = &ring->desc32[j];
1343 desc32->flags |= htole16(NFE_TX_VALID);
1344 }
1345 }
1346 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;
1347
1348 /* Exchange DMA map */
1349 data_map->map = data->map;
1350 data->map = map;
1351 data->m = m0;
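/*
 * The loaded map and the mbuf both end up in the last fragment's
 * slot, where nfe_txeof() will unload and free them; the first
 * slot inherits that slot's previously unused map in exchange.
 */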
1352 back:
1353 if (error)
1354 m_freem(m0);
1355 return error;
1356 }
1357
1358 static void
1359 nfe_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1360 {
1361 struct nfe_softc *sc = ifp->if_softc;
1362 struct nfe_tx_ring *ring = &sc->txq;
1363 int count = 0, oactive = 0;
1364 struct mbuf *m0;
1365
1366 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1367 ASSERT_SERIALIZED(ifp->if_serializer);
1368
1369 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1370 return;
1371
1372 for (;;) {
1373 int error;
1374
1375 if (sc->sc_tx_ring_count - ring->queued <
1376 sc->sc_tx_spare + NFE_NSEG_RSVD) {
1377 if (oactive) {
1378 ifq_set_oactive(&ifp->if_snd);
1379 break;
1380 }
1381
1382 nfe_txeof(sc, 0);
1383 oactive = 1;
1384 continue;
1385 }
1386
1387 m0 = ifq_dequeue(&ifp->if_snd);
1388 if (m0 == NULL)
1389 break;
1390
1391 ETHER_BPF_MTAP(ifp, m0);
1392
1393 error = nfe_encap(sc, ring, m0);
1394 if (error) {
1395 IFNET_STAT_INC(ifp, oerrors, 1);
1396 if (error == EFBIG) {
1397 if (oactive) {
1398 ifq_set_oactive(&ifp->if_snd);
1399 break;
1400 }
1401 nfe_txeof(sc, 0);
1402 oactive = 1;
1403 }
1404 continue;
1405 } else {
1406 oactive = 0;
1407 }
1408 ++count;
1409
1410 /*
1411 * NOTE:
1412 * `m0' may be freed in nfe_encap(), so
1413 * it should not be touched any more.
1414 */
1415 }
1416
1417 if (count == 0) /* nothing sent */
1418 return;
1419
1420 /* Kick Tx */
1421 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1422
1423 /*
1424 * Set a timeout in case the chip goes out to lunch.
1425 */
1426 ifp->if_timer = 5;
1427 }
1428
1429 static void
1430 nfe_watchdog(struct ifnet *ifp)
1431 {
1432 struct nfe_softc *sc = ifp->if_softc;
1433
1434 ASSERT_SERIALIZED(ifp->if_serializer);
1435
1436 if (ifp->if_flags & IFF_RUNNING) {
1437 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
1438 nfe_txeof(sc, 1);
1439 return;
1440 }
1441
1442 if_printf(ifp, "watchdog timeout\n");
1443
1444 nfe_init(ifp->if_softc);
1445
1446 IFNET_STAT_INC(ifp, oerrors, 1);
1447 }
1448
1449 static void
1450 nfe_init(void *xsc)
1451 {
1452 struct nfe_softc *sc = xsc;
1453 struct ifnet *ifp = &sc->arpcom.ac_if;
1454 uint32_t tmp;
1455 int error;
1456
1457 ASSERT_SERIALIZED(ifp->if_serializer);
1458
1459 nfe_stop(sc);
1460
1461 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
1462 nfe_mac_reset(sc);
1463
1464 /*
1465 * NOTE:
1466 * Switching between jumbo frames and normal frames should
1467 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
1468 */
1469 if (ifp->if_mtu > ETHERMTU) {
1470 sc->sc_flags |= NFE_F_USE_JUMBO;
1471 sc->rxq.bufsz = NFE_JBYTES;
1472 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
1473 if (bootverbose)
1474 if_printf(ifp, "use jumbo frames\n");
1475 } else {
1476 sc->sc_flags &= ~NFE_F_USE_JUMBO;
1477 sc->rxq.bufsz = MCLBYTES;
1478 sc->sc_tx_spare = NFE_NSEG_SPARE;
1479 if (bootverbose)
1480 if_printf(ifp, "use non-jumbo frames\n");
1481 }
1482
1483 error = nfe_init_tx_ring(sc, &sc->txq);
1484 if (error) {
1485 nfe_stop(sc);
1486 return;
1487 }
1488
1489 error = nfe_init_rx_ring(sc, &sc->rxq);
1490 if (error) {
1491 nfe_stop(sc);
1492 return;
1493 }
1494
1495 NFE_WRITE(sc, NFE_TX_POLL, 0);
1496 NFE_WRITE(sc, NFE_STATUS, 0);
1497
1498 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;
1499
1500 if (ifp->if_capenable & IFCAP_RXCSUM)
1501 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1502
1503 /*
1504 * Although the adapter is capable of stripping VLAN tags from received
1505 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1506 * purpose. This will be done in software by our network stack.
1507 */
1508 if (sc->sc_caps & NFE_HW_VLAN)
1509 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1510
1511 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1512 DELAY(10);
1513 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1514
1515 if (sc->sc_caps & NFE_HW_VLAN)
1516 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1517
1518 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1519
1520 /* set MAC address */
1521 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);
1522
1523 /* tell MAC where rings are in memory */
1524 if (sc->sc_caps & NFE_40BIT_ADDR) {
1525 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
1526 NFE_ADDR_HI(sc->rxq.physaddr));
1527 }
1528 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));
1529
1530 if (sc->sc_caps & NFE_40BIT_ADDR) {
1531 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
1532 NFE_ADDR_HI(sc->txq.physaddr));
1533 }
1534 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
1535
1536 NFE_WRITE(sc, NFE_RING_SIZE,
1537 (sc->sc_rx_ring_count - 1) << 16 |
1538 (sc->sc_tx_ring_count - 1));
1539
1540 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1541
1542 /* force MAC to wakeup */
1543 tmp = NFE_READ(sc, NFE_PWR_STATE);
1544 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1545 DELAY(10);
1546 tmp = NFE_READ(sc, NFE_PWR_STATE);
1547 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1548
1549 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1550 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1551 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1552
1553 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1554 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1555
1556 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1557
1558 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1559 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1560 DELAY(10);
1561 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1562
1563 /* set Rx filter */
1564 nfe_setmulti(sc);
1565
1566 nfe_ifmedia_upd(ifp);
1567
1568 /* enable Rx */
1569 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1570
1571 /* enable Tx */
1572 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1573
1574 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1575
1576 #ifdef IFPOLL_ENABLE
1577 if (ifp->if_flags & IFF_NPOLLING)
1578 nfe_disable_intrs(sc);
1579 else
1580 #endif
1581 nfe_enable_intrs(sc);
1582
1583 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1584
1585 ifp->if_flags |= IFF_RUNNING;
1586 ifq_clr_oactive(&ifp->if_snd);
1587
1588 /*
1589 * If we had stuff in the tx ring before, it has all been cleaned
1590 * out now and we are not going to get an interrupt; jump-start any
1591 * pending output.
1592 */
1593 if (!ifq_is_empty(&ifp->if_snd))
1594 if_devstart(ifp);
1595 }
1596
1597 static void
1598 nfe_stop(struct nfe_softc *sc)
1599 {
1600 struct ifnet *ifp = &sc->arpcom.ac_if;
1601 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
1602 int i;
1603
1604 ASSERT_SERIALIZED(ifp->if_serializer);
1605
1606 callout_stop(&sc->sc_tick_ch);
1607
1608 ifp->if_timer = 0;
1609 ifp->if_flags &= ~IFF_RUNNING;
1610 ifq_clr_oactive(&ifp->if_snd);
1611 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
1612
1613 #define WAITMAX 50000
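/* 50000 iterations of DELAY(100) bound each wait below at ~5 seconds */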
1614
1615 /*
1616 * Abort Tx
1617 */
1618 NFE_WRITE(sc, NFE_TX_CTL, 0);
1619 for (i = 0; i < WAITMAX; ++i) {
1620 DELAY(100);
1621 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
1622 break;
1623 }
1624 if (i == WAITMAX)
1625 if_printf(ifp, "can't stop TX\n");
1626 DELAY(100);
1627
1628 /*
1629 * Disable Rx
1630 */
1631 NFE_WRITE(sc, NFE_RX_CTL, 0);
1632 for (i = 0; i < WAITMAX; ++i) {
1633 DELAY(100);
1634 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
1635 break;
1636 }
1637 if (i == WAITMAX)
1638 if_printf(ifp, "can't stop RX\n");
1639 DELAY(100);
1640
1641 #undef WAITMAX
1642
1643 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
1644 DELAY(10);
1645 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
1646
1647 /* Disable interrupts */
1648 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1649
1650 /* Reset Tx and Rx rings */
1651 nfe_reset_tx_ring(sc, &sc->txq);
1652 nfe_reset_rx_ring(sc, &sc->rxq);
1653 }
1654
1655 static int
1656 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1657 {
1658 int i, j, error, descsize;
1659 bus_dmamem_t dmem;
1660 void **desc;
1661
1662 if (sc->sc_caps & NFE_40BIT_ADDR) {
1663 desc = (void *)&ring->desc64;
1664 descsize = sizeof(struct nfe_desc64);
1665 } else {
1666 desc = (void *)&ring->desc32;
1667 descsize = sizeof(struct nfe_desc32);
1668 }
1669
1670 ring->bufsz = MCLBYTES;
1671 ring->cur = ring->next = 0;
1672
1673 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
1674 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1675 sc->sc_rx_ring_count * descsize,
1676 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1677 if (error) {
1678 if_printf(&sc->arpcom.ac_if,
1679 "could not create RX desc ring\n");
1680 return error;
1681 }
1682 ring->tag = dmem.dmem_tag;
1683 ring->map = dmem.dmem_map;
1684 *desc = dmem.dmem_addr;
1685 ring->physaddr = dmem.dmem_busaddr;
1686
1687 if (sc->sc_caps & NFE_JUMBO_SUP) {
1688 ring->jbuf =
1689 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
1690 M_DEVBUF, M_WAITOK | M_ZERO);
1691
1692 error = nfe_jpool_alloc(sc, ring);
1693 if (error) {
1694 if_printf(&sc->arpcom.ac_if,
1695 "could not allocate jumbo frames\n");
1696 kfree(ring->jbuf, M_DEVBUF);
1697 ring->jbuf = NULL;
1698 /* Allow jumbo frame allocation to fail */
1699 }
1700 }
1701
1702 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
1703 M_DEVBUF, M_WAITOK | M_ZERO);
1704
1705 error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
1706 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1707 NULL, NULL,
1708 MCLBYTES, 1, MCLBYTES,
1709 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
1710 &ring->data_tag);
1711 if (error) {
1712 if_printf(&sc->arpcom.ac_if,
1713 "could not create RX mbuf DMA tag\n");
1714 return error;
1715 }
1716
1717 /* Create a spare RX mbuf DMA map */
1718 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
1719 &ring->data_tmpmap);
1720 if (error) {
1721 if_printf(&sc->arpcom.ac_if,
1722 "could not create spare RX mbuf DMA map\n");
1723 bus_dma_tag_destroy(ring->data_tag);
1724 ring->data_tag = NULL;
1725 return error;
1726 }
1727
1728 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1729 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
1730 &ring->data[i].map);
1731 if (error) {
1732 if_printf(&sc->arpcom.ac_if,
1733 "could not create %dth RX mbuf DMA mapn", i);
1734 goto fail;
1735 }
1736 }
1737 return 0;
1738 fail:
1739 for (j = 0; j < i; ++j)
1740 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1741 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1742 bus_dma_tag_destroy(ring->data_tag);
1743 ring->data_tag = NULL;
1744 return error;
1745 }
1746
1747 static void
1748 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1749 {
1750 int i;
1751
1752 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1753 struct nfe_rx_data *data = &ring->data[i];
1754
1755 if (data->m != NULL) {
1756 if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
1757 bus_dmamap_unload(ring->data_tag, data->map);
1758 m_freem(data->m);
1759 data->m = NULL;
1760 }
1761 }
1762
1763 ring->cur = ring->next = 0;
1764 }
1765
1766 static int
1767 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1768 {
1769 int i;
1770
1771 for (i = 0; i < sc->sc_rx_ring_count; ++i) {
1772 int error;
1773
1774 /* XXX should use a function pointer */
1775 if (sc->sc_flags & NFE_F_USE_JUMBO)
1776 error = nfe_newbuf_jumbo(sc, ring, i, 1);
1777 else
1778 error = nfe_newbuf_std(sc, ring, i, 1);
1779 if (error) {
1780 if_printf(&sc->arpcom.ac_if,
1781 "could not allocate RX buffer\n");
1782 return error;
1783 }
1784 nfe_set_ready_rxdesc(sc, ring, i);
1785 }
1786 return 0;
1787 }
1788
1789 static void
1790 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1791 {
1792 if (ring->data_tag != NULL) {
1793 struct nfe_rx_data *data;
1794 int i;
1795
1796 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1797 data = &ring->data[i];
1798
1799 if (data->m != NULL) {
1800 bus_dmamap_unload(ring->data_tag, data->map);
1801 m_freem(data->m);
1802 }
1803 bus_dmamap_destroy(ring->data_tag, data->map);
1804 }
1805 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1806 bus_dma_tag_destroy(ring->data_tag);
1807 }
1808
1809 nfe_jpool_free(sc, ring);
1810
1811 if (ring->jbuf != NULL)
1812 kfree(ring->jbuf, M_DEVBUF);
1813 if (ring->data != NULL)
1814 kfree(ring->data, M_DEVBUF);
1815
1816 if (ring->tag != NULL) {
1817 void *desc;
1818
1819 if (sc->sc_caps & NFE_40BIT_ADDR)
1820 desc = ring->desc64;
1821 else
1822 desc = ring->desc32;
1823
1824 bus_dmamap_unload(ring->tag, ring->map);
1825 bus_dmamem_free(ring->tag, desc, ring->map);
1826 bus_dma_tag_destroy(ring->tag);
1827 }
1828 }
1829
1830 static struct nfe_jbuf *
1831 nfe_jalloc(struct nfe_softc *sc)
1832 {
1833 struct ifnet *ifp = &sc->arpcom.ac_if;
1834 struct nfe_jbuf *jbuf;
1835
1836 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1837
1838 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1839 if (jbuf != NULL) {
1840 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1841 jbuf->inuse = 1;
1842 } else {
1843 if_printf(ifp, "no free jumbo buffer\n");
1844 }
1845
1846 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1847
1848 return jbuf;
1849 }
1850
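/*
 * nfe_jfree()/nfe_jref() serve as the external-buffer free/ref
 * callbacks for mbufs pointing into the jumbo pool; a buffer goes
 * back on jfreelist only when its last reference is dropped.
 */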
1851 static void
1852 nfe_jfree(void *arg)
1853 {
1854 struct nfe_jbuf *jbuf = arg;
1855 struct nfe_softc *sc = jbuf->sc;
1856 struct nfe_rx_ring *ring = jbuf->ring;
1857
1858 if (&ring->jbuf[jbuf->slot] != jbuf)
1859 panic("%s: free wrong jumbo buffer", __func__);
1860 else if (jbuf->inuse == 0)
1861 panic("%s: jumbo buffer already freed", __func__);
1862
1863 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1864 atomic_subtract_int(&jbuf->inuse, 1);
1865 if (jbuf->inuse == 0)
1866 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1867 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1868 }
1869
1870 static void
1871 nfe_jref(void *arg)
1872 {
1873 struct nfe_jbuf *jbuf = arg;
1874 struct nfe_rx_ring *ring = jbuf->ring;
1875
1876 if (&ring->jbuf[jbuf->slot] != jbuf)
1877 panic("%s: ref wrong jumbo buffer", __func__);
1878 else if (jbuf->inuse == 0)
1879 panic("%s: jumbo buffer already freed", __func__);
1880
1881 atomic_add_int(&jbuf->inuse, 1);
1882 }
1883
1884 static int
1885 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1886 {
1887 struct nfe_jbuf *jbuf;
1888 bus_dmamem_t dmem;
1889 bus_addr_t physaddr;
1890 caddr_t buf;
1891 int i, error;
1892
1893 /*
1894 * Allocate a big chunk of DMA'able memory.
1895 */
1896 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
1897 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1898 NFE_JPOOL_SIZE(sc),
1899 BUS_DMA_WAITOK, &dmem);
1900 if (error) {
1901 if_printf(&sc->arpcom.ac_if,
1902 "could not create jumbo buffer\n");
1903 return error;
1904 }
1905 ring->jtag = dmem.dmem_tag;
1906 ring->jmap = dmem.dmem_map;
1907 ring->jpool = dmem.dmem_addr;
1908 physaddr = dmem.dmem_busaddr;
1909
1910 /* ...and split it into NFE_JBYTES (9KB) chunks */
1911 SLIST_INIT(&ring->jfreelist);
1912
1913 buf = ring->jpool;
1914 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
1915 jbuf = &ring->jbuf[i];
1916
1917 jbuf->sc = sc;
1918 jbuf->ring = ring;
1919 jbuf->inuse = 0;
1920 jbuf->slot = i;
1921 jbuf->buf = buf;
1922 jbuf->physaddr = physaddr;
1923
1924 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1925
1926 buf += NFE_JBYTES;
1927 physaddr += NFE_JBYTES;
1928 }
1929
1930 return 0;
1931 }
1932
1933 static void
1934 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1935 {
1936 if (ring->jtag != NULL) {
1937 bus_dmamap_unload(ring->jtag, ring->jmap);
1938 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1939 bus_dma_tag_destroy(ring->jtag);
1940 }
1941 }
1942
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void *)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void *)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
			BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
			&ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create TX buf DMA map %d\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	/* Destroy only the maps that were actually created */
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

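/*
 * Program the hardware multicast filter.  The filter is a single
 * (addr, mask) pair: addr accumulates the bits common to all
 * subscribed group addresses (bitwise AND), and mask keeps only the
 * bit positions on which every address agrees, so the hardware
 * presumably accepts any destination that matches addr on the masked
 * bits.  A worked example on the last octet: for groups ending in
 * 0x01 and 0x03, addr ends up 0x01 and mask 0xfd -- bit 1 differs
 * between the two groups, so it is dropped from the comparison.
 */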
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

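/*
 * Attach a fresh MCLBYTES cluster to RX slot 'idx'.  The new mbuf is
 * first loaded into the ring's spare DMA map (data_tmpmap); only when
 * that load succeeds is the old mbuf's map unloaded and the two maps
 * swapped.  A mapping failure therefore leaves the slot's previous
 * mbuf and mapping intact, so the RX ring never loses a buffer to a
 * transient failure.
 */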
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
			m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}

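/*
 * Attach a jumbo buffer to RX slot 'idx'.  Jumbo chunks come from the
 * pre-mapped pool, so no DMA load is needed here: the chunk is hooked
 * up as mbuf external storage with nfe_jfree()/nfe_jref() as the
 * free/reference callbacks that maintain its 'inuse' count.
 */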
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

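/*
 * Write a buffer's bus address into an RX descriptor.  The 64-bit
 * descriptor format splits the address into high and low 32-bit words
 * (NFE_ADDR_HI/NFE_ADDR_LO); the 32-bit format holds a single 32-bit
 * address.  Both are stored little-endian, as the NIC expects.
 */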
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

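/*
 * Sysctl handler for the interrupt moderation time (sc_imtime; the
 * unit is likely microseconds, per the 128uS note in
 * nfe_enable_intrs()).  A positive value selects a fixed moderation
 * timer, a negative value selects dynamic interrupt moderation
 * (NFE_F_DYN_IM) with the absolute value as the timer period, and 0
 * falls back to NFE_IMTIME_DEFAULT.  Changes take effect immediately
 * when the interface is RUNNING and not in polling mode.  For example
 * (sysctl node name assumed for illustration):
 *
 *	sysctl hw.nfe0.imtime=-128	# dynamic moderation
 *	sysctl hw.nfe0.imtime=128	# fixed 128uS timer
 */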
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}