/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define	__BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
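/*
 * Note: the "& ((count) - 1)" wrap in TX_NEXT/RX_NEXT only works because
 * GENET_DMA_DESC_COUNT is a power of two; the same assumption is made
 * below wherever prod_idx/cons_idx values are masked into ring indices.
 */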

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
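/*
 * Both names set the same variable.  The sysctl is CTLFLAG_RDTUN, so it
 * is read-only at runtime and settable only as a boot-time tunable, e.g.
 * in /boot/loader.conf:
 *	hw.genet.rx_batch="32"
 */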

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but is
	 * structured to support multiple rings.  The additional rings
	 * would be used for different priorities.
	 */
#define	DEF_TXQUEUE	0
#define	NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];	/* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define	DEF_RXQUEUE	0
#define	NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];	/* ring entries */
};
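/*
 * Locking: the softc mutex (sc->mtx) protects the queue/ring state and
 * the stat callout; the GEN_ASSERT_LOCKED() calls below mark the paths
 * that require it.  gen_rxintr() drops the lock around if_input().
 */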

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define	GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}
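/*
 * The sequence above pulses the RBUF flush reset, soft-resets the UMAC
 * (briefly with local loopback enabled), and clears the MIB counters.
 * The DELAY(10) spacing is presumably inherited from the NetBSD driver
 * this code is derived from rather than a documented requirement.
 */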

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}
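/*
 * With GENET_RBUF_64B_EN set, the hardware prepends (RX) or expects (TX)
 * a 64-byte status block on each frame; this is what the
 * "struct statusblock" handling in gen_encap() and gen_rxintr() matches.
 */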

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

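	/*
	 * The ring start/end addresses index the on-chip descriptor area
	 * and appear to be expressed in 32-bit words, hence the
	 * GENET_DMA_DESC_SIZE / 4 scaling of the end address below.
	 */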
	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for the unicast and broadcast entries */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* count this address */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}
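/*
 * The mdf_ctrl computation enables the n highest-order bits of MDF_CTRL:
 * e.g. for n == 3 it sets bits (GENET_MAX_MDF_FILTER - 1) through
 * (GENET_MAX_MDF_FILTER - 3), presumably mapping MSB-first onto filter
 * slots 0 through n - 1 as programmed above.
 */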

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = IF_LLADDR(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define	CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store the mbuf in the first ring entry used by this packet */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

	/*
	 * If headers need to be copied contiguous to statusblock, do so.
	 * If copying to the internal mbuf data area, and the status block
	 * is not at the beginning of that area, shift the status block (which
	 * is empty) and following data.
	 */
#define	COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, m0->m_len);			\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}
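/*
 * COPY(size) makes "size" bytes at p contiguous with the status block in
 * m0 when copying is in effect, adjusting both mbufs' lengths; callers
 * advance p and off_in_m themselves.  copy_p is advanced even while
 * "copy" is false so that it stays in step with headers that are already
 * contiguous in the first mbuf.
 */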

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether most other cases require moving a header;
		 * ARP works without.  However, Wake On LAN packets sent
		 * by wake(8) via BPF need something like this.
		 */
		COPY(MIN(gen_tx_hdr_min, m->m_len));
		offset += MIN(gen_tx_hdr_min, m->m_len);
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb->rxcsum);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}
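/*
 * gen_rxintr() chains received mbufs through m_nextpkt and hands them to
 * if_input() in batches of gen_rx_batch packets, dropping the softc lock
 * around the call so it is not held across the network stack.
 */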

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define	MII_BUSY_RETRY		1000
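/*
 * With the DELAY(10) per polling iteration below, MII_BUSY_RETRY bounds
 * each MDIO operation to roughly 10 ms before the timeout message.
 */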

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);