1 /*-
2 * Copyright (c) 2009, Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/8.2/sys/mips/atheros/if_arge.c 215938 2010-11-27 12:26:40Z jchandra $");
30
31 /*
32 * AR71XX gigabit ethernet driver
33 */
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/module.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 #include <sys/sysctl.h>
49
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/ethernet.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56
57 #include <net/bpf.h>
58
59 #include <machine/bus.h>
60 #include <machine/cache.h>
61 #include <machine/resource.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65 #include <machine/pmap.h>
66 #include <sys/bus.h>
67 #include <sys/rman.h>
68
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74
75 MODULE_DEPEND(arge, ether, 1, 1, 1);
76 MODULE_DEPEND(arge, miibus, 1, 1, 1);
77
78 #include "miibus_if.h"
79
80 #include <mips/atheros/ar71xxreg.h>
81 #include <mips/atheros/if_argevar.h>
82 #include <mips/atheros/ar71xx_cpudef.h>
83
#undef ARGE_DEBUG
#ifdef ARGE_DEBUG
#define dprintf printf
#else
/*
 * Standard C99 variadic macro instead of the GNU-only "arg..." named
 * variadic extension; expands to nothing when debugging is disabled.
 */
#define dprintf(...)
#endif
90
91 static int arge_attach(device_t);
92 static int arge_detach(device_t);
93 static void arge_flush_ddr(struct arge_softc *);
94 static int arge_ifmedia_upd(struct ifnet *);
95 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
97 static void arge_init(void *);
98 static void arge_init_locked(struct arge_softc *);
99 static void arge_link_task(void *, int);
100 static void arge_set_pll(struct arge_softc *, int, int);
101 static int arge_miibus_readreg(device_t, int, int);
102 static void arge_miibus_statchg(device_t);
103 static int arge_miibus_writereg(device_t, int, int, int);
104 static int arge_probe(device_t);
105 static void arge_reset_dma(struct arge_softc *);
106 static int arge_resume(device_t);
107 static int arge_rx_ring_init(struct arge_softc *);
108 static int arge_tx_ring_init(struct arge_softc *);
109 #ifdef DEVICE_POLLING
110 static int arge_poll(struct ifnet *, enum poll_cmd, int);
111 #endif
112 static int arge_shutdown(device_t);
113 static void arge_start(struct ifnet *);
114 static void arge_start_locked(struct ifnet *);
115 static void arge_stop(struct arge_softc *);
116 static int arge_suspend(device_t);
117
118 static int arge_rx_locked(struct arge_softc *);
119 static void arge_tx_locked(struct arge_softc *);
120 static void arge_intr(void *);
121 static int arge_intr_filter(void *);
122 static void arge_tick(void *);
123
124 /*
125 * ifmedia callbacks for multiPHY MAC
126 */
127 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
128 int arge_multiphy_mediachange(struct ifnet *);
129
130 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
131 static int arge_dma_alloc(struct arge_softc *);
132 static void arge_dma_free(struct arge_softc *);
133 static int arge_newbuf(struct arge_softc *, int);
134 static __inline void arge_fixup_rx(struct mbuf *);
135
/*
 * newbus method table for the arge driver; the {0, 0} entry
 * terminates the list.
 */
static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arge_probe),
	DEVMETHOD(device_attach,	arge_attach),
	DEVMETHOD(device_detach,	arge_detach),
	DEVMETHOD(device_suspend,	arge_suspend),
	DEVMETHOD(device_resume,	arge_resume),
	DEVMETHOD(device_shutdown,	arge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	arge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	arge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	arge_miibus_statchg),

	{ 0, 0 }
};
156
/* Driver declaration: name, methods and per-instance softc size. */
static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};
162
static devclass_t arge_devclass;

/* Attach arge instances under nexus; hang a miibus off each arge. */
DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
167
168 /*
169 * RedBoot passes MAC address to entry point as environment
170 * variable. platfrom_start parses it and stores in this variable
171 */
extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];

/* Serializes access to the MII (MDIO) management registers. */
static struct mtx miibus_mtx;

MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
177
178
179 /*
180 * Flushes all
181 */
182 static void
183 arge_flush_ddr(struct arge_softc *sc)
184 {
185 if (sc->arge_mac_unit == 0)
186 ar71xx_device_flush_ddr_ge0();
187 else
188 ar71xx_device_flush_ddr_ge1();
189 }
190
191 static int
192 arge_probe(device_t dev)
193 {
194
195 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
196 return (0);
197 }
198
/*
 * Register per-device sysctl nodes: a debug flag and counters of
 * aligned/unaligned TX packets (see arge_encap()).
 */
static void
arge_attach_sysctl(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->arge_debug, 0,
	    "arge interface debugging flags");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
	    "number of TX aligned packets");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
	    "number of TX unaligned packets");
}
218
/*
 * Attach: read PHY mask and media defaults from device hints, allocate
 * bus resources, soft-reset and configure the MAC block, probe PHYs
 * (or set up fixed media for the multi-PHY case), attach the ethernet
 * layer and hook the interrupt.  On any failure, unwinds through
 * arge_detach().
 */
static int
arge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct arge_softc *sc;
	int error = 0, rid, phymask;
	uint32_t reg, rnd;
	int is_base_mac_empty, i, phys_total;
	uint32_t hint;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 * Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", phymask);
	}

	/*
	 * Get default media & duplex mode, by default its Base100T
	 * and full duplex
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else
		sc->arge_media_type = IFM_100_TX;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	sc->arge_phymask = phymask;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NOTE(review): capenable is snapshotted before IFCAP_POLLING is
	 * added to capabilities below, so polling starts disabled and
	 * must be turned on via SIOCSIFCAP — confirm this is intended.
	 */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Use the RedBoot-provided base MAC address, if it is non-zero. */
	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		eaddr[i] = ar711_base_mac[i] & 0xff;
		if (eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		/* ASCII "bsd" prefix plus three random bytes. */
		rnd = arc4random();
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 24) & 0xff;
		eaddr[4] = (rnd >> 16) & 0xff;
		eaddr[5] = (rnd >> 8) & 0xff;
	}

	/* Make the second MAC's address distinct from the first. */
	if (sc->arge_mac_unit != 0)
		eaddr[5] +=  sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Initialize the MAC block */

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
	DELAY(100);
	ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
	    MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
	    MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);

	/*
	 * Program the station address registers with the MAC address
	 * chosen above: STA_ADDR1 holds the low four bytes, STA_ADDR2
	 * the high two.
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
	    (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);

	/* FIFO configuration and default RX filter match/mask. */
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

	/*
	 * Check if we have single-PHY MAC or multi-PHY
	 */
	phys_total = 0;
	for (i = 0; i < ARGE_NPHY; i++)
		if (phymask & (1 << i))
			phys_total ++;

	if (phys_total == 0) {
		error = EINVAL;
		goto fail;
	}

	if (phys_total == 1) {
		/* Do MII setup. */
		if (mii_phy_probe(dev, &sc->arge_miibus,
		    arge_ifmedia_upd, arge_ifmedia_sts)) {
			device_printf(dev, "MII without any phy!\n");
			error = ENXIO;
			goto fail;
		}
	}
	else {
		/*
		 * Multi-PHY: no MII autonegotiation; advertise the single
		 * fixed media configured from hints and program the PLL
		 * for it directly.
		 */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(dev);

fail:
	if (error)
		arge_detach(dev);

	return (error);
}
468
469 static int
470 arge_detach(device_t dev)
471 {
472 struct arge_softc *sc = device_get_softc(dev);
473 struct ifnet *ifp = sc->arge_ifp;
474
475 KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));
476
477 /* These should only be active if attach succeeded */
478 if (device_is_attached(dev)) {
479 ARGE_LOCK(sc);
480 sc->arge_detach = 1;
481 #ifdef DEVICE_POLLING
482 if (ifp->if_capenable & IFCAP_POLLING)
483 ether_poll_deregister(ifp);
484 #endif
485
486 arge_stop(sc);
487 ARGE_UNLOCK(sc);
488 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
489 ether_ifdetach(ifp);
490 }
491
492 if (sc->arge_miibus)
493 device_delete_child(dev, sc->arge_miibus);
494
495 bus_generic_detach(dev);
496
497 if (sc->arge_intrhand)
498 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
499
500 if (sc->arge_res)
501 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
502 sc->arge_res);
503
504 if (ifp)
505 if_free(ifp);
506
507 arge_dma_free(sc);
508
509 mtx_destroy(&sc->arge_mtx);
510
511 return (0);
512
513 }
514
515 static int
516 arge_suspend(device_t dev)
517 {
518
519 panic("%s", __func__);
520 return 0;
521 }
522
523 static int
524 arge_resume(device_t dev)
525 {
526
527 panic("%s", __func__);
528 return 0;
529 }
530
531 static int
532 arge_shutdown(device_t dev)
533 {
534 struct arge_softc *sc;
535
536 sc = device_get_softc(dev);
537
538 ARGE_LOCK(sc);
539 arge_stop(sc);
540 ARGE_UNLOCK(sc);
541
542 return (0);
543 }
544
/*
 * Read PHY register 'reg' on PHY 'phy' through the MAC's MII
 * management interface.  Returns the masked register value, 0 when
 * the PHY is outside this MAC's phymask, or -1 on timeout.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	/* Ignore PHYs not assigned to this MAC unit. */
	if ((sc->arge_phymask & (1 << phy)) == 0)
		return (0);

	mtx_lock(&miibus_mtx);
	/* Idle the command register, latch the address, start a read. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	/* Busy-wait (5us steps) for the MII transaction to finish. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		mtx_unlock(&miibus_mtx);
		dprintf("%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	/* Return the command register to its idle state. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	dprintf("%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
	    phy, reg, addr, result);

	return (result);
}
582
/*
 * Write 'data' to PHY register 'reg' on PHY 'phy' through the MAC's
 * MII management interface.  Returns 0 on success, -1 when the PHY is
 * outside this MAC's phymask or the transaction times out.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);


	/* Ignore PHYs not assigned to this MAC unit. */
	if ((sc->arge_phymask & (1 << phy)) == 0)
		return (-1);

	dprintf("%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	/* Latch the address, then writing CONTROL starts the cycle. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);

	/* Busy-wait (5us steps) for the MII transaction to finish. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	mtx_unlock(&miibus_mtx);

	if (i < 0) {
		dprintf("%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	return (0);
}
617
618 static void
619 arge_miibus_statchg(device_t dev)
620 {
621 struct arge_softc *sc;
622
623 sc = device_get_softc(dev);
624 taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
625 }
626
627 static void
628 arge_link_task(void *arg, int pending)
629 {
630 struct arge_softc *sc;
631 struct mii_data *mii;
632 struct ifnet *ifp;
633 uint32_t media, duplex;
634
635 sc = (struct arge_softc *)arg;
636
637 ARGE_LOCK(sc);
638 mii = device_get_softc(sc->arge_miibus);
639 ifp = sc->arge_ifp;
640 if (mii == NULL || ifp == NULL ||
641 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
642 ARGE_UNLOCK(sc);
643 return;
644 }
645
646 if (mii->mii_media_status & IFM_ACTIVE) {
647
648 media = IFM_SUBTYPE(mii->mii_media_active);
649
650 if (media != IFM_NONE) {
651 sc->arge_link_status = 1;
652 duplex = mii->mii_media_active & IFM_GMASK;
653 arge_set_pll(sc, media, duplex);
654 }
655 } else
656 sc->arge_link_status = 0;
657
658 ARGE_UNLOCK(sc);
659 }
660
/*
 * Program MAC_CFG2, IFCONTROL, the RX filter byte-mode bit and the
 * SoC PLL for the given media subtype (IFM_10_T/IFM_100_TX/
 * IFM_1000_T/IFM_1000_SX) and duplex (IFM_FDX or 0).
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t		cfg, ifcontrol, rx_filtmask;
	int if_speed;

	/* Clear interface-mode and duplex bits before re-setting them. */
	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	switch(media) {
	case IFM_10_T:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		if_speed = 10;
		break;
	case IFM_100_TX:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		ifcontrol |= MAC_IFCONTROL_SPEED;
		if_speed = 100;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		cfg |= MAC_CFG2_IFACE_MODE_1000;
		rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
		if_speed = 1000;
		break;
	default:
		/* Unknown media: fall back to 100 Mbit settings. */
		if_speed = 100;
		device_printf(sc->arge_dev,
		    "Unknown media %d\n", media);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD,
	    0x008001ff);

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);

	/* set PLL registers */
	if (sc->arge_mac_unit == 0)
		ar71xx_device_set_pll_ge0(if_speed);
	else
		ar71xx_device_set_pll_ge1(if_speed);
}
717
718
/*
 * Stop both DMA engines, clear the descriptor pointers and acknowledge
 * every pending RX/TX completion and error interrupt so the engine
 * starts from a clean state.
 */
static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts */
	while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
}
746
747
748
static void
arge_init(void *xsc)
{
	struct arge_softc *sc = xsc;

	/* if_init entry point: serialize around arge_init_locked(). */
	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}
758
/*
 * Bring the interface up with the softc lock held: stop any current
 * activity, rebuild the RX/TX rings, reset the DMA engine, kick media
 * negotiation (single-PHY case) and enable RX DMA and interrupts.
 */
static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet *ifp = sc->arge_ifp;
	struct mii_data *mii;

	ARGE_LOCK_ASSERT(sc);

	arge_stop(sc);

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);


	if (sc->arge_miibus) {
		/* Link state will be updated by arge_link_task(). */
		sc->arge_link_status = 0;
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Sun always shines over multiPHY interface
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->arge_miibus)
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);

	/* Point the DMA engine at the start of both rings. */
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}
810
811 /*
812 * Return whether the mbuf chain is correctly aligned
813 * for the arge TX engine.
814 *
815 * The TX engine requires each fragment to be aligned to a
816 * 4 byte boundary and the size of each fragment except
817 * the last to be a multiple of 4 bytes.
818 */
819 static int
820 arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
821 {
822 struct mbuf *m;
823
824 for (m = m0; m != NULL; m = m->m_next) {
825 if((mtod(m, intptr_t) & 3) != 0)
826 return 0;
827 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
828 return 0;
829 }
830 return 1;
831 }
832
833 /*
834 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
835 * pointers to the fragment pointers.
836 */
837 static int
838 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
839 {
840 struct arge_txdesc *txd;
841 struct arge_desc *desc, *prev_desc;
842 bus_dma_segment_t txsegs[ARGE_MAXFRAGS];
843 int error, i, nsegs, prod, prev_prod;
844 struct mbuf *m;
845
846 ARGE_LOCK_ASSERT(sc);
847
848 /*
849 * Fix mbuf chain, all fragments should be 4 bytes aligned and
850 * even 4 bytes
851 */
852 m = *m_head;
853 if (! arge_mbuf_chain_is_tx_aligned(m)) {
854 sc->stats.tx_pkts_unaligned++;
855 m = m_defrag(*m_head, M_DONTWAIT);
856 if (m == NULL) {
857 *m_head = NULL;
858 return (ENOBUFS);
859 }
860 *m_head = m;
861 } else
862 sc->stats.tx_pkts_aligned++;
863
864 prod = sc->arge_cdata.arge_tx_prod;
865 txd = &sc->arge_cdata.arge_txdesc[prod];
866 error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
867 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
868
869 if (error == EFBIG) {
870 panic("EFBIG");
871 } else if (error != 0)
872 return (error);
873
874 if (nsegs == 0) {
875 m_freem(*m_head);
876 *m_head = NULL;
877 return (EIO);
878 }
879
880 /* Check number of available descriptors. */
881 if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
882 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
883 return (ENOBUFS);
884 }
885
886 txd->tx_m = *m_head;
887 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
888 BUS_DMASYNC_PREWRITE);
889
890 /*
891 * Make a list of descriptors for this packet. DMA controller will
892 * walk through it while arge_link is not zero.
893 */
894 prev_prod = prod;
895 desc = prev_desc = NULL;
896 for (i = 0; i < nsegs; i++) {
897 desc = &sc->arge_rdata.arge_tx_ring[prod];
898 desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);
899
900 if (txsegs[i].ds_addr & 3)
901 panic("TX packet address unaligned\n");
902
903 desc->packet_addr = txsegs[i].ds_addr;
904
905 /* link with previous descriptor */
906 if (prev_desc)
907 prev_desc->packet_ctrl |= ARGE_DESC_MORE;
908
909 sc->arge_cdata.arge_tx_cnt++;
910 prev_desc = desc;
911 ARGE_INC(prod, ARGE_TX_RING_COUNT);
912 }
913
914 /* Update producer index. */
915 sc->arge_cdata.arge_tx_prod = prod;
916
917 /* Sync descriptors. */
918 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
919 sc->arge_cdata.arge_tx_ring_map,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921
922 /* Start transmitting */
923 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
924 return (0);
925 }
926
927 static void
928 arge_start(struct ifnet *ifp)
929 {
930 struct arge_softc *sc;
931
932 sc = ifp->if_softc;
933
934 ARGE_LOCK(sc);
935 arge_start_locked(ifp);
936 ARGE_UNLOCK(sc);
937 }
938
939 static void
940 arge_start_locked(struct ifnet *ifp)
941 {
942 struct arge_softc *sc;
943 struct mbuf *m_head;
944 int enq;
945
946 sc = ifp->if_softc;
947
948 ARGE_LOCK_ASSERT(sc);
949
950 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
951 IFF_DRV_RUNNING || sc->arge_link_status == 0 )
952 return;
953
954 arge_flush_ddr(sc);
955
956 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
957 sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
958 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
959 if (m_head == NULL)
960 break;
961
962
963 /*
964 * Pack the data into the transmit ring.
965 */
966 if (arge_encap(sc, &m_head)) {
967 if (m_head == NULL)
968 break;
969 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
970 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
971 break;
972 }
973
974 enq++;
975 /*
976 * If there's a BPF listener, bounce a copy of this frame
977 * to him.
978 */
979 ETHER_BPF_MTAP(ifp, m_head);
980 }
981 }
982
983 static void
984 arge_stop(struct arge_softc *sc)
985 {
986 struct ifnet *ifp;
987
988 ARGE_LOCK_ASSERT(sc);
989
990 ifp = sc->arge_ifp;
991 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
992 if (sc->arge_miibus)
993 callout_stop(&sc->arge_stat_callout);
994
995 /* mask out interrupts */
996 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
997
998 arge_reset_dma(sc);
999 }
1000
1001
/*
 * Interface ioctl handler.  Handles up/down transitions, multicast
 * (currently a stub), media queries and the polling capability toggle;
 * everything else is passed to ether_ioctl().
 */
static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int error;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				/* Don't re-init while detach is in progress. */
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Single-PHY uses miibus media; multi-PHY uses our own. */
		if (sc->arge_miibus) {
			mii = device_get_softc(sc->arge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		}
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
		/*
		 * NOTE: without DEVICE_POLLING compiled in, this case
		 * falls through to ether_ioctl() below.
		 */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Disable interrupts before enabling polling. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				/* Re-enable interrupts when leaving polling. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1080
1081 /*
1082 * Set media options.
1083 */
1084 static int
1085 arge_ifmedia_upd(struct ifnet *ifp)
1086 {
1087 struct arge_softc *sc;
1088 struct mii_data *mii;
1089 struct mii_softc *miisc;
1090 int error;
1091
1092 sc = ifp->if_softc;
1093 ARGE_LOCK(sc);
1094 mii = device_get_softc(sc->arge_miibus);
1095 if (mii->mii_instance) {
1096 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1097 mii_phy_reset(miisc);
1098 }
1099 error = mii_mediachg(mii);
1100 ARGE_UNLOCK(sc);
1101
1102 return (error);
1103 }
1104
1105 /*
1106 * Report current media status.
1107 */
1108 static void
1109 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1110 {
1111 struct arge_softc *sc = ifp->if_softc;
1112 struct mii_data *mii;
1113
1114 mii = device_get_softc(sc->arge_miibus);
1115 ARGE_LOCK(sc);
1116 mii_pollstat(mii);
1117 ARGE_UNLOCK(sc);
1118 ifmr->ifm_active = mii->mii_media_active;
1119 ifmr->ifm_status = mii->mii_media_status;
1120 }
1121
/* Callback argument for arge_dmamap_cb(): receives the bus address. */
struct arge_dmamap_arg {
	bus_addr_t		arge_busaddr;
};
1125
1126 static void
1127 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1128 {
1129 struct arge_dmamap_arg *ctx;
1130
1131 if (error != 0)
1132 return;
1133 ctx = arg;
1134 ctx->arge_busaddr = segs[0].ds_addr;
1135 }
1136
/*
 * Allocate all busdma resources for the controller.
 *
 * Creates the parent DMA tag and four sub-tags (Tx/Rx descriptor rings,
 * Tx/Rx mbuf buffers), allocates and loads both descriptor rings, and
 * creates one dmamap per ring slot plus a spare Rx map that
 * arge_newbuf() uses for its load-then-swap refill scheme.
 *
 * Returns 0 on success or a bus_dma/ENOMEM-style error.  On failure the
 * resources created so far are left in place; the caller is expected to
 * release them with arge_dma_free() (NOTE(review): confirm the attach
 * error path does call arge_dma_free()).
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg ctx;	/* receives loaded bus address */
	struct arge_txdesc *txd;
	struct arge_rxdesc *rxd;
	int error, i;

	/*
	 * Create parent DMA tag.  Restricted to 32-bit addresses; all
	 * other tags inherit from it.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring: one contiguous segment, ring-aligned. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring: one contiguous segment, ring-aligned. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/*
	 * Create tag for Tx buffers: a frame may be scattered over up to
	 * ARGE_MAXFRAGS cluster-sized segments, each 32-bit aligned.
	 */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/*
	 * Create tag for Rx buffers: one cluster per packet.
	 * NOTE(review): nsegments is ARGE_MAXFRAGS although maxsize is a
	 * single cluster, so only one segment can ever be returned; a
	 * value of 1 would be tighter but this is harmless.
	 */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	/* arge_dmamap_cb stores the ring's bus address into ctx. */
	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers, one per ring slot. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/*
	 * Create DMA maps for Rx buffers: the spare map first (used by
	 * arge_newbuf() to stage a new cluster), then one per slot.
	 */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	/* On success this falls through with error == 0. */
	return (error);
}
1312
1313 static void
1314 arge_dma_free(struct arge_softc *sc)
1315 {
1316 struct arge_txdesc *txd;
1317 struct arge_rxdesc *rxd;
1318 int i;
1319
1320 /* Tx ring. */
1321 if (sc->arge_cdata.arge_tx_ring_tag) {
1322 if (sc->arge_cdata.arge_tx_ring_map)
1323 bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1324 sc->arge_cdata.arge_tx_ring_map);
1325 if (sc->arge_cdata.arge_tx_ring_map &&
1326 sc->arge_rdata.arge_tx_ring)
1327 bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1328 sc->arge_rdata.arge_tx_ring,
1329 sc->arge_cdata.arge_tx_ring_map);
1330 sc->arge_rdata.arge_tx_ring = NULL;
1331 sc->arge_cdata.arge_tx_ring_map = NULL;
1332 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1333 sc->arge_cdata.arge_tx_ring_tag = NULL;
1334 }
1335 /* Rx ring. */
1336 if (sc->arge_cdata.arge_rx_ring_tag) {
1337 if (sc->arge_cdata.arge_rx_ring_map)
1338 bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1339 sc->arge_cdata.arge_rx_ring_map);
1340 if (sc->arge_cdata.arge_rx_ring_map &&
1341 sc->arge_rdata.arge_rx_ring)
1342 bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1343 sc->arge_rdata.arge_rx_ring,
1344 sc->arge_cdata.arge_rx_ring_map);
1345 sc->arge_rdata.arge_rx_ring = NULL;
1346 sc->arge_cdata.arge_rx_ring_map = NULL;
1347 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1348 sc->arge_cdata.arge_rx_ring_tag = NULL;
1349 }
1350 /* Tx buffers. */
1351 if (sc->arge_cdata.arge_tx_tag) {
1352 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1353 txd = &sc->arge_cdata.arge_txdesc[i];
1354 if (txd->tx_dmamap) {
1355 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1356 txd->tx_dmamap);
1357 txd->tx_dmamap = NULL;
1358 }
1359 }
1360 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1361 sc->arge_cdata.arge_tx_tag = NULL;
1362 }
1363 /* Rx buffers. */
1364 if (sc->arge_cdata.arge_rx_tag) {
1365 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1366 rxd = &sc->arge_cdata.arge_rxdesc[i];
1367 if (rxd->rx_dmamap) {
1368 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1369 rxd->rx_dmamap);
1370 rxd->rx_dmamap = NULL;
1371 }
1372 }
1373 if (sc->arge_cdata.arge_rx_sparemap) {
1374 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1375 sc->arge_cdata.arge_rx_sparemap);
1376 sc->arge_cdata.arge_rx_sparemap = 0;
1377 }
1378 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1379 sc->arge_cdata.arge_rx_tag = NULL;
1380 }
1381
1382 if (sc->arge_cdata.arge_parent_tag) {
1383 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1384 sc->arge_cdata.arge_parent_tag = NULL;
1385 }
1386 }
1387
1388 /*
1389 * Initialize the transmit descriptors.
1390 */
1391 static int
1392 arge_tx_ring_init(struct arge_softc *sc)
1393 {
1394 struct arge_ring_data *rd;
1395 struct arge_txdesc *txd;
1396 bus_addr_t addr;
1397 int i;
1398
1399 sc->arge_cdata.arge_tx_prod = 0;
1400 sc->arge_cdata.arge_tx_cons = 0;
1401 sc->arge_cdata.arge_tx_cnt = 0;
1402 sc->arge_cdata.arge_tx_pkts = 0;
1403
1404 rd = &sc->arge_rdata;
1405 bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1406 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1407 if (i == ARGE_TX_RING_COUNT - 1)
1408 addr = ARGE_TX_RING_ADDR(sc, 0);
1409 else
1410 addr = ARGE_TX_RING_ADDR(sc, i + 1);
1411 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1412 rd->arge_tx_ring[i].next_desc = addr;
1413 txd = &sc->arge_cdata.arge_txdesc[i];
1414 txd->tx_m = NULL;
1415 }
1416
1417 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1418 sc->arge_cdata.arge_tx_ring_map,
1419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1420
1421 return (0);
1422 }
1423
1424 /*
1425 * Initialize the RX descriptors and allocate mbufs for them. Note that
1426 * we arrange the descriptors in a closed ring, so that the last descriptor
1427 * points back to the first.
1428 */
1429 static int
1430 arge_rx_ring_init(struct arge_softc *sc)
1431 {
1432 struct arge_ring_data *rd;
1433 struct arge_rxdesc *rxd;
1434 bus_addr_t addr;
1435 int i;
1436
1437 sc->arge_cdata.arge_rx_cons = 0;
1438
1439 rd = &sc->arge_rdata;
1440 bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1441 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1442 rxd = &sc->arge_cdata.arge_rxdesc[i];
1443 rxd->rx_m = NULL;
1444 rxd->desc = &rd->arge_rx_ring[i];
1445 if (i == ARGE_RX_RING_COUNT - 1)
1446 addr = ARGE_RX_RING_ADDR(sc, 0);
1447 else
1448 addr = ARGE_RX_RING_ADDR(sc, i + 1);
1449 rd->arge_rx_ring[i].next_desc = addr;
1450 if (arge_newbuf(sc, i) != 0) {
1451 return (ENOBUFS);
1452 }
1453 }
1454
1455 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1456 sc->arge_cdata.arge_rx_ring_map,
1457 BUS_DMASYNC_PREWRITE);
1458
1459 return (0);
1460 }
1461
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * Uses the classic spare-map scheme: the new cluster is loaded into
 * the spare dmamap first, so on any failure the slot's current mbuf
 * and mapping are left untouched.  Only after a successful load are
 * the slot's map and the spare map swapped.
 *
 * Returns 0 on success, ENOBUFS if cluster allocation or the DMA load
 * fails.
 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
	struct arge_desc *desc;
	struct arge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * Trim 8 bytes of headroom off the cluster.  This keeps the DMA
	 * start address aligned (the 4-byte check below must hold) and,
	 * presumably, leaves room for arge_fixup_rx() to slide the
	 * payload back by ETHER_ALIGN after reception — TODO confirm.
	 */
	m_adj(m, sizeof(uint64_t));

	/* Stage the new cluster in the spare map; slot stays intact on error. */
	if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
	    sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->arge_cdata.arge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		/* Drop the old mapping; its mbuf was consumed by the caller. */
		bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
	sc->arge_cdata.arge_rx_sparemap = map;
	rxd->rx_m = m;
	desc = rxd->desc;
	/* The DMA engine requires 32-bit aligned buffer addresses. */
	if (segs[0].ds_addr & 3)
		panic("RX packet address unaligned");
	desc->packet_addr = segs[0].ds_addr;
	desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

	/* Push the updated descriptor out to the hardware-visible ring. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
1508
1509 static __inline void
1510 arge_fixup_rx(struct mbuf *m)
1511 {
1512 int i;
1513 uint16_t *src, *dst;
1514
1515 src = mtod(m, uint16_t *);
1516 dst = src - 1;
1517
1518 for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1519 *dst++ = *src++;
1520 }
1521
1522 if (m->m_len % sizeof(uint16_t))
1523 *(uint8_t *)dst = *(uint8_t *)src;
1524
1525 m->m_data -= ETHER_ALIGN;
1526 }
1527
#ifdef DEVICE_POLLING
/*
 * polling(4) entry point: when the interface is running, drain
 * completed transmissions and harvest received packets under the
 * driver lock.  Returns the number of packets received.
 */
static int
arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct arge_softc *sc;
	int npkts;

	sc = ifp->if_softc;
	npkts = 0;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		ARGE_LOCK(sc);
		arge_tx_locked(sc);
		npkts = arge_rx_locked(sc);
		ARGE_UNLOCK(sc);
	}

	return (npkts);
}
#endif /* DEVICE_POLLING */
1545
1546
/*
 * Reclaim completed transmit descriptors.
 *
 * Walks the ring from the consumer index toward the producer index,
 * freeing the mbuf and unloading the dmamap of every descriptor the
 * hardware has marked finished (ARGE_DESC_EMPTY set again).  Stops at
 * the first still-pending descriptor.  Must be called with the driver
 * lock held.
 */
static void
arge_tx_locked(struct arge_softc *sc)
{
	struct arge_txdesc *txd;
	struct arge_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t ctrl;
	int cons, prod;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_tx_cons;
	prod = sc->arge_cdata.arge_tx_prod;
	/* Nothing in flight. */
	if (cons == prod)
		return;

	/* Pull the hardware's view of the ring into memory. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->arge_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
		cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
		ctrl = cur_tx->packet_ctrl;
		/* Check if descriptor has "finished" flag */
		if ((ctrl & ARGE_DESC_EMPTY) == 0)
			break;

		/* Acknowledge one "packet sent" event to the DMA engine. */
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

		sc->arge_cdata.arge_tx_cnt--;
		/* A slot opened up, so transmission may resume. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->arge_cdata.arge_txdesc[cons];

		ifp->if_opackets++;

		bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->packet_addr = 0;
	}

	sc->arge_cdata.arge_tx_cons = cons;

	/* Make the cleared descriptors visible to the hardware again. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
1606
1607
/*
 * Harvest received packets from the RX ring.
 *
 * Walks the ring from the consumer index, handing each completed frame
 * to the network stack (with the driver lock dropped around if_input),
 * then makes a second pass to re-attach fresh clusters to the consumed
 * slots via arge_newbuf().  Must be called with the driver lock held.
 * Returns the number of packets passed up (used by polling/arge_poll).
 */
static int
arge_rx_locked(struct arge_softc *sc)
{
	struct arge_rxdesc *rxd;
	struct ifnet *ifp = sc->arge_ifp;
	int cons, prog, packet_len, i;
	struct arge_desc *cur_rx;
	struct mbuf *m;
	int rx_npkts = 0;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_rx_cons;

	/* Pull the hardware's descriptor updates into memory. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* prog counts completed slots; bounded by one full ring pass. */
	for (prog = 0; prog < ARGE_RX_RING_COUNT;
	    ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
		cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
		rxd = &sc->arge_cdata.arge_rxdesc[cons];
		m = rxd->rx_m;

		/* EMPTY still set means the hardware hasn't filled it. */
		if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
			break;

		/* Acknowledge one "packet received" event. */
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

		prog++;

		packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
		bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		m = rxd->rx_m;

		/* Realign the payload (see arge_fixup_rx). */
		arge_fixup_rx(m);
		m->m_pkthdr.rcvif = ifp;
		/* Skip 4 bytes of CRC */
		m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
		ifp->if_ipackets++;
		rx_npkts++;

		/* Drop the lock across the stack input call. */
		ARGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ARGE_LOCK(sc);
		cur_rx->packet_addr = 0;
	}

	if (prog > 0) {

		/* Refill every slot consumed above with a new cluster. */
		i = sc->arge_cdata.arge_rx_cons;
		for (; prog > 0 ; prog--) {
			if (arge_newbuf(sc, i) != 0) {
				device_printf(sc->arge_dev,
				    "Failed to allocate buffer\n");
				break;
			}
			ARGE_INC(i, ARGE_RX_RING_COUNT);
		}

		bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
		    sc->arge_cdata.arge_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);

		sc->arge_cdata.arge_rx_cons = cons;
	}

	return (rx_npkts);
}
1678
/*
 * Fast interrupt filter.
 *
 * Runs in primary interrupt context: if any DMA interrupt bit we care
 * about is pending, latch the status into sc->arge_intr_status, mask
 * all DMA interrupts (they are re-enabled at the end of arge_intr()),
 * and schedule the ithread.  Otherwise report a stray interrupt.
 */
static int
arge_intr_filter(void *arg)
{
	struct arge_softc *sc = arg;
	uint32_t status, ints;

	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
	/* Current interrupt mask; only used by the debug printout below. */
	ints = ARGE_READ(sc, AR71XX_DMA_INTR);

#if 0
	dprintf("int mask(filter) = %b\n", ints,
	    "\2\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
	dprintf("status(filter) = %b\n", status,
	    "\2\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
#endif

	if (status & DMA_INTR_ALL) {
		/* Save status for arge_intr() and mask further interrupts. */
		sc->arge_intr_status |= status;
		ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
		return (FILTER_SCHEDULE_THREAD);
	}

	/* Not ours; clear any stale latched status. */
	sc->arge_intr_status = 0;
	return (FILTER_STRAY);
}
1706
1707 static void
1708 arge_intr(void *arg)
1709 {
1710 struct arge_softc *sc = arg;
1711 uint32_t status;
1712
1713 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1714 status |= sc->arge_intr_status;
1715
1716 #if 0
1717 dprintf("int status(intr) = %b\n", status,
1718 "\2\1\7RX_OVERFLOW\5RX_PKT_RCVD"
1719 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1720 #endif
1721
1722 /*
1723 * Is it our interrupt at all?
1724 */
1725 if (status == 0)
1726 return;
1727
1728 if (status & DMA_INTR_RX_BUS_ERROR) {
1729 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1730 device_printf(sc->arge_dev, "RX bus error");
1731 return;
1732 }
1733
1734 if (status & DMA_INTR_TX_BUS_ERROR) {
1735 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1736 device_printf(sc->arge_dev, "TX bus error");
1737 return;
1738 }
1739
1740 ARGE_LOCK(sc);
1741
1742 if (status & DMA_INTR_RX_PKT_RCVD)
1743 arge_rx_locked(sc);
1744
1745 /*
1746 * RX overrun disables the receiver.
1747 * Clear indication and re-enable rx.
1748 */
1749 if ( status & DMA_INTR_RX_OVERFLOW) {
1750 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1751 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1752 }
1753
1754 if (status & DMA_INTR_TX_PKT_SENT)
1755 arge_tx_locked(sc);
1756 /*
1757 * Underrun turns off TX. Clear underrun indication.
1758 * If there's anything left in the ring, reactivate the tx.
1759 */
1760 if (status & DMA_INTR_TX_UNDERRUN) {
1761 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1762 if (sc->arge_cdata.arge_tx_pkts > 0 ) {
1763 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
1764 DMA_TX_CONTROL_EN);
1765 }
1766 }
1767
1768 /*
1769 * We handled all bits, clear status
1770 */
1771 sc->arge_intr_status = 0;
1772 ARGE_UNLOCK(sc);
1773 /*
1774 * re-enable all interrupts
1775 */
1776 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1777 }
1778
1779
1780 static void
1781 arge_tick(void *xsc)
1782 {
1783 struct arge_softc *sc = xsc;
1784 struct mii_data *mii;
1785
1786 ARGE_LOCK_ASSERT(sc);
1787
1788 if (sc->arge_miibus) {
1789 mii = device_get_softc(sc->arge_miibus);
1790 mii_tick(mii);
1791 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1792 }
1793 }
1794
1795 int
1796 arge_multiphy_mediachange(struct ifnet *ifp)
1797 {
1798 struct arge_softc *sc = ifp->if_softc;
1799 struct ifmedia *ifm = &sc->arge_ifmedia;
1800 struct ifmedia_entry *ife = ifm->ifm_cur;
1801
1802 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1803 return (EINVAL);
1804
1805 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1806 device_printf(sc->arge_dev,
1807 "AUTO is not supported for multiphy MAC");
1808 return (EINVAL);
1809 }
1810
1811 /*
1812 * Ignore everything
1813 */
1814 return (0);
1815 }
1816
1817 void
1818 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1819 {
1820 struct arge_softc *sc = ifp->if_softc;
1821
1822 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1823 ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
1824 sc->arge_duplex_mode;
1825 }
1826
Cache object: 9d3ba183405259aab4e9bd0354b55448
|