1 /*-
2 * Copyright (c) 2006-2008 Sam Leffler. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
27
28 /*
29 * Intel XScale NPE Ethernet driver.
30 *
31  * This driver handles the two ports present on the IXP425 and IXP435.
32 * Packet processing is done by the Network Processing Engines
33 * (NPE's) that work together with a MAC and PHY. The MAC
34 * is also mapped to the XScale cpu; the PHY is accessed via
35 * the MAC. NPE-XScale communication happens through h/w
36 * queues managed by the Q Manager block.
37 *
38 * The code here replaces the ethAcc, ethMii, and ethDB classes
39 * in the Intel Access Library (IAL) and the OS-specific driver.
40 *
41 * XXX add vlan support
42 */
43 #ifdef HAVE_KERNEL_OPTION_HEADERS
44 #include "opt_device_polling.h"
45 #endif
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bus.h>
50 #include <sys/kernel.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/module.h>
54 #include <sys/rman.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/endian.h>
59 #include <machine/bus.h>
60
61 #include <net/ethernet.h>
62 #include <net/if.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_mib.h>
67 #include <net/if_types.h>
68 #include <net/if_var.h>
69
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/in_var.h>
74 #include <netinet/ip.h>
75 #endif
76
77 #include <net/bpf.h>
78 #include <net/bpfdesc.h>
79
80 #include <arm/xscale/ixp425/ixp425reg.h>
81 #include <arm/xscale/ixp425/ixp425var.h>
82 #include <arm/xscale/ixp425/ixp425_qmgr.h>
83 #include <arm/xscale/ixp425/ixp425_npevar.h>
84
85 #include <dev/mii/mii.h>
86 #include <dev/mii/miivar.h>
87 #include <arm/xscale/ixp425/if_npereg.h>
88
89 #include <machine/armreg.h>
90
91 #include "miibus_if.h"
92
93 /*
94  * XXX: For the main bus dma tag. Can go away once the new method to get the
95  * dma tag from the parent is MFC'd into RELENG_6.
96 */
97 extern struct ixp425_softc *ixp425_softc;
98
99 struct npebuf {
100 struct npebuf *ix_next; /* chain to next buffer */
101 void *ix_m; /* backpointer to mbuf */
102 bus_dmamap_t ix_map; /* bus dma map for associated data */
103 struct npehwbuf *ix_hw; /* associated h/w block */
104 uint32_t ix_neaddr; /* phys address of ix_hw */
105 };
106
107 struct npedma {
108 const char* name;
109 int nbuf; /* # npebuf's allocated */
110 bus_dma_tag_t mtag; /* bus dma tag for mbuf data */
111 struct npehwbuf *hwbuf; /* NPE h/w buffers */
112 bus_dma_tag_t buf_tag; /* tag+map for NPE buffers */
113 bus_dmamap_t buf_map;
114 bus_addr_t buf_phys; /* phys addr of buffers */
115 struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */
116 };
117
118 struct npe_softc {
119 /* XXX mii requires this be first; do not move! */
120 struct ifnet *sc_ifp; /* ifnet pointer */
121 struct mtx sc_mtx; /* basically a perimeter lock */
122 device_t sc_dev;
123 bus_space_tag_t sc_iot;
124 bus_space_handle_t sc_ioh; /* MAC register window */
125 device_t sc_mii; /* child miibus */
126 bus_space_handle_t sc_miih; /* MII register window */
127 int sc_npeid;
128 struct ixpnpe_softc *sc_npe; /* NPE support */
129 int sc_debug; /* DPRINTF* control */
130 int sc_tickinterval;
131 struct callout tick_ch; /* Tick callout */
132 int npe_watchdog_timer;
133 struct npedma txdma;
134 struct npebuf *tx_free; /* list of free tx buffers */
135 struct npedma rxdma;
136 bus_addr_t buf_phys; /* XXX for returning a value */
137 int rx_qid; /* rx qid */
138 int rx_freeqid; /* rx free buffers qid */
139 int tx_qid; /* tx qid */
140 int tx_doneqid; /* tx completed qid */
141 struct ifmib_iso_8802_3 mibdata;
142 bus_dma_tag_t sc_stats_tag; /* bus dma tag for stats block */
143 struct npestats *sc_stats;
144 bus_dmamap_t sc_stats_map;
145 bus_addr_t sc_stats_phys; /* phys addr of sc_stats */
146 struct npestats sc_totals; /* accumulated sc_stats */
147 };
148
149 /*
150  * Static configuration for the IXP425/IXP435. The tx and
151 * rx free Q id's are fixed by the NPE microcode. The
152 * rx Q id's are programmed to be separate to simplify
153 * multi-port processing. It may be better to handle
154 * all traffic through one Q (as done by the Intel drivers).
155 *
156 * Note that the PHY's are accessible only from MAC B on the
157 * IXP425 and from MAC C on other devices. This and other
158 * platform-specific assumptions are handled with hints.
159 */
160 static const struct {
161 uint32_t macbase;
162 uint32_t miibase;
163 int phy; /* phy id */
164 uint8_t rx_qid;
165 uint8_t rx_freeqid;
166 uint8_t tx_qid;
167 uint8_t tx_doneqid;
168 } npeconfig[NPE_MAX] = {
169 [NPE_A] = {
170 .macbase = IXP435_MAC_A_HWBASE,
171 .miibase = IXP425_MAC_C_HWBASE,
172 .phy = 2,
173 .rx_qid = 4,
174 .rx_freeqid = 26,
175 .tx_qid = 23,
176 .tx_doneqid = 31
177 },
178 [NPE_B] = {
179 .macbase = IXP425_MAC_B_HWBASE,
180 .miibase = IXP425_MAC_B_HWBASE,
181 .phy = 0,
182 .rx_qid = 4,
183 .rx_freeqid = 27,
184 .tx_qid = 24,
185 .tx_doneqid = 31
186 },
187 [NPE_C] = {
188 .macbase = IXP425_MAC_C_HWBASE,
189 .miibase = IXP425_MAC_B_HWBASE,
190 .phy = 1,
191 .rx_qid = 12,
192 .rx_freeqid = 28,
193 .tx_qid = 25,
194 .tx_doneqid = 31
195 },
196 };
197 static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */
198
199 static __inline uint32_t
200 RD4(struct npe_softc *sc, bus_size_t off)
201 {
202 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
203 }
204
205 static __inline void
206 WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
207 {
208 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
209 }
210
211 #define NPE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
212 #define NPE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
213 #define NPE_LOCK_INIT(_sc) \
214 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
215 MTX_NETWORK_LOCK, MTX_DEF)
216 #define NPE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
217 #define NPE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
218 #define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
219
220 static devclass_t npe_devclass;
221
222 static int override_npeid(device_t, const char *resname, int *val);
223 static int npe_activate(device_t dev);
224 static void npe_deactivate(device_t dev);
225 static int npe_ifmedia_update(struct ifnet *ifp);
226 static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
227 static void npe_setmac(struct npe_softc *sc, u_char *eaddr);
228 static void npe_getmac(struct npe_softc *sc, u_char *eaddr);
229 static void npe_txdone(int qid, void *arg);
230 static int npe_rxbuf_init(struct npe_softc *, struct npebuf *,
231 struct mbuf *);
232 static int npe_rxdone(int qid, void *arg);
233 static void npeinit(void *);
234 static void npestart_locked(struct ifnet *);
235 static void npestart(struct ifnet *);
236 static void npestop(struct npe_softc *);
237 static void npewatchdog(struct npe_softc *);
238 static int npeioctl(struct ifnet * ifp, u_long, caddr_t);
239
240 static int npe_setrxqosentry(struct npe_softc *, int classix,
241 int trafclass, int qid);
242 static int npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
243 static int npe_setfirewallmode(struct npe_softc *, int onoff);
244 static int npe_updatestats(struct npe_softc *);
245 #if 0
246 static int npe_getstats(struct npe_softc *);
247 static uint32_t npe_getimageid(struct npe_softc *);
248 static int npe_setloopback(struct npe_softc *, int ena);
249 #endif
250
251 /* NB: all tx done processing goes through one queue */
252 static int tx_doneqid = -1;
253
254 static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
255 "IXP4XX NPE driver parameters");
256
257 static int npe_debug = 0;
258 SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug,
259 0, "IXP4XX NPE network interface debug msgs");
260 #define DPRINTF(sc, fmt, ...) do { \
261 if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \
262 } while (0)
263 #define DPRINTFn(n, sc, fmt, ...) do { \
264 if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
265 } while (0)
266 static int npe_tickinterval = 3; /* npe_tick frequency (secs) */
267 SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RDTUN, &npe_tickinterval,
268 0, "periodic work interval (secs)");
269
270 static int npe_rxbuf = 64; /* # rx buffers to allocate */
271 SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RDTUN, &npe_rxbuf,
272 0, "rx buffers allocated");
273 static int npe_txbuf = 128; /* # tx buffers to allocate */
274 SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RDTUN, &npe_txbuf,
275 0, "tx buffers allocated");
276
277 static int
278 unit2npeid(int unit)
279 {
280 static const int npeidmap[2][3] = {
281 /* on 425 A is for HSS, B & C are for Ethernet */
282 { NPE_B, NPE_C, -1 }, /* IXP425 */
283 /* 435 only has A & C, order C then A */
284 { NPE_C, NPE_A, -1 }, /* IXP435 */
285 };
286 /* XXX check feature register instead */
287 return (unit < 3 ? npeidmap[
288 (cpu_ident() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
289 }
290
291 static int
292 npe_probe(device_t dev)
293 {
294 static const char *desc[NPE_MAX] = {
295 [NPE_A] = "IXP NPE-A",
296 [NPE_B] = "IXP NPE-B",
297 [NPE_C] = "IXP NPE-C"
298 };
299 int unit = device_get_unit(dev);
300 int npeid;
301
302 if (unit > 2 ||
303 (ixp4xx_read_feature_bits() &
304 (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
305 return EINVAL;
306
307 npeid = -1;
308 if (!override_npeid(dev, "npeid", &npeid))
309 npeid = unit2npeid(unit);
310 if (npeid == -1) {
311 device_printf(dev, "unit %d not supported\n", unit);
312 return EINVAL;
313 }
314 device_set_desc(dev, desc[npeid]);
315 return 0;
316 }
317
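/*
 * Attach: allocate the ifnet, bring up the NPE/MAC/PHY via npe_activate,
 * hook up the per-device sysctls, and register with the network stack.
 */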
318 static int
319 npe_attach(device_t dev)
320 {
321 struct npe_softc *sc = device_get_softc(dev);
322 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
323 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
324 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
325 struct ifnet *ifp;
326 int error;
327 u_char eaddr[6];
328
329 sc->sc_dev = dev;
330 sc->sc_iot = sa->sc_iot;
331 NPE_LOCK_INIT(sc);
332 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
333 sc->sc_debug = npe_debug;
334 sc->sc_tickinterval = npe_tickinterval;
335
336 ifp = if_alloc(IFT_ETHER);
337 if (ifp == NULL) {
338 device_printf(dev, "cannot allocate ifnet\n");
339 error = EIO; /* XXX */
340 goto out;
341 }
342 	/* NB: must be set up prior to invoking mii code */
343 sc->sc_ifp = ifp;
344
345 error = npe_activate(dev);
346 if (error) {
347 device_printf(dev, "cannot activate npe\n");
348 goto out;
349 }
350
351 npe_getmac(sc, eaddr);
352
353 ifp->if_softc = sc;
354 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
355 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
356 ifp->if_start = npestart;
357 ifp->if_ioctl = npeioctl;
358 ifp->if_init = npeinit;
359 IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
360 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
361 IFQ_SET_READY(&ifp->if_snd);
362 ifp->if_linkmib = &sc->mibdata;
363 ifp->if_linkmiblen = sizeof(sc->mibdata);
364 sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
365 	/* device supports oversized vlan frames */
366 ifp->if_capabilities |= IFCAP_VLAN_MTU;
367 ifp->if_capenable = ifp->if_capabilities;
368 #ifdef DEVICE_POLLING
369 ifp->if_capabilities |= IFCAP_POLLING;
370 #endif
371
372 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
373 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
374 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
375 CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
376 SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
377 CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");
378
379 ether_ifattach(ifp, eaddr);
380 return 0;
381 out:
382 if (ifp != NULL)
383 if_free(ifp);
384 NPE_LOCK_DESTROY(sc);
385 npe_deactivate(dev);
386 return error;
387 }
388
389 static int
390 npe_detach(device_t dev)
391 {
392 struct npe_softc *sc = device_get_softc(dev);
393 struct ifnet *ifp = sc->sc_ifp;
394
395 #ifdef DEVICE_POLLING
396 if (ifp->if_capenable & IFCAP_POLLING)
397 ether_poll_deregister(ifp);
398 #endif
399 npestop(sc);
400 if (ifp != NULL) {
401 ether_ifdetach(ifp);
402 if_free(ifp);
403 }
404 NPE_LOCK_DESTROY(sc);
405 npe_deactivate(dev);
406 return 0;
407 }
408
409 /*
410 * Compute and install the multicast filter.
411 */
412 static void
413 npe_setmcast(struct npe_softc *sc)
414 {
415 struct ifnet *ifp = sc->sc_ifp;
416 uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
417 int i;
418
419 if (ifp->if_flags & IFF_PROMISC) {
420 memset(mask, 0, ETHER_ADDR_LEN);
421 memset(addr, 0, ETHER_ADDR_LEN);
422 } else if (ifp->if_flags & IFF_ALLMULTI) {
423 static const uint8_t allmulti[ETHER_ADDR_LEN] =
424 { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
425 memcpy(mask, allmulti, ETHER_ADDR_LEN);
426 memcpy(addr, allmulti, ETHER_ADDR_LEN);
427 } else {
428 uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
429 struct ifmultiaddr *ifma;
430 const uint8_t *mac;
431
432 memset(clr, 0, ETHER_ADDR_LEN);
433 memset(set, 0xff, ETHER_ADDR_LEN);
434
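		/*
		 * Fold all multicast addresses together: set[] keeps the
		 * bits that are 1 in every address, clr[] the bits that are
		 * 1 in any address. The mask then marks bit positions on
		 * which all addresses agree and addr carries their common
		 * value; these are what get written to the MAC address/mask
		 * registers below.
		 */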
435 if_maddr_rlock(ifp);
436 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
437 if (ifma->ifma_addr->sa_family != AF_LINK)
438 continue;
439 mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
440 for (i = 0; i < ETHER_ADDR_LEN; i++) {
441 clr[i] |= mac[i];
442 set[i] &= mac[i];
443 }
444 }
445 if_maddr_runlock(ifp);
446
447 for (i = 0; i < ETHER_ADDR_LEN; i++) {
448 mask[i] = set[i] | ~clr[i];
449 addr[i] = set[i];
450 }
451 }
452
453 /*
454 * Write the mask and address registers.
455 */
456 for (i = 0; i < ETHER_ADDR_LEN; i++) {
457 WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
458 WR4(sc, NPE_MAC_ADDR(i), addr[i]);
459 }
460 }
461
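/*
 * bus_dmamap_load callback: stash the physical address of the loaded
 * region in the softc so the caller can pick it up.
 */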
462 static void
463 npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
464 {
465 struct npe_softc *sc;
466
467 if (error != 0)
468 return;
469 sc = (struct npe_softc *)arg;
470 sc->buf_phys = segs[0].ds_addr;
471 }
472
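/*
 * Allocate DMA resources for one direction (tx or rx): a tag for mapping
 * mbuf data, a contiguous block of NPE h/w descriptors, and the parallel
 * array of s/w npebuf structures that track them.
 */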
473 static int
474 npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
475 const char *name, int nbuf, int maxseg)
476 {
477 int error, i;
478
479 memset(dma, 0, sizeof(*dma));
480
481 dma->name = name;
482 dma->nbuf = nbuf;
483
484 /* DMA tag for mapped mbufs */
485 error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
486 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
487 MCLBYTES, maxseg, MCLBYTES, 0,
488 busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
489 if (error != 0) {
490 device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
491 "error %u\n", dma->name, error);
492 return error;
493 }
494
495 /* DMA tag and map for the NPE buffers */
496 error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
497 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
498 nbuf * sizeof(struct npehwbuf), 1,
499 nbuf * sizeof(struct npehwbuf), 0,
500 busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
501 if (error != 0) {
502 device_printf(sc->sc_dev,
503 "unable to create %s npebuf dma tag, error %u\n",
504 dma->name, error);
505 return error;
506 }
507 	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
508 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->buf_map);
509 	if (error != 0) {
510 		device_printf(sc->sc_dev,
511 		    "unable to allocate memory for %s h/w buffers, error %u\n",
512 		    dma->name, error);
513 		return error;
514 	}
515 	/* XXX M_TEMP */
516 	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
517 	if (dma->buf == NULL) {
518 		device_printf(sc->sc_dev,
519 		    "unable to allocate memory for %s s/w buffers\n",
520 		    dma->name);
521 		return ENOMEM;
522 	}
523 	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
524 	    dma->hwbuf, nbuf * sizeof(struct npehwbuf), npe_getaddr, sc, 0);
525 	if (error != 0) {
526 		device_printf(sc->sc_dev, "unable to map memory for %s "
527 		    "h/w buffers, error %u\n", dma->name, error);
528 		return error;
529 	}
530 dma->buf_phys = sc->buf_phys;
531 for (i = 0; i < dma->nbuf; i++) {
532 struct npebuf *npe = &dma->buf[i];
533 struct npehwbuf *hw = &dma->hwbuf[i];
534
535 /* calculate offset to shared area */
536 npe->ix_neaddr = dma->buf_phys +
537 ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
538 KASSERT((npe->ix_neaddr & 0x1f) == 0,
539 ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
540 error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
541 &npe->ix_map);
542 if (error != 0) {
543 device_printf(sc->sc_dev,
544 "unable to create dmamap for %s buffer %u, "
545 "error %u\n", dma->name, i, error);
546 return error;
547 }
548 npe->ix_hw = hw;
549 }
550 bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
551 return 0;
552 }
553
554 static void
555 npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
556 {
557 int i;
558
559 if (dma->hwbuf != NULL) {
560 for (i = 0; i < dma->nbuf; i++) {
561 struct npebuf *npe = &dma->buf[i];
562 bus_dmamap_destroy(dma->mtag, npe->ix_map);
563 }
564 bus_dmamap_unload(dma->buf_tag, dma->buf_map);
565 bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
566 }
567 if (dma->buf != NULL)
568 free(dma->buf, M_TEMP);
569 if (dma->buf_tag)
570 bus_dma_tag_destroy(dma->buf_tag);
571 if (dma->mtag)
572 bus_dma_tag_destroy(dma->mtag);
573 memset(dma, 0, sizeof(*dma));
574 }
575
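/*
 * Hint processing: the MAC/MII register windows, NPE id, and PHY number
 * can be overridden with npe.<unit>.* hints for boards that do not match
 * the static configuration above.
 */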
576 static int
577 override_addr(device_t dev, const char *resname, int *base)
578 {
579 int unit = device_get_unit(dev);
580 const char *resval;
581
582 /* XXX warn for wrong hint type */
583 if (resource_string_value("npe", unit, resname, &resval) != 0)
584 return 0;
585 switch (resval[0]) {
586 case 'A':
587 *base = IXP435_MAC_A_HWBASE;
588 break;
589 case 'B':
590 *base = IXP425_MAC_B_HWBASE;
591 break;
592 case 'C':
593 *base = IXP425_MAC_C_HWBASE;
594 break;
595 default:
596 device_printf(dev, "Warning, bad value %s for "
597 "npe.%d.%s ignored\n", resval, unit, resname);
598 return 0;
599 }
600 if (bootverbose)
601 device_printf(dev, "using npe.%d.%s=%s override\n",
602 unit, resname, resval);
603 return 1;
604 }
605
606 static int
607 override_npeid(device_t dev, const char *resname, int *npeid)
608 {
609 int unit = device_get_unit(dev);
610 const char *resval;
611
612 /* XXX warn for wrong hint type */
613 if (resource_string_value("npe", unit, resname, &resval) != 0)
614 return 0;
615 switch (resval[0]) {
616 case 'A': *npeid = NPE_A; break;
617 case 'B': *npeid = NPE_B; break;
618 case 'C': *npeid = NPE_C; break;
619 default:
620 device_printf(dev, "Warning, bad value %s for "
621 "npe.%d.%s ignored\n", resval, unit, resname);
622 return 0;
623 }
624 if (bootverbose)
625 device_printf(dev, "using npe.%d.%s=%s override\n",
626 unit, resname, resval);
627 return 1;
628 }
629
630 static int
631 override_unit(device_t dev, const char *resname, int *val, int min, int max)
632 {
633 int unit = device_get_unit(dev);
634 int resval;
635
636 if (resource_int_value("npe", unit, resname, &resval) != 0)
637 return 0;
638 if (!(min <= resval && resval <= max)) {
639 device_printf(dev, "Warning, bad value %d for npe.%d.%s "
640 "ignored (value must be [%d-%d])\n", resval, unit,
641 resname, min, max);
642 return 0;
643 }
644 if (bootverbose)
645 device_printf(dev, "using npe.%d.%s=%d override\n",
646 unit, resname, resval);
647 *val = resval;
648 return 1;
649 }
650
651 static void
652 npe_mac_reset(struct npe_softc *sc)
653 {
654 /*
655 * Reset MAC core.
656 */
657 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
658 DELAY(NPE_MAC_RESET_DELAY);
659 /* configure MAC to generate MDC clock */
660 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
661 }
662
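/*
 * Map the MAC (and, if needed, MII) register windows, load and start the
 * NPE firmware, attach the PHY, and set up the DMA areas and h/w queues
 * used to exchange frames with the NPE.
 */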
663 static int
664 npe_activate(device_t dev)
665 {
666 struct npe_softc *sc = device_get_softc(dev);
667 int error, i, macbase, miibase, phy;
668
669 /*
670 	 * Set up NPE ID, MAC, and MII bindings. We allow override
671 * via hints to handle unexpected board configs.
672 */
673 if (!override_npeid(dev, "npeid", &sc->sc_npeid))
674 sc->sc_npeid = unit2npeid(device_get_unit(dev));
675 sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
676 if (sc->sc_npe == NULL) {
677 device_printf(dev, "cannot attach ixpnpe\n");
678 return EIO; /* XXX */
679 }
680
681 /* MAC */
682 if (!override_addr(dev, "mac", &macbase))
683 macbase = npeconfig[sc->sc_npeid].macbase;
684 if (bootverbose)
685 device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
686 if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
687 device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
688 macbase, IXP425_REG_SIZE);
689 return ENOMEM;
690 }
691
692 /* PHY */
693 if (!override_unit(dev, "phy", &phy, 0, MII_NPHY - 1))
694 phy = npeconfig[sc->sc_npeid].phy;
695 if (!override_addr(dev, "mii", &miibase))
696 miibase = npeconfig[sc->sc_npeid].miibase;
697 if (bootverbose)
698 device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
699 if (miibase != macbase) {
700 /*
701 		 * PHY is mapped through a different MAC; set up an
702 * additional mapping for frobbing the PHY registers.
703 */
704 if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
705 device_printf(dev,
706 "cannot map MII registers 0x%x:0x%x\n",
707 miibase, IXP425_REG_SIZE);
708 return ENOMEM;
709 }
710 } else
711 sc->sc_miih = sc->sc_ioh;
712
713 /*
714 * Load NPE firmware and start it running.
715 */
716 error = ixpnpe_init(sc->sc_npe);
717 if (error != 0) {
718 device_printf(dev, "cannot init NPE (error %d)\n", error);
719 return error;
720 }
721
722 /* attach PHY */
723 error = mii_attach(dev, &sc->sc_mii, sc->sc_ifp, npe_ifmedia_update,
724 npe_ifmedia_status, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
725 if (error != 0) {
726 device_printf(dev, "attaching PHYs failed\n");
727 return error;
728 }
729
730 error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
731 if (error != 0)
732 return error;
733 error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
734 if (error != 0)
735 return error;
736
737 /* setup statistics block */
738 error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
739 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
740 sizeof(struct npestats), 1, sizeof(struct npestats), 0,
741 busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
742 if (error != 0) {
743 device_printf(sc->sc_dev, "unable to create stats tag, "
744 "error %u\n", error);
745 return error;
746 }
747 	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
748 	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
749 	if (error != 0) {
750 		device_printf(sc->sc_dev, "unable to allocate memory for "
751 		    "stats block, error %u\n", error);
752 		return error;
753 	}
754 	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
755 	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
756 	if (error != 0) {
757 		device_printf(sc->sc_dev, "unable to load memory for stats "
758 		    "block, error %u\n", error);
759 		return error;
760 	}
761 sc->sc_stats_phys = sc->buf_phys;
762
763 /*
764 	 * Set up h/w rx/tx queues. There are four q's:
765 * rx inbound q of rx'd frames
766 * rx_free pool of ixpbuf's for receiving frames
767 * tx outbound q of frames to send
768 * tx_done q of tx frames that have been processed
769 *
770 * The NPE handles the actual tx/rx process and the q manager
771 * handles the queues. The driver just writes entries to the
772 	 * q manager mailboxes and gets callbacks when there are rx'd
773 	 * frames to process or tx'd frames to reap. These callbacks
774 	 * are controlled by the q configurations; e.g. we get a
775 	 * callback when tx_done has 2 or more frames to process and
776 	 * when the rx q has at least one frame. These settings can be
777 	 * changed at the time the q is configured.
778 */
779 sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
780 ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
781 IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
782 sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
783 ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
784 /*
785 * Setup the NPE to direct all traffic to rx_qid.
786 * When QoS is enabled in the firmware there are
787 * 8 traffic classes; otherwise just 4.
788 */
789 for (i = 0; i < 8; i++)
790 npe_setrxqosentry(sc, i, 0, sc->rx_qid);
791
792 /* disable firewall mode just in case (should be off) */
793 npe_setfirewallmode(sc, 0);
794
795 sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
796 sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
797 ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
798 if (tx_doneqid == -1) {
799 ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
800 IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
801 tx_doneqid = sc->tx_doneqid;
802 }
803
804 KASSERT(npes[sc->sc_npeid] == NULL,
805 ("npe %u already setup", sc->sc_npeid));
806 npes[sc->sc_npeid] = sc;
807
808 return 0;
809 }
810
811 static void
812 npe_deactivate(device_t dev)
813 {
814 struct npe_softc *sc = device_get_softc(dev);
815
816 npes[sc->sc_npeid] = NULL;
817
818 /* XXX disable q's */
819 if (sc->sc_npe != NULL) {
820 ixpnpe_stop(sc->sc_npe);
821 ixpnpe_detach(sc->sc_npe);
822 }
823 if (sc->sc_stats != NULL) {
824 bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
825 bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
826 sc->sc_stats_map);
827 }
828 if (sc->sc_stats_tag != NULL)
829 bus_dma_tag_destroy(sc->sc_stats_tag);
830 npe_dma_destroy(sc, &sc->txdma);
831 npe_dma_destroy(sc, &sc->rxdma);
832 bus_generic_detach(sc->sc_dev);
833 if (sc->sc_mii != NULL)
834 device_delete_child(sc->sc_dev, sc->sc_mii);
835 }
836
837 /*
838 * Change media according to request.
839 */
840 static int
841 npe_ifmedia_update(struct ifnet *ifp)
842 {
843 struct npe_softc *sc = ifp->if_softc;
844 struct mii_data *mii;
845
846 mii = device_get_softc(sc->sc_mii);
847 NPE_LOCK(sc);
848 mii_mediachg(mii);
849 /* XXX push state ourself? */
850 NPE_UNLOCK(sc);
851 return (0);
852 }
853
854 /*
855 * Notify the world which media we're using.
856 */
857 static void
858 npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
859 {
860 struct npe_softc *sc = ifp->if_softc;
861 struct mii_data *mii;
862
863 mii = device_get_softc(sc->sc_mii);
864 NPE_LOCK(sc);
865 mii_pollstat(mii);
866 ifmr->ifm_active = mii->mii_media_active;
867 ifmr->ifm_status = mii->mii_media_status;
868 NPE_UNLOCK(sc);
869 }
870
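/*
 * Fold the NPE-maintained statistics block into the 802.3 MIB data and
 * the interface counters. Values in the stats block are big-endian.
 */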
871 static void
872 npe_addstats(struct npe_softc *sc)
873 {
874 #define NPEADD(x) sc->sc_totals.x += be32toh(ns->x)
875 #define MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
876 struct ifnet *ifp = sc->sc_ifp;
877 struct npestats *ns = sc->sc_stats;
878
879 MIBADD(dot3StatsAlignmentErrors);
880 MIBADD(dot3StatsFCSErrors);
881 MIBADD(dot3StatsInternalMacReceiveErrors);
882 NPEADD(RxOverrunDiscards);
883 NPEADD(RxLearnedEntryDiscards);
884 NPEADD(RxLargeFramesDiscards);
885 NPEADD(RxSTPBlockedDiscards);
886 NPEADD(RxVLANTypeFilterDiscards);
887 NPEADD(RxVLANIdFilterDiscards);
888 NPEADD(RxInvalidSourceDiscards);
889 NPEADD(RxBlackListDiscards);
890 NPEADD(RxWhiteListDiscards);
891 NPEADD(RxUnderflowEntryDiscards);
892 MIBADD(dot3StatsSingleCollisionFrames);
893 MIBADD(dot3StatsMultipleCollisionFrames);
894 MIBADD(dot3StatsDeferredTransmissions);
895 MIBADD(dot3StatsLateCollisions);
896 MIBADD(dot3StatsExcessiveCollisions);
897 MIBADD(dot3StatsInternalMacTransmitErrors);
898 MIBADD(dot3StatsCarrierSenseErrors);
899 NPEADD(TxLargeFrameDiscards);
900 NPEADD(TxVLANIdFilterDiscards);
901
902 sc->mibdata.dot3StatsFrameTooLongs +=
903 be32toh(ns->RxLargeFramesDiscards)
904 + be32toh(ns->TxLargeFrameDiscards);
905 sc->mibdata.dot3StatsMissedFrames +=
906 be32toh(ns->RxOverrunDiscards)
907 + be32toh(ns->RxUnderflowEntryDiscards);
908
909 if_inc_counter(ifp, IFCOUNTER_OERRORS,
910 be32toh(ns->dot3StatsInternalMacTransmitErrors) +
911 be32toh(ns->dot3StatsCarrierSenseErrors) +
912 be32toh(ns->TxVLANIdFilterDiscards));
913 if_inc_counter(ifp, IFCOUNTER_IERRORS,
914 be32toh(ns->dot3StatsFCSErrors) +
915 be32toh(ns->dot3StatsInternalMacReceiveErrors) +
916 be32toh(ns->RxOverrunDiscards) +
917 be32toh(ns->RxUnderflowEntryDiscards));
918 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
919 be32toh(ns->dot3StatsSingleCollisionFrames) +
920 be32toh(ns->dot3StatsMultipleCollisionFrames));
921 #undef NPEADD
922 #undef MIBADD
923 }
924
925 static void
926 npe_tick(void *xsc)
927 {
928 #define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
929 struct npe_softc *sc = xsc;
930 struct mii_data *mii = device_get_softc(sc->sc_mii);
931 uint32_t msg[2];
932
933 NPE_ASSERT_LOCKED(sc);
934
935 /*
936 * NB: to avoid sleeping with the softc lock held we
937 * split the NPE msg processing into two parts. The
938 * request for statistics is sent w/o waiting for a
939 * reply and then on the next tick we retrieve the
940 * results. This works because npe_tick is the only
941 	 * code that talks via the mailboxes (except at setup).
942 * This likely can be handled better.
943 */
944 if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
945 bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
946 BUS_DMASYNC_POSTREAD);
947 npe_addstats(sc);
948 }
949 npe_updatestats(sc);
950 mii_tick(mii);
951
952 npewatchdog(sc);
953
954 /* schedule next poll */
955 callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
956 #undef ACK
957 }
958
959 static void
960 npe_setmac(struct npe_softc *sc, u_char *eaddr)
961 {
962 WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
963 WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
964 WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
965 WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
966 WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
967 WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
968 }
969
970 static void
971 npe_getmac(struct npe_softc *sc, u_char *eaddr)
972 {
973 /* NB: the unicast address appears to be loaded from EEPROM on reset */
974 eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
975 eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
976 eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
977 eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
978 eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
979 eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
980 }
981
982 struct txdone {
983 struct npebuf *head;
984 struct npebuf **tail;
985 int count;
986 };
987
988 static __inline void
989 npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
990 {
991 struct ifnet *ifp = sc->sc_ifp;
992
993 NPE_LOCK(sc);
994 *td->tail = sc->tx_free;
995 sc->tx_free = td->head;
996 /*
997 * We're no longer busy, so clear the busy flag and call the
998 * start routine to xmit more packets.
999 */
1000 if_inc_counter(ifp, IFCOUNTER_OPACKETS, td->count);
1001 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1002 sc->npe_watchdog_timer = 0;
1003 npestart_locked(ifp);
1004 NPE_UNLOCK(sc);
1005 }
1006
1007 /*
1008 * Q manager callback on tx done queue. Reap mbufs
1009 * and return tx buffers to the free list. Finally
1010 * restart output. Note the microcode has only one
1011 * txdone q wired into it so we must use the NPE ID
1012 * returned with each npehwbuf to decide where to
1013 * send buffers.
1014 */
1015 static void
1016 npe_txdone(int qid, void *arg)
1017 {
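/* P2V: map a physical descriptor address returned by the NPE back to its npebuf */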
1018 #define P2V(a, dma) \
1019 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
1020 struct npe_softc *sc0 = arg;
1021 struct npe_softc *sc;
1022 struct npebuf *npe;
1023 struct txdone *td, q[NPE_MAX];
1024 uint32_t entry;
1025
1026 q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
1027 q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
1028 q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
1029 /* XXX max # at a time? */
1030 while (ixpqmgr_qread(qid, &entry) == 0) {
1031 DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
1032 __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
1033
1034 sc = npes[NPE_QM_Q_NPE(entry)];
1035 npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
1036 m_freem(npe->ix_m);
1037 npe->ix_m = NULL;
1038
1039 td = &q[NPE_QM_Q_NPE(entry)];
1040 *td->tail = npe;
1041 td->tail = &npe->ix_next;
1042 td->count++;
1043 }
1044
1045 if (q[NPE_A].count)
1046 npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
1047 if (q[NPE_B].count)
1048 npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
1049 if (q[NPE_C].count)
1050 npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
1051 #undef P2V
1052 }
1053
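/*
 * Attach an mbuf cluster to an rx npebuf: load it for DMA and point the
 * associated h/w descriptor at the cluster's data area.
 */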
1054 static int
1055 npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
1056 {
1057 bus_dma_segment_t segs[1];
1058 struct npedma *dma = &sc->rxdma;
1059 struct npehwbuf *hw;
1060 int error, nseg;
1061
1062 if (m == NULL) {
1063 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1064 if (m == NULL)
1065 return ENOBUFS;
1066 }
1067 KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
1068 ("ext_size %d", m->m_ext.ext_size));
1069 m->m_pkthdr.len = m->m_len = 1536;
1070 /* backload payload and align ip hdr */
1071 m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
1072 bus_dmamap_unload(dma->mtag, npe->ix_map);
1073 error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
1074 segs, &nseg, 0);
1075 if (error != 0) {
1076 m_freem(m);
1077 return error;
1078 }
1079 hw = npe->ix_hw;
1080 hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
1081 /* NB: NPE requires length be a multiple of 64 */
1082 	/* NB: buffer length goes in the upper 16 bits of the word */
1083 hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
1084 hw->ix_ne[0].next = 0;
1085 bus_dmamap_sync(dma->buf_tag, dma->buf_map,
1086 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1087 npe->ix_m = m;
1088 /* Flush the memory in the mbuf */
1089 bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
1090 return 0;
1091 }
1092
1093 /*
1094 * RX q processing for a specific NPE. Claim entries
1095 * from the hardware queue and pass the frames up the
1096 * stack. Pass the rx buffers to the free list.
1097 */
1098 static int
1099 npe_rxdone(int qid, void *arg)
1100 {
1101 #define P2V(a, dma) \
1102 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
1103 struct npe_softc *sc = arg;
1104 struct npedma *dma = &sc->rxdma;
1105 uint32_t entry;
1106 int rx_npkts = 0;
1107
1108 while (ixpqmgr_qread(qid, &entry) == 0) {
1109 struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
1110 struct mbuf *m;
1111
1112 bus_dmamap_sync(dma->buf_tag, dma->buf_map,
1113 BUS_DMASYNC_POSTREAD);
1114 DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
1115 __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
1116 /*
1117 * Allocate a new mbuf to replenish the rx buffer.
1118 * If doing so fails we drop the rx'd frame so we
1119 * can reuse the previous mbuf. When we're able to
1120 * allocate a new mbuf dispatch the mbuf w/ rx'd
1121 * data up the stack and replace it with the newly
1122 * allocated one.
1123 */
1124 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1125 if (m != NULL) {
1126 struct mbuf *mrx = npe->ix_m;
1127 struct npehwbuf *hw = npe->ix_hw;
1128 struct ifnet *ifp = sc->sc_ifp;
1129
1130 /* Flush mbuf memory for rx'd data */
1131 bus_dmamap_sync(dma->mtag, npe->ix_map,
1132 BUS_DMASYNC_POSTREAD);
1133
1134 /* set m_len etc. per rx frame size */
1135 mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
1136 mrx->m_pkthdr.len = mrx->m_len;
1137 mrx->m_pkthdr.rcvif = ifp;
1138
1139 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1140 ifp->if_input(ifp, mrx);
1141 rx_npkts++;
1142 } else {
1143 /* discard frame and re-use mbuf */
1144 m = npe->ix_m;
1145 }
1146 if (npe_rxbuf_init(sc, npe, m) == 0) {
1147 /* return npe buf to rx free list */
1148 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1149 } else {
1150 /* XXX should not happen */
1151 }
1152 }
1153 return rx_npkts;
1154 #undef P2V
1155 }
1156
1157 #ifdef DEVICE_POLLING
1158 static int
1159 npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1160 {
1161 struct npe_softc *sc = ifp->if_softc;
1162 int rx_npkts = 0;
1163
1164 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1165 rx_npkts = npe_rxdone(sc->rx_qid, sc);
1166 npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */
1167 }
1168 return rx_npkts;
1169 }
1170 #endif /* DEVICE_POLLING */
1171
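/*
 * (Re)build the free list of tx buffers, reclaiming any mbufs left over
 * from a previous run.
 */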
1172 static void
1173 npe_startxmit(struct npe_softc *sc)
1174 {
1175 struct npedma *dma = &sc->txdma;
1176 int i;
1177
1178 NPE_ASSERT_LOCKED(sc);
1179 sc->tx_free = NULL;
1180 for (i = 0; i < dma->nbuf; i++) {
1181 struct npebuf *npe = &dma->buf[i];
1182 if (npe->ix_m != NULL) {
1183 /* NB: should not happen */
1184 device_printf(sc->sc_dev,
1185 "%s: free mbuf at entry %u\n", __func__, i);
1186 m_freem(npe->ix_m);
1187 }
1188 npe->ix_m = NULL;
1189 npe->ix_next = sc->tx_free;
1190 sc->tx_free = npe;
1191 }
1192 }
1193
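/*
 * Prime receive: attach an mbuf to each rx buffer and place the buffers
 * on the rx free queue for the NPE to fill.
 */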
1194 static void
1195 npe_startrecv(struct npe_softc *sc)
1196 {
1197 struct npedma *dma = &sc->rxdma;
1198 struct npebuf *npe;
1199 int i;
1200
1201 NPE_ASSERT_LOCKED(sc);
1202 for (i = 0; i < dma->nbuf; i++) {
1203 npe = &dma->buf[i];
1204 npe_rxbuf_init(sc, npe, npe->ix_m);
1205 /* set npe buf on rx free list */
1206 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1207 }
1208 }
1209
1210 /*
1211 * Reset and initialize the chip
1212 */
1213 static void
1214 npeinit_locked(void *xsc)
1215 {
1216 struct npe_softc *sc = xsc;
1217 struct ifnet *ifp = sc->sc_ifp;
1218
1219 NPE_ASSERT_LOCKED(sc);
1220 if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/
1221
1222 /*
1223 * Reset MAC core.
1224 */
1225 npe_mac_reset(sc);
1226
1227 	/* disable transmitter and receiver in the MAC */
1228 WR4(sc, NPE_MAC_RX_CNTRL1,
1229 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1230 WR4(sc, NPE_MAC_TX_CNTRL1,
1231 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1232
1233 /*
1234 * Set the MAC core registers.
1235 */
1236 	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
1237 WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */
1238 WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */
1239 /* thresholds determined by NPE firmware FS */
1240 WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
1241 WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
1242 WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8); /* tx fifo threshold (bytes) */
1243 WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */
1244 WR4(sc, NPE_MAC_RX_DEFER, 0x16); /* deferral on inter-frame gap*/
1245 WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */
1246 WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */
1247 WR4(sc, NPE_MAC_SLOT_TIME, 0x80); /* assumes MII mode */
1248
1249 WR4(sc, NPE_MAC_TX_CNTRL1,
1250 NPE_TX_CNTRL1_RETRY /* retry failed xmits */
1251 | NPE_TX_CNTRL1_FCS_EN /* append FCS */
1252 	    | NPE_TX_CNTRL1_2DEFER	/* 2-part deferral */
1253 | NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */
1254 /* XXX pad strip? */
1255 /* ena pause frame handling */
1256 WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
1257 WR4(sc, NPE_MAC_RX_CNTRL2, 0);
1258
1259 npe_setmac(sc, IF_LLADDR(ifp));
1260 npe_setportaddress(sc, IF_LLADDR(ifp));
1261 npe_setmcast(sc);
1262
1263 npe_startxmit(sc);
1264 npe_startrecv(sc);
1265
1266 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1267 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1268 sc->npe_watchdog_timer = 0; /* just in case */
1269
1270 	/* enable transmitter and receiver in the MAC */
1271 WR4(sc, NPE_MAC_RX_CNTRL1,
1272 RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
1273 WR4(sc, NPE_MAC_TX_CNTRL1,
1274 RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);
1275
1276 callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
1277 }
1278
1279 static void
1280 npeinit(void *xsc)
1281 {
1282 struct npe_softc *sc = xsc;
1283 NPE_LOCK(sc);
1284 npeinit_locked(sc);
1285 NPE_UNLOCK(sc);
1286 }
1287
1288 /*
1289 * Dequeue packets and place on the h/w transmit queue.
1290 */
1291 static void
1292 npestart_locked(struct ifnet *ifp)
1293 {
1294 struct npe_softc *sc = ifp->if_softc;
1295 struct npebuf *npe;
1296 struct npehwbuf *hw;
1297 struct mbuf *m, *n;
1298 struct npedma *dma = &sc->txdma;
1299 bus_dma_segment_t segs[NPE_MAXSEG];
1300 int nseg, len, error, i;
1301 uint32_t next;
1302
1303 NPE_ASSERT_LOCKED(sc);
1304 /* XXX can this happen? */
1305 if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1306 return;
1307
1308 while (sc->tx_free != NULL) {
1309 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1310 if (m == NULL) {
1311 /* XXX? */
1312 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1313 return;
1314 }
1315 npe = sc->tx_free;
1316 bus_dmamap_unload(dma->mtag, npe->ix_map);
1317 error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
1318 m, segs, &nseg, 0);
1319 if (error == EFBIG) {
1320 n = m_collapse(m, M_NOWAIT, NPE_MAXSEG);
1321 if (n == NULL) {
1322 if_printf(ifp, "%s: too many fragments %u\n",
1323 __func__, nseg);
1324 m_freem(m);
1325 return; /* XXX? */
1326 }
1327 m = n;
1328 error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
1329 m, segs, &nseg, 0);
1330 }
1331 if (error != 0 || nseg == 0) {
1332 if_printf(ifp, "%s: error %u nseg %u\n",
1333 __func__, error, nseg);
1334 m_freem(m);
1335 return; /* XXX? */
1336 }
1337 sc->tx_free = npe->ix_next;
1338
1339 bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);
1340
1341 /*
1342 * Tap off here if there is a bpf listener.
1343 */
1344 BPF_MTAP(ifp, m);
1345
1346 npe->ix_m = m;
1347 hw = npe->ix_hw;
1348 len = m->m_pkthdr.len;
1349 next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
1350 for (i = 0; i < nseg; i++) {
1351 hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
1352 hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
1353 hw->ix_ne[i].next = htobe32(next);
1354
1355 len = 0; /* zero for segments > 1 */
1356 next += sizeof(hw->ix_ne[0]);
1357 }
1358 hw->ix_ne[i-1].next = 0; /* zero last in chain */
1359 bus_dmamap_sync(dma->buf_tag, dma->buf_map,
1360 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1361
1362 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
1363 __func__, sc->tx_qid, npe->ix_neaddr,
1364 hw->ix_ne[0].data, hw->ix_ne[0].len);
1365 /* stick it on the tx q */
1366 /* XXX add vlan priority */
1367 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
1368
1369 sc->npe_watchdog_timer = 5;
1370 }
1371 if (sc->tx_free == NULL)
1372 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1373 }
1374
1375 void
1376 npestart(struct ifnet *ifp)
1377 {
1378 struct npe_softc *sc = ifp->if_softc;
1379 NPE_LOCK(sc);
1380 npestart_locked(ifp);
1381 NPE_UNLOCK(sc);
1382 }
1383
1384 static void
1385 npe_stopxmit(struct npe_softc *sc)
1386 {
1387 struct npedma *dma = &sc->txdma;
1388 int i;
1389
1390 NPE_ASSERT_LOCKED(sc);
1391
1392 /* XXX qmgr */
1393 for (i = 0; i < dma->nbuf; i++) {
1394 struct npebuf *npe = &dma->buf[i];
1395
1396 if (npe->ix_m != NULL) {
1397 bus_dmamap_unload(dma->mtag, npe->ix_map);
1398 m_freem(npe->ix_m);
1399 npe->ix_m = NULL;
1400 }
1401 }
1402 }
1403
1404 static void
1405 npe_stoprecv(struct npe_softc *sc)
1406 {
1407 struct npedma *dma = &sc->rxdma;
1408 int i;
1409
1410 NPE_ASSERT_LOCKED(sc);
1411
1412 /* XXX qmgr */
1413 for (i = 0; i < dma->nbuf; i++) {
1414 struct npebuf *npe = &dma->buf[i];
1415
1416 if (npe->ix_m != NULL) {
1417 bus_dmamap_unload(dma->mtag, npe->ix_map);
1418 m_freem(npe->ix_m);
1419 npe->ix_m = NULL;
1420 }
1421 }
1422 }
1423
1424 /*
1425 * Turn off interrupts, and stop the nic.
1426 */
1427 void
1428 npestop(struct npe_softc *sc)
1429 {
1430 struct ifnet *ifp = sc->sc_ifp;
1431
1432 	/* disable transmitter and receiver in the MAC */
1433 WR4(sc, NPE_MAC_RX_CNTRL1,
1434 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1435 WR4(sc, NPE_MAC_TX_CNTRL1,
1436 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1437
1438 sc->npe_watchdog_timer = 0;
1439 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1440
1441 callout_stop(&sc->tick_ch);
1442
1443 npe_stopxmit(sc);
1444 npe_stoprecv(sc);
1445 /* XXX go into loopback & drain q's? */
1446 /* XXX but beware of disabling tx above */
1447
1448 /*
1449 * The MAC core rx/tx disable may leave the MAC hardware in an
1450 * unpredictable state. A hw reset is executed before resetting
1451 * all the MAC parameters to a known value.
1452 */
1453 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
1454 DELAY(NPE_MAC_RESET_DELAY);
1455 WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
1456 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
1457 }
1458
1459 void
1460 npewatchdog(struct npe_softc *sc)
1461 {
1462 NPE_ASSERT_LOCKED(sc);
1463
1464 if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
1465 return;
1466
1467 device_printf(sc->sc_dev, "watchdog timeout\n");
1468 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1469
1470 npeinit_locked(sc);
1471 }
1472
1473 static int
1474 npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1475 {
1476 struct npe_softc *sc = ifp->if_softc;
1477 struct mii_data *mii;
1478 struct ifreq *ifr = (struct ifreq *)data;
1479 int error = 0;
1480 #ifdef DEVICE_POLLING
1481 int mask;
1482 #endif
1483
1484 switch (cmd) {
1485 case SIOCSIFFLAGS:
1486 NPE_LOCK(sc);
1487 if ((ifp->if_flags & IFF_UP) == 0 &&
1488 ifp->if_drv_flags & IFF_DRV_RUNNING) {
1489 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1490 npestop(sc);
1491 } else {
1492 /* reinitialize card on any parameter change */
1493 npeinit_locked(sc);
1494 }
1495 NPE_UNLOCK(sc);
1496 break;
1497
1498 case SIOCADDMULTI:
1499 case SIOCDELMULTI:
1500 /* update multicast filter list. */
1501 NPE_LOCK(sc);
1502 npe_setmcast(sc);
1503 NPE_UNLOCK(sc);
1504 error = 0;
1505 break;
1506
1507 case SIOCSIFMEDIA:
1508 case SIOCGIFMEDIA:
1509 mii = device_get_softc(sc->sc_mii);
1510 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1511 break;
1512
1513 #ifdef DEVICE_POLLING
1514 case SIOCSIFCAP:
1515 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1516 if (mask & IFCAP_POLLING) {
1517 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1518 error = ether_poll_register(npe_poll, ifp);
1519 if (error)
1520 return error;
1521 NPE_LOCK(sc);
1522 /* disable callbacks XXX txdone is shared */
1523 ixpqmgr_notify_disable(sc->rx_qid);
1524 ixpqmgr_notify_disable(sc->tx_doneqid);
1525 ifp->if_capenable |= IFCAP_POLLING;
1526 NPE_UNLOCK(sc);
1527 } else {
1528 error = ether_poll_deregister(ifp);
1529 /* NB: always enable qmgr callbacks */
1530 NPE_LOCK(sc);
1531 /* enable qmgr callbacks */
1532 ixpqmgr_notify_enable(sc->rx_qid,
1533 IX_QMGR_Q_SOURCE_ID_NOT_E);
1534 ixpqmgr_notify_enable(sc->tx_doneqid,
1535 IX_QMGR_Q_SOURCE_ID_NOT_E);
1536 ifp->if_capenable &= ~IFCAP_POLLING;
1537 NPE_UNLOCK(sc);
1538 }
1539 }
1540 break;
1541 #endif
1542 default:
1543 error = ether_ioctl(ifp, cmd, data);
1544 break;
1545 }
1546 return error;
1547 }
1548
1549 /*
1550 * Setup a traffic class -> rx queue mapping.
1551 */
1552 static int
1553 npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
1554 {
1555 uint32_t msg[2];
1556
1557 msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
1558 msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
1559 return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
1560 }
1561
1562 static int
1563 npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
1564 {
1565 uint32_t msg[2];
1566
1567 msg[0] = (NPE_SETPORTADDRESS << 24)
1568 | (sc->sc_npeid << 20)
1569 | (mac[0] << 8)
1570 | (mac[1] << 0);
1571 msg[1] = (mac[2] << 24)
1572 | (mac[3] << 16)
1573 | (mac[4] << 8)
1574 | (mac[5] << 0);
1575 return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
1576 }
1577
1578 static int
1579 npe_setfirewallmode(struct npe_softc *sc, int onoff)
1580 {
1581 uint32_t msg[2];
1582
1583 /* XXX honor onoff */
1584 msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
1585 msg[1] = 0;
1586 return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
1587 }
1588
1589 /*
1590 * Update and reset the statistics in the NPE.
1591 */
1592 static int
1593 npe_updatestats(struct npe_softc *sc)
1594 {
1595 uint32_t msg[2];
1596
1597 msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
1598 msg[1] = sc->sc_stats_phys; /* physical address of stat block */
1599 return ixpnpe_sendmsg_async(sc->sc_npe, msg);
1600 }
1601
1602 #if 0
1603 /*
1604 * Get the current statistics block.
1605 */
1606 static int
1607 npe_getstats(struct npe_softc *sc)
1608 {
1609 uint32_t msg[2];
1610
1611 msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
1612 msg[1] = sc->sc_stats_phys; /* physical address of stat block */
1613 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1614 }
1615
1616 /*
1617 * Query the image id of the loaded firmware.
1618 */
1619 static uint32_t
1620 npe_getimageid(struct npe_softc *sc)
1621 {
1622 uint32_t msg[2];
1623
1624 msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
1625 msg[1] = 0;
1626 return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
1627 }
1628
1629 /*
1630 * Enable/disable loopback.
1631 */
1632 static int
1633 npe_setloopback(struct npe_softc *sc, int ena)
1634 {
1635 uint32_t msg[2];
1636
1637 msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
1638 msg[1] = 0;
1639 return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
1640 }
1641 #endif
1642
1643 static void
1644 npe_child_detached(device_t dev, device_t child)
1645 {
1646 struct npe_softc *sc;
1647
1648 sc = device_get_softc(dev);
1649 if (child == sc->sc_mii)
1650 sc->sc_mii = NULL;
1651 }
1652
1653 /*
1654 * MII bus support routines.
1655 */
1656 #define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
1657 #define MII_WR4(sc, reg, v) \
1658 bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
1659
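/*
 * The 32-bit MDIO command/status values are split across four consecutive
 * MAC registers, one byte per register; assemble and scatter them here.
 */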
1660 static uint32_t
1661 npe_mii_mdio_read(struct npe_softc *sc, int reg)
1662 {
1663 uint32_t v;
1664
1665 /* NB: registers are known to be sequential */
1666 v = (MII_RD4(sc, reg+0) & 0xff) << 0;
1667 v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
1668 v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
1669 v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
1670 return v;
1671 }
1672
1673 static void
1674 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
1675 {
1676 /* NB: registers are known to be sequential */
1677 MII_WR4(sc, reg+0, cmd & 0xff);
1678 MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
1679 MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
1680 MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
1681 }
1682
1683 static int
1684 npe_mii_mdio_wait(struct npe_softc *sc)
1685 {
1686 uint32_t v;
1687 int i;
1688
1689 /* NB: typically this takes 25-30 trips */
1690 for (i = 0; i < 1000; i++) {
1691 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
1692 if ((v & NPE_MII_GO) == 0)
1693 return 1;
1694 DELAY(1);
1695 }
1696 device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
1697 __func__, v);
1698 return 0; /* NB: timeout */
1699 }
1700
1701 static int
1702 npe_miibus_readreg(device_t dev, int phy, int reg)
1703 {
1704 struct npe_softc *sc = device_get_softc(dev);
1705 uint32_t v;
1706
1707 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
1708 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1709 if (npe_mii_mdio_wait(sc))
1710 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
1711 else
1712 v = 0xffff | NPE_MII_READ_FAIL;
1713 return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
1714 }
1715
1716 static int
1717 npe_miibus_writereg(device_t dev, int phy, int reg, int data)
1718 {
1719 struct npe_softc *sc = device_get_softc(dev);
1720 uint32_t v;
1721
1722 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1723 | data | NPE_MII_WRITE
1724 | NPE_MII_GO;
1725 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1726 /* XXX complain about timeout */
1727 (void) npe_mii_mdio_wait(sc);
1728 return (0);
1729 }
1730
1731 static void
1732 npe_miibus_statchg(device_t dev)
1733 {
1734 struct npe_softc *sc = device_get_softc(dev);
1735 struct mii_data *mii = device_get_softc(sc->sc_mii);
1736 uint32_t tx1, rx1;
1737
1738 /* sync MAC duplex state */
1739 tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
1740 rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
1741 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1742 tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
1743 rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
1744 } else {
1745 tx1 |= NPE_TX_CNTRL1_DUPLEX;
1746 rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
1747 }
1748 WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
1749 WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
1750 }
1751
1752 static device_method_t npe_methods[] = {
1753 /* Device interface */
1754 DEVMETHOD(device_probe, npe_probe),
1755 DEVMETHOD(device_attach, npe_attach),
1756 DEVMETHOD(device_detach, npe_detach),
1757
1758 /* Bus interface */
1759 DEVMETHOD(bus_child_detached, npe_child_detached),
1760
1761 /* MII interface */
1762 DEVMETHOD(miibus_readreg, npe_miibus_readreg),
1763 DEVMETHOD(miibus_writereg, npe_miibus_writereg),
1764 DEVMETHOD(miibus_statchg, npe_miibus_statchg),
1765
1766 { 0, 0 }
1767 };
1768
1769 static driver_t npe_driver = {
1770 "npe",
1771 npe_methods,
1772 sizeof(struct npe_softc),
1773 };
1774
1775 DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
1776 DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
1777 MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
1778 MODULE_DEPEND(npe, miibus, 1, 1, 1);
1779 MODULE_DEPEND(npe, ether, 1, 1, 1);