/*-
 * Copyright (c) 2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/arm/xscale/ixp425/if_npe.c 173698 2007-11-17 18:22:57Z cognet $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 * XXX NPE-C port doesn't work yet
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include "miibus_if.h"
struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char	*name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
};

/*
 * Per-unit static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
	const char	*desc;		/* device description */
	int		npeid;		/* NPE assignment */
	uint32_t	imageid;	/* NPE firmware image id */
	uint32_t	regbase;
	int		regsize;
	uint32_t	miibase;
	int		miisize;
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_PORTS_MAX] = {
	{ .desc		= "IXP NPE-B",
	  .npeid	= NPE_B,
	  .imageid	= IXP425_NPE_B_IMAGEID,
	  .regbase	= IXP425_MAC_A_HWBASE,
	  .regsize	= IXP425_MAC_A_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	{ .desc		= "IXP NPE-C",
	  .npeid	= NPE_C,
	  .imageid	= IXP425_NPE_C_IMAGEID,
	  .regbase	= IXP425_MAC_B_HWBASE,
	  .regsize	= IXP425_MAC_B_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct ifnet *);
static int	npeioctl(struct ifnet *ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP425 NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;	/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
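/*
 * NB: the knobs above may also be set at boot from loader.conf,
 * e.g. (values here are illustrative only):
 *	hw.npe.debug=1
 *	hw.npe.rxbuf=128
 * Since rxbuf, txbuf, and tickinterval are read-only sysctls they
 * take effect only when set as tunables before the driver attaches.
 */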

static int
npe_probe(device_t dev)
{
	int unit = device_get_unit(dev);

	if (unit >= NPE_PORTS_MAX) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	/* XXX check feature register to see if enabled */
	device_set_desc(dev, npeconfig[unit].desc);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp = NULL;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	sc->sc_npe = ixpnpe_attach(dev);
	if (sc->sc_npe == NULL) {
		error = EIO;		/* XXX */
		goto out;
	}

	error = npe_activate(dev);
	if (error)
		goto out;

	npe_getmac(sc, eaddr);

	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = ENOMEM;
		goto out;
	}
	if (mii_phy_probe(dev, &sc->sc_mii,
	    npe_ifmedia_update, npe_ifmedia_status)) {
		device_printf(dev, "Cannot find my PHY.\n");
		error = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_watchdog = npewatchdog;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_timer = 0;
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	npe_deactivate(dev);
	if (ifp != NULL)
		if_free(ifp);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	if (sc->sc_npe != NULL)
		ixpnpe_detach(sc->sc_npe);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

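/*
 * Callback for bus_dmamap_load; stash the physical address of the
 * (single) segment in the softc so the caller can pick it up from
 * sc->buf_phys after the load completes.
 */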
static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		    "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(NULL, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s s/w buffers\n",
		    dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to map memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
		    &npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "unable to create dmamap for %s buffer %u, "
			    "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
		bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);
	int error, i;
	uint32_t imageid;

	/*
	 * Load NPE firmware and start it running.  We assume
	 * that minor version bumps remain compatible so probe
	 * the firmware image starting with the expected version
	 * and then bump the minor version up to the max.
	 */
	imageid = npeconfig[unit].imageid;
	for (;;) {
		error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
		if (error == 0)
			break;
		/* ESRCH is returned when the requested image is not present */
		if (error != ESRCH)
			return error;
		/* bump the minor version up to the max possible */
		if (NPEIMAGE_MINOR(imageid) == 0xff)
			return error;
		imageid++;
	}

	if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
	    npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
		device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
		    npeconfig[unit].regbase, npeconfig[unit].regsize);
		return ENOMEM;
	}

	if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
		/*
		 * The PHY's are only accessible from one MAC (it appears)
		 * so for other MAC's setup an additional mapping for
		 * frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
		    npeconfig[unit].miisize, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "Cannot map MII registers 0x%x:0x%x\n",
			    npeconfig[unit].miibase, npeconfig[unit].miisize);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;
	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(NULL, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		    "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for stats block, error %u\n",
		    error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to load memory for stats block, error %u\n",
		    error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/* XXX disable half-bridge LEARNING+FILTERING feature */

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *	rx		inbound q of rx'd frames
	 *	rx_free		pool of ixpbuf's for receiving frames
	 *	tx		outbound q of frames to send
	 *	tx_done		q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[unit].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/* tell the NPE to direct all traffic to rx_qid */
#if 0
	for (i = 0; i < 8; i++)
#else
	device_printf(sc->sc_dev, "remember to fix rx q setup\n");
	for (i = 0; i < 4; i++)
#endif
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	sc->tx_qid = npeconfig[unit].tx_qid;
	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[npeconfig[unit].npeid] == NULL,
	    ("npe %u already setup", npeconfig[unit].npeid));
	npes[npeconfig[unit].npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);

	npes[npeconfig[unit].npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL)
		ixpnpe_stop(sc->sc_npe);
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii)
		device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
	/* XXX sc_ioh and sc_miih */
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
#endif
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

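/*
 * Fold the big-endian statistics block returned by the NPE into
 * the 802.3 MIB counters and the ifnet error/collision counts.
 */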
static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

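/*
 * Program the unicast MAC address into the MAC; the hardware
 * exposes one address byte per 32-bit register.
 */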
static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

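/*
 * Splice a list of reaped tx buffers back onto the free list and
 * kick the start routine so any pending output gets queued again.
 */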
static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
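	/*
	 * NB: P2V maps an NPE-supplied physical buffer address back to
	 * the matching software npebuf; this works because the npebuf
	 * and npehwbuf arrays are allocated 1-1 and contiguously.
	 */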
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

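/*
 * Attach an mbuf cluster to an npebuf and fill in the NPE h/w
 * descriptor so the buffer can be posted to the rx free queue.
 */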
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
	    ("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
	    segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack.  Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;
			mrx->m_flags |= M_HASFCS;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

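/*
 * (Re)build the tx free list, reclaiming any mbufs still attached
 * from a previous run.
 */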
static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

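/*
 * Initialize each rx buffer and post it to the rx free queue so
 * the NPE can fill it.
 */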
static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
	      NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN		/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);		/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	      NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
	    | NPE_RX_CNTRL1_PAUSE_EN);		/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;		/* just in case */
	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
npe_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		ifp->if_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	ifp->if_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state.  A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;

	NPE_LOCK(sc);
	if_printf(ifp, "device timeout\n");
	ifp->if_oerrors++;
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
	uint32_t msg[2];

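	/*
	 * NB: word 0 carries the message id, NPE id, and class index;
	 * word 1 packs the traffic class and target queue ids.  The
	 * exact bit layout is dictated by the NPE firmware message
	 * format (cf. the IAL ethAcc code this driver replaces).
	 */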
	msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg(sc->sc_npe, msg);	/* NB: no recv */
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 *
 * NB: ixp425 has one PHY per NPE
 */
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
#undef MII_RD4
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
#undef MII_WR4
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
#define	MAXTRIES	100	/* XXX */
	uint32_t v;
	int i;

	for (i = 0; i < MAXTRIES; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
	}
	return 0;		/* NB: timeout */
#undef MAXTRIES
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != device_get_unit(dev))	/* XXX */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != device_get_unit(dev))	/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);