/*-
 * Copyright (c) 2006 Sam Leffler. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 * XXX NPE-C port doesn't work yet
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char	*name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
};

/*
 * Per-unit static configuration for IXP425. The tx and
 * rx free Q id's are fixed by the NPE microcode. The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing. It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC A
 * on the IXP425. This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
	const char	*desc;		/* device description */
	int		npeid;		/* NPE assignment */
	uint32_t	imageid;	/* NPE firmware image id */
	uint32_t	regbase;
	int		regsize;
	uint32_t	miibase;
	int		miisize;
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_PORTS_MAX] = {
	{ .desc		= "IXP NPE-B",
	  .npeid	= NPE_B,
	  .imageid	= IXP425_NPE_B_IMAGEID,
	  .regbase	= IXP425_MAC_A_HWBASE,
	  .regsize	= IXP425_MAC_A_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	{ .desc		= "IXP NPE-C",
	  .npeid	= NPE_C,
	  .imageid	= IXP425_NPE_C_IMAGEID,
	  .regbase	= IXP425_MAC_B_HWBASE,
	  .regsize	= IXP425_MAC_B_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet *ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP425 NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
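/*
 * For example, DPRINTF(sc, "%s: entry 0x%x\n", __func__, entry) prints
 * only when the per-device debug level (seeded from hw.npe.debug) is
 * non-zero; DPRINTFn additionally requires the level to be at least n.
 */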
static int npe_tickinterval = 3;	/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
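/*
 * NB: the tunables above may be set from the loader environment,
 * e.g. in /boot/loader.conf:
 *	hw.npe.debug=1
 *	hw.npe.rxbuf=128
 */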

static int
npe_probe(device_t dev)
{
	int unit = device_get_unit(dev);

	if (unit >= NPE_PORTS_MAX) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	/* XXX check feature register to see if enabled */
	device_set_desc(dev, npeconfig[unit].desc);
	return 0;
}

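/*
 * Attach the device: bring up the NPE, map registers, allocate
 * DMA resources and h/w queues, probe the PHY, and finally
 * attach the network interface.
 */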
static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp = NULL;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	sc->sc_npe = ixpnpe_attach(dev);
	if (sc->sc_npe == NULL) {
		error = EIO;		/* XXX */
		goto out;
	}

	error = npe_activate(dev);
	if (error)
		goto out;

	npe_getmac(sc, eaddr);

	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->sc_mii,
	    npe_ifmedia_update, npe_ifmedia_status)) {
		device_printf(dev, "Cannot find my PHY.\n");
		error = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	npe_deactivate(dev);
	if (ifp != NULL)
		if_free(ifp);
	return error;
}

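/*
 * Detach; the reverse of attach: stop the interface, tear down
 * the network interface state, then release NPE and DMA resources.
 */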
static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	if (sc->sc_npe != NULL)
		ixpnpe_detach(sc->sc_npe);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

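		/*
		 * Bits where every address agrees (all 1's or all 0's)
		 * become 1's in the mask; addr then supplies the value
		 * for those bits.  For example, last octets 0x01 and
		 * 0x02 give set 0x00 and clr 0x03, hence mask 0xfc and
		 * addr 0x00 for that octet.
		 */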
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

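/*
 * Allocate the shared block of NPE h/w buffers for one direction
 * (tx or rx) together with the parallel array of s/w npebuf state,
 * and create a dma map for each buffer's mbuf data.
 */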
static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		    "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s s/w buffers\n",
		    dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to map memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
		    ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
		    &npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "unable to create dmamap for %s buffer %u, "
			    "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

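/*
 * Reclaim all resources allocated by npe_dma_setup; safe to call
 * on a partially constructed npedma block.
 */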
static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
		bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

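/*
 * Activate the port: load and start the NPE firmware, map the MAC
 * (and, if necessary, MII) register windows, set up tx/rx DMA and
 * the statistics block, and configure the h/w queues.
 */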
static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);
	int error, i;
	uint32_t imageid;

	/*
	 * Load NPE firmware and start it running. We assume
	 * that minor version bumps remain compatible so probe
	 * the firmware image starting with the expected version
	 * and then bump the minor version up to the max.
	 */
	imageid = npeconfig[unit].imageid;
	for (;;) {
		error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
		if (error == 0)
			break;
		/* ESRCH is returned when the requested image is not present */
		if (error != ESRCH)
			return error;
		/* bump the minor version up to the max possible */
		if (NPEIMAGE_MINOR(imageid) == 0xff)
			return error;
		imageid++;
	}

	if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
	    npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
		device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
		    npeconfig[unit].regbase, npeconfig[unit].regsize);
		return ENOMEM;
	}

	if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
		/*
		 * The PHY's are only accessible from one MAC (it appears)
		 * so for other MAC's setup an additional mapping for
		 * frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
		    npeconfig[unit].miisize, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "Cannot map MII registers 0x%x:0x%x\n",
			    npeconfig[unit].miibase, npeconfig[unit].miisize);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;
	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		    "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for stats block, error %u\n",
		    error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to load memory for stats block, error %u\n",
		    error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/* XXX disable half-bridge LEARNING+FILTERING feature */

	/*
	 * Setup h/w rx/tx queues. There are four q's:
	 *	rx		inbound q of rx'd frames
	 *	rx_free		pool of ixpbuf's for receiving frames
	 *	tx		outbound q of frames to send
	 *	tx_done		q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues. The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap. These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame. These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[unit].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/* tell the NPE to direct all traffic to rx_qid */
#if 0
	for (i = 0; i < 8; i++)
#else
	device_printf(sc->sc_dev, "remember to fix rx q setup\n");
	for (i = 0; i < 4; i++)
#endif
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	sc->tx_qid = npeconfig[unit].tx_qid;
	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[npeconfig[unit].npeid] == NULL,
	    ("npe %u already setup", npeconfig[unit].npeid));
	npes[npeconfig[unit].npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);

	npes[npeconfig[unit].npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL)
		ixpnpe_stop(sc->sc_npe);
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
		    sc->sc_stats_map);
		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii)
		device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
	/* XXX sc_ioh and sc_miih */
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
#endif
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

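/*
 * Merge the current h/w statistics block into the 802.3 MIB data
 * and the interface error/collision counters.  NB: the NPE returns
 * counters in big-endian byte order.
 */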
static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}

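/*
 * Periodic work: collect statistics from the NPE, update PHY state,
 * and check the transmit watchdog.
 */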
static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts. The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results. This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

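/*
 * Accumulator used by npe_txdone to batch completed tx buffers
 * per-NPE before handing them back to the owning port.
 */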
struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue. Reap mbufs
 * and return tx buffers to the free list. Finally
 * restart output. Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

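/*
 * Setup an rx buffer: attach an mbuf cluster (allocating one if
 * needed), load its dma map, and fill in the h/w descriptor so
 * the NPE can use it.
 */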
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
	    ("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
	    segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE. Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf. When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;
			mrx->m_flags |= M_HASFCS;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

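/*
 * Reset the tx side: reclaim any dangling mbufs and place all tx
 * buffers on the free list.
 */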
static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

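/*
 * Reset the rx side: initialize each rx buffer and post it to the
 * rx free queue for the NPE to fill.
 */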
static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;		/* XXX */

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2, 0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED, 0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER, 0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER, 0x16);	/* deferral on inter-frame gap*/
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME, 0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
	      NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN		/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);		/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	      NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
	    | NPE_RX_CNTRL1_PAUSE_EN);		/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;

	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters. If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state. We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
npe_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs. Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr. This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;

	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg(sc->sc_npe, msg);		/* NB: no recv */
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 *
 * NB: ixp425 has one PHY per NPE
 */
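/*
 * The MDIO command/status values are spread across four consecutive
 * 32-bit registers, each holding 8 bits of the value in its low byte;
 * the helpers below gather/scatter the 32-bit value accordingly.
 */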
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
#undef MII_RD4
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
#undef MII_WR4
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
#define	MAXTRIES	100	/* XXX */
	uint32_t v;
	int i;

	for (i = 0; i < MAXTRIES; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
	}
	return 0;		/* NB: timeout */
#undef MAXTRIES
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != device_get_unit(dev))	/* XXX */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != device_get_unit(dev))	/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);