FreeBSD/Linux Kernel Cross Reference
sys/arm/xscale/ixp425/if_npe.c

    1 /*-
    2  * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 #include <sys/cdefs.h>
   26 __FBSDID("$FreeBSD: releng/10.1/sys/arm/xscale/ixp425/if_npe.c 266406 2014-05-18 16:07:35Z ian $");
   27 
   28 /*
   29  * Intel XScale NPE Ethernet driver.
   30  *
   31  * This driver handles the two ports present on the IXP425.
   32  * Packet processing is done by the Network Processing Engines
   33  * (NPE's) that work together with a MAC and PHY. The MAC
    34  * registers are also mapped into the XScale cpu's address space;
    35  * the PHY is accessed via the MAC.  NPE-XScale communication
    36  * happens through h/w queues managed by the Q Manager block.
   37  *
   38  * The code here replaces the ethAcc, ethMii, and ethDB classes
   39  * in the Intel Access Library (IAL) and the OS-specific driver.
   40  *
   41  * XXX add vlan support
   42  */
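
/*
 * A rough sketch of the data path described above:
 *
 *   XScale driver --(tx q)--> Q Manager --> NPE --> MAC --> PHY --> wire
 *   XScale driver <--(rx q)-- Q Manager <-- NPE <-- MAC <-- PHY <-- wire
 *
 * Queue entries carry physical addresses of npehwbuf descriptors; the
 * driver hands them to the q manager and reclaims them from the rx and
 * tx_done queues.
 */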
   43 #ifdef HAVE_KERNEL_OPTION_HEADERS
   44 #include "opt_device_polling.h"
   45 #endif
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/bus.h>
   50 #include <sys/kernel.h>
   51 #include <sys/mbuf.h>
   52 #include <sys/malloc.h>
   53 #include <sys/module.h>
   54 #include <sys/rman.h>
   55 #include <sys/socket.h>
   56 #include <sys/sockio.h>
   57 #include <sys/sysctl.h>
   58 #include <sys/endian.h>
   59 #include <machine/bus.h>
   60 
   61 #include <net/ethernet.h>
   62 #include <net/if.h>
   63 #include <net/if_arp.h>
   64 #include <net/if_dl.h>
   65 #include <net/if_media.h>
   66 #include <net/if_mib.h>
   67 #include <net/if_types.h>
   68 #include <net/if_var.h>
   69 
   70 #ifdef INET
   71 #include <netinet/in.h>
   72 #include <netinet/in_systm.h>
   73 #include <netinet/in_var.h>
   74 #include <netinet/ip.h>
   75 #endif
   76 
   77 #include <net/bpf.h>
   78 #include <net/bpfdesc.h>
   79 
   80 #include <arm/xscale/ixp425/ixp425reg.h>
   81 #include <arm/xscale/ixp425/ixp425var.h>
   82 #include <arm/xscale/ixp425/ixp425_qmgr.h>
   83 #include <arm/xscale/ixp425/ixp425_npevar.h>
   84 
   85 #include <dev/mii/mii.h>
   86 #include <dev/mii/miivar.h>
   87 #include <arm/xscale/ixp425/if_npereg.h>
   88 
   89 #include <machine/armreg.h>
   90 
   91 #include "miibus_if.h"
   92 
   93 /*
    94  * XXX: For the main bus dma tag.  Can go away once the new method to
    95  * get the dma tag from the parent is MFC'd into RELENG_6.
   96  */
   97 extern struct ixp425_softc *ixp425_softc;
   98 
   99 struct npebuf {
  100         struct npebuf   *ix_next;       /* chain to next buffer */
  101         void            *ix_m;          /* backpointer to mbuf */
  102         bus_dmamap_t    ix_map;         /* bus dma map for associated data */
  103         struct npehwbuf *ix_hw;         /* associated h/w block */
  104         uint32_t        ix_neaddr;      /* phys address of ix_hw */
  105 };
  106 
  107 struct npedma {
  108         const char*     name;
  109         int             nbuf;           /* # npebuf's allocated */
  110         bus_dma_tag_t   mtag;           /* bus dma tag for mbuf data */
  111         struct npehwbuf *hwbuf;         /* NPE h/w buffers */
  112         bus_dma_tag_t   buf_tag;        /* tag+map for NPE buffers */
  113         bus_dmamap_t    buf_map;
  114         bus_addr_t      buf_phys;       /* phys addr of buffers */
  115         struct npebuf   *buf;           /* s/w buffers (1-1 w/ h/w) */
  116 };
  117 
  118 struct npe_softc {
  119         /* XXX mii requires this be first; do not move! */
  120         struct ifnet    *sc_ifp;        /* ifnet pointer */
  121         struct mtx      sc_mtx;         /* basically a perimeter lock */
  122         device_t        sc_dev;
  123         bus_space_tag_t sc_iot;         
  124         bus_space_handle_t sc_ioh;      /* MAC register window */
  125         device_t        sc_mii;         /* child miibus */
  126         bus_space_handle_t sc_miih;     /* MII register window */
  127         int             sc_npeid;
  128         struct ixpnpe_softc *sc_npe;    /* NPE support */
  129         int             sc_debug;       /* DPRINTF* control */
  130         int             sc_tickinterval;
  131         struct callout  tick_ch;        /* Tick callout */
  132         int             npe_watchdog_timer;
  133         struct npedma   txdma;
  134         struct npebuf   *tx_free;       /* list of free tx buffers */
  135         struct npedma   rxdma;
  136         bus_addr_t      buf_phys;       /* XXX for returning a value */
  137         int             rx_qid;         /* rx qid */
  138         int             rx_freeqid;     /* rx free buffers qid */
  139         int             tx_qid;         /* tx qid */
  140         int             tx_doneqid;     /* tx completed qid */
  141         struct ifmib_iso_8802_3 mibdata;
  142         bus_dma_tag_t   sc_stats_tag;   /* bus dma tag for stats block */
  143         struct npestats *sc_stats;
  144         bus_dmamap_t    sc_stats_map;
  145         bus_addr_t      sc_stats_phys;  /* phys addr of sc_stats */
  146         struct npestats sc_totals;      /* accumulated sc_stats */
  147 };
  148 
  149 /*
  150  * Static configuration for IXP425.  The tx and
  151  * rx free Q id's are fixed by the NPE microcode.  The
  152  * rx Q id's are programmed to be separate to simplify
  153  * multi-port processing.  It may be better to handle
  154  * all traffic through one Q (as done by the Intel drivers).
  155  *
  156  * Note that the PHY's are accessible only from MAC B on the
  157  * IXP425 and from MAC C on other devices.  This and other
  158  * platform-specific assumptions are handled with hints.
  159  */
  160 static const struct {
  161         uint32_t        macbase;
  162         uint32_t        miibase;
  163         int             phy;            /* phy id */
  164         uint8_t         rx_qid;
  165         uint8_t         rx_freeqid;
  166         uint8_t         tx_qid;
  167         uint8_t         tx_doneqid;
  168 } npeconfig[NPE_MAX] = {
  169         [NPE_A] = {
  170           .macbase      = IXP435_MAC_A_HWBASE,
  171           .miibase      = IXP425_MAC_C_HWBASE,
  172           .phy          = 2,
  173           .rx_qid       = 4,
  174           .rx_freeqid   = 26,
  175           .tx_qid       = 23,
  176           .tx_doneqid   = 31
  177         },
  178         [NPE_B] = {
  179           .macbase      = IXP425_MAC_B_HWBASE,
  180           .miibase      = IXP425_MAC_B_HWBASE,
  181           .phy          = 0,
  182           .rx_qid       = 4,
  183           .rx_freeqid   = 27,
  184           .tx_qid       = 24,
  185           .tx_doneqid   = 31
  186         },
  187         [NPE_C] = {
  188           .macbase      = IXP425_MAC_C_HWBASE,
  189           .miibase      = IXP425_MAC_B_HWBASE,
  190           .phy          = 1,
  191           .rx_qid       = 12,
  192           .rx_freeqid   = 28,
  193           .tx_qid       = 25,
  194           .tx_doneqid   = 31
  195         },
  196 };
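
/*
 * The table above can be overridden with kernel hints when a board is
 * wired differently; hypothetical example values for unit 0:
 *
 *   hint.npe.0.npeid="C"   # bind the unit to NPE-C
 *   hint.npe.0.mac="C"     # MAC register window (A, B, or C)
 *   hint.npe.0.mii="B"     # access the PHY via MAC B
 *   hint.npe.0.phy="1"     # PHY address on the MII bus
 *
 * See override_npeid(), override_addr(), and override_unit() below.
 */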
  197 static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */
  198 
  199 static __inline uint32_t
  200 RD4(struct npe_softc *sc, bus_size_t off)
  201 {
  202         return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
  203 }
  204 
  205 static __inline void
  206 WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
  207 {
  208         bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
  209 }
  210 
  211 #define NPE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
  212 #define NPE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
  213 #define NPE_LOCK_INIT(_sc) \
  214         mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
  215             MTX_NETWORK_LOCK, MTX_DEF)
  216 #define NPE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
  217 #define NPE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
  218 #define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
  219 
  220 static devclass_t npe_devclass;
  221 
  222 static int      override_npeid(device_t, const char *resname, int *val);
  223 static int      npe_activate(device_t dev);
  224 static void     npe_deactivate(device_t dev);
  225 static int      npe_ifmedia_update(struct ifnet *ifp);
  226 static void     npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
  227 static void     npe_setmac(struct npe_softc *sc, u_char *eaddr);
  228 static void     npe_getmac(struct npe_softc *sc, u_char *eaddr);
  229 static void     npe_txdone(int qid, void *arg);
  230 static int      npe_rxbuf_init(struct npe_softc *, struct npebuf *,
  231                         struct mbuf *);
  232 static int      npe_rxdone(int qid, void *arg);
  233 static void     npeinit(void *);
  234 static void     npestart_locked(struct ifnet *);
  235 static void     npestart(struct ifnet *);
  236 static void     npestop(struct npe_softc *);
  237 static void     npewatchdog(struct npe_softc *);
  238 static int      npeioctl(struct ifnet * ifp, u_long, caddr_t);
  239 
  240 static int      npe_setrxqosentry(struct npe_softc *, int classix,
  241                         int trafclass, int qid);
  242 static int      npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
  243 static int      npe_setfirewallmode(struct npe_softc *, int onoff);
  244 static int      npe_updatestats(struct npe_softc *);
  245 #if 0
  246 static int      npe_getstats(struct npe_softc *);
  247 static uint32_t npe_getimageid(struct npe_softc *);
  248 static int      npe_setloopback(struct npe_softc *, int ena);
  249 #endif
  250 
  251 /* NB: all tx done processing goes through one queue */
  252 static int tx_doneqid = -1;
  253 
  254 static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
  255     "IXP4XX NPE driver parameters");
  256 
  257 static int npe_debug = 0;
  258 SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
  259            0, "IXP4XX NPE network interface debug msgs");
  260 TUNABLE_INT("hw.npe.debug", &npe_debug);
  261 #define DPRINTF(sc, fmt, ...) do {                                      \
  262         if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);  \
  263 } while (0)
  264 #define DPRINTFn(n, sc, fmt, ...) do {                                  \
  265         if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
  266 } while (0)
  267 static int npe_tickinterval = 3;                /* npe_tick frequency (secs) */
  268 SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
  269             0, "periodic work interval (secs)");
  270 TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);
  271 
  272 static  int npe_rxbuf = 64;             /* # rx buffers to allocate */
  273 SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
  274             0, "rx buffers allocated");
  275 TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
  276 static  int npe_txbuf = 128;            /* # tx buffers to allocate */
  277 SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
  278             0, "tx buffers allocated");
  279 TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
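
/*
 * The tunables above are read at boot; a hypothetical /boot/loader.conf
 * might contain:
 *
 *   hw.npe.debug=1
 *   hw.npe.tickinterval=5
 *   hw.npe.rxbuf=128
 *   hw.npe.txbuf=256
 *
 * The global sysctls other than hw.npe.debug are read-only, but
 * npe_attach() adds per-device dev.npe.N.debug and dev.npe.N.tickinterval
 * knobs that are writable at run time.
 */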
  280 
  281 static int
  282 unit2npeid(int unit)
  283 {
  284         static const int npeidmap[2][3] = {
  285                 /* on 425 A is for HSS, B & C are for Ethernet */
  286                 { NPE_B, NPE_C, -1 },   /* IXP425 */
  287                 /* 435 only has A & C, order C then A */
  288                 { NPE_C, NPE_A, -1 },   /* IXP435 */
  289         };
  290         /* XXX check feature register instead */
  291         return (unit < 3 ? npeidmap[
  292             (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
  293 }
  294 
  295 static int
  296 npe_probe(device_t dev)
  297 {
  298         static const char *desc[NPE_MAX] = {
  299                 [NPE_A] = "IXP NPE-A",
  300                 [NPE_B] = "IXP NPE-B",
  301                 [NPE_C] = "IXP NPE-C"
  302         };
  303         int unit = device_get_unit(dev);
  304         int npeid;
  305 
  306         if (unit > 2 ||
  307             (ixp4xx_read_feature_bits() &
  308              (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
  309                 return EINVAL;
  310 
  311         npeid = -1;
  312         if (!override_npeid(dev, "npeid", &npeid))
  313                 npeid = unit2npeid(unit);
  314         if (npeid == -1) {
  315                 device_printf(dev, "unit %d not supported\n", unit);
  316                 return EINVAL;
  317         }
  318         device_set_desc(dev, desc[npeid]);
  319         return 0;
  320 }
  321 
  322 static int
  323 npe_attach(device_t dev)
  324 {
  325         struct npe_softc *sc = device_get_softc(dev);
  326         struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
  327         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
  328         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
  329         struct ifnet *ifp;
  330         int error;
  331         u_char eaddr[6];
  332 
  333         sc->sc_dev = dev;
  334         sc->sc_iot = sa->sc_iot;
  335         NPE_LOCK_INIT(sc);
  336         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
  337         sc->sc_debug = npe_debug;
  338         sc->sc_tickinterval = npe_tickinterval;
  339 
  340         ifp = if_alloc(IFT_ETHER);
  341         if (ifp == NULL) {
  342                 device_printf(dev, "cannot allocate ifnet\n");
  343                 error = EIO;            /* XXX */
  344                 goto out;
  345         }
   346         /* NB: must be set up prior to invoking mii code */
  347         sc->sc_ifp = ifp;
  348 
  349         error = npe_activate(dev);
  350         if (error) {
  351                 device_printf(dev, "cannot activate npe\n");
  352                 goto out;
  353         }
  354 
  355         npe_getmac(sc, eaddr);
  356 
  357         ifp->if_softc = sc;
  358         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  359         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  360         ifp->if_start = npestart;
  361         ifp->if_ioctl = npeioctl;
  362         ifp->if_init = npeinit;
  363         IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
  364         ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
  365         IFQ_SET_READY(&ifp->if_snd);
  366         ifp->if_linkmib = &sc->mibdata;
  367         ifp->if_linkmiblen = sizeof(sc->mibdata);
  368         sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
   369         /* device supports oversized VLAN frames */
  370         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  371         ifp->if_capenable = ifp->if_capabilities;
  372 #ifdef DEVICE_POLLING
  373         ifp->if_capabilities |= IFCAP_POLLING;
  374 #endif
  375 
  376         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
  377             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
  378         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
  379             CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
  380         SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
  381             CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");
  382 
  383         ether_ifattach(ifp, eaddr);
  384         return 0;
  385 out:
  386         if (ifp != NULL)
  387                 if_free(ifp);
  388         NPE_LOCK_DESTROY(sc);
  389         npe_deactivate(dev);
  390         return error;
  391 }
  392 
  393 static int
  394 npe_detach(device_t dev)
  395 {
  396         struct npe_softc *sc = device_get_softc(dev);
  397         struct ifnet *ifp = sc->sc_ifp;
  398 
  399 #ifdef DEVICE_POLLING
  400         if (ifp->if_capenable & IFCAP_POLLING)
  401                 ether_poll_deregister(ifp);
  402 #endif
  403         npestop(sc);
  404         if (ifp != NULL) {
  405                 ether_ifdetach(ifp);
  406                 if_free(ifp);
  407         }
  408         NPE_LOCK_DESTROY(sc);
  409         npe_deactivate(dev);
  410         return 0;
  411 }
  412 
  413 /*
  414  * Compute and install the multicast filter.
  415  */
  416 static void
  417 npe_setmcast(struct npe_softc *sc)
  418 {
  419         struct ifnet *ifp = sc->sc_ifp;
  420         uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
  421         int i;
  422 
  423         if (ifp->if_flags & IFF_PROMISC) {
  424                 memset(mask, 0, ETHER_ADDR_LEN);
  425                 memset(addr, 0, ETHER_ADDR_LEN);
  426         } else if (ifp->if_flags & IFF_ALLMULTI) {
  427                 static const uint8_t allmulti[ETHER_ADDR_LEN] =
  428                     { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
  429                 memcpy(mask, allmulti, ETHER_ADDR_LEN);
  430                 memcpy(addr, allmulti, ETHER_ADDR_LEN);
  431         } else {
  432                 uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
  433                 struct ifmultiaddr *ifma;
  434                 const uint8_t *mac;
  435 
  436                 memset(clr, 0, ETHER_ADDR_LEN);
  437                 memset(set, 0xff, ETHER_ADDR_LEN);
  438 
  439                 if_maddr_rlock(ifp);
  440                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  441                         if (ifma->ifma_addr->sa_family != AF_LINK)
  442                                 continue;
  443                         mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
  444                         for (i = 0; i < ETHER_ADDR_LEN; i++) {
  445                                 clr[i] |= mac[i];
  446                                 set[i] &= mac[i];
  447                         }
  448                 }
  449                 if_maddr_runlock(ifp);
  450 
  451                 for (i = 0; i < ETHER_ADDR_LEN; i++) {
  452                         mask[i] = set[i] | ~clr[i];
  453                         addr[i] = set[i];
  454                 }
  455         }
  456 
  457         /*
  458          * Write the mask and address registers.
  459          */
  460         for (i = 0; i < ETHER_ADDR_LEN; i++) {
  461                 WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
  462                 WR4(sc, NPE_MAC_ADDR(i), addr[i]);
  463         }
  464 }
  465 
  466 static void
  467 npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  468 {
  469         struct npe_softc *sc;
  470 
  471         if (error != 0)
  472                 return;
  473         sc = (struct npe_softc *)arg;
  474         sc->buf_phys = segs[0].ds_addr;
  475 }
  476 
  477 static int
  478 npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
  479         const char *name, int nbuf, int maxseg)
  480 {
  481         int error, i;
  482 
  483         memset(dma, 0, sizeof(*dma));
  484 
  485         dma->name = name;
  486         dma->nbuf = nbuf;
  487 
  488         /* DMA tag for mapped mbufs  */
  489         error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
  490             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  491             MCLBYTES, maxseg, MCLBYTES, 0,
  492             busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
  493         if (error != 0) {
  494                 device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
  495                      "error %u\n", dma->name, error);
  496                 return error;
  497         }
  498 
  499         /* DMA tag and map for the NPE buffers */
  500         error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
  501             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  502             nbuf * sizeof(struct npehwbuf), 1,
  503             nbuf * sizeof(struct npehwbuf), 0,
  504             busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
  505         if (error != 0) {
  506                 device_printf(sc->sc_dev,
  507                     "unable to create %s npebuf dma tag, error %u\n",
  508                     dma->name, error);
  509                 return error;
  510         }
   511         error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
   512             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->buf_map);
   513         if (error != 0) {
   514                 device_printf(sc->sc_dev,
   515                      "unable to allocate memory for %s h/w buffers, error %u\n",
   516                      dma->name, error);
   517                 return error;
   518         }
  519         /* XXX M_TEMP */
  520         dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
  521         if (dma->buf == NULL) {
  522                 device_printf(sc->sc_dev,
  523                      "unable to allocate memory for %s s/w buffers\n",
  524                      dma->name);
   525                 return ENOMEM;
  526         }
   527         error = bus_dmamap_load(dma->buf_tag, dma->buf_map, dma->hwbuf,
   528             nbuf * sizeof(struct npehwbuf), npe_getaddr, sc, 0);
   529         if (error != 0) {
   530                 device_printf(sc->sc_dev, "unable to map memory for %s "
   531                      "h/w buffers, error %u\n", dma->name, error);
   532                 return error;
   533         }
  534         dma->buf_phys = sc->buf_phys;
  535         for (i = 0; i < dma->nbuf; i++) {
  536                 struct npebuf *npe = &dma->buf[i];
  537                 struct npehwbuf *hw = &dma->hwbuf[i];
  538 
  539                 /* calculate offset to shared area */
  540                 npe->ix_neaddr = dma->buf_phys +
  541                         ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
  542                 KASSERT((npe->ix_neaddr & 0x1f) == 0,
  543                     ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
  544                 error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
  545                                 &npe->ix_map);
  546                 if (error != 0) {
  547                         device_printf(sc->sc_dev,
  548                              "unable to create dmamap for %s buffer %u, "
  549                              "error %u\n", dma->name, i, error);
  550                         return error;
  551                 }
  552                 npe->ix_hw = hw;
  553         }
  554         bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
  555         return 0;
  556 }
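
/*
 * For illustration (addresses hypothetical): the loop above lays s/w
 * buffers 1-1 over the h/w descriptors, so with buf_phys == 0x10000
 * the third descriptor gets
 *
 *   ix_neaddr = 0x10000 + 2 * sizeof(struct npehwbuf)
 *
 * and the KASSERT checks that every such address meets the NPE's
 * 32-byte alignment requirement.
 */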
  557 
  558 static void
  559 npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
  560 {
  561         int i;
  562 
  563         if (dma->hwbuf != NULL) {
  564                 for (i = 0; i < dma->nbuf; i++) {
  565                         struct npebuf *npe = &dma->buf[i];
  566                         bus_dmamap_destroy(dma->mtag, npe->ix_map);
  567                 }
  568                 bus_dmamap_unload(dma->buf_tag, dma->buf_map);
  569                 bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
  570         }
  571         if (dma->buf != NULL)
  572                 free(dma->buf, M_TEMP);
  573         if (dma->buf_tag)
  574                 bus_dma_tag_destroy(dma->buf_tag);
  575         if (dma->mtag)
  576                 bus_dma_tag_destroy(dma->mtag);
  577         memset(dma, 0, sizeof(*dma));
  578 }
  579 
  580 static int
  581 override_addr(device_t dev, const char *resname, int *base)
  582 {
  583         int unit = device_get_unit(dev);
  584         const char *resval;
  585 
  586         /* XXX warn for wrong hint type */
  587         if (resource_string_value("npe", unit, resname, &resval) != 0)
  588                 return 0;
  589         switch (resval[0]) {
  590         case 'A':
  591                 *base = IXP435_MAC_A_HWBASE;
  592                 break;
  593         case 'B':
  594                 *base = IXP425_MAC_B_HWBASE;
  595                 break;
  596         case 'C':
  597                 *base = IXP425_MAC_C_HWBASE;
  598                 break;
  599         default:
  600                 device_printf(dev, "Warning, bad value %s for "
  601                     "npe.%d.%s ignored\n", resval, unit, resname);
  602                 return 0;
  603         }
  604         if (bootverbose)
  605                 device_printf(dev, "using npe.%d.%s=%s override\n",
  606                     unit, resname, resval);
  607         return 1;
  608 }
  609 
  610 static int
  611 override_npeid(device_t dev, const char *resname, int *npeid)
  612 {
  613         int unit = device_get_unit(dev);
  614         const char *resval;
  615 
  616         /* XXX warn for wrong hint type */
  617         if (resource_string_value("npe", unit, resname, &resval) != 0)
  618                 return 0;
  619         switch (resval[0]) {
  620         case 'A': *npeid = NPE_A; break;
  621         case 'B': *npeid = NPE_B; break;
  622         case 'C': *npeid = NPE_C; break;
  623         default:
  624                 device_printf(dev, "Warning, bad value %s for "
  625                     "npe.%d.%s ignored\n", resval, unit, resname);
  626                 return 0;
  627         }
  628         if (bootverbose)
  629                 device_printf(dev, "using npe.%d.%s=%s override\n",
  630                     unit, resname, resval);
  631         return 1;
  632 }
  633 
  634 static int
  635 override_unit(device_t dev, const char *resname, int *val, int min, int max)
  636 {
  637         int unit = device_get_unit(dev);
  638         int resval;
  639 
  640         if (resource_int_value("npe", unit, resname, &resval) != 0)
  641                 return 0;
  642         if (!(min <= resval && resval <= max)) {
  643                 device_printf(dev, "Warning, bad value %d for npe.%d.%s "
  644                     "ignored (value must be [%d-%d])\n", resval, unit,
  645                     resname, min, max);
  646                 return 0;
  647         }
  648         if (bootverbose)
  649                 device_printf(dev, "using npe.%d.%s=%d override\n",
  650                     unit, resname, resval);
  651         *val = resval;
  652         return 1;
  653 }
  654 
  655 static void
  656 npe_mac_reset(struct npe_softc *sc)
  657 {
  658         /*
  659          * Reset MAC core.
  660          */
  661         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
  662         DELAY(NPE_MAC_RESET_DELAY);
  663         /* configure MAC to generate MDC clock */
  664         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
  665 }
  666 
  667 static int
  668 npe_activate(device_t dev)
  669 {
  670         struct npe_softc *sc = device_get_softc(dev);
  671         int error, i, macbase, miibase, phy;
  672 
  673         /*
   674          * Set up the NPE ID, MAC, and MII bindings.  We allow override
  675          * via hints to handle unexpected board configs.
  676          */
  677         if (!override_npeid(dev, "npeid", &sc->sc_npeid))
  678                 sc->sc_npeid = unit2npeid(device_get_unit(dev));
  679         sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
  680         if (sc->sc_npe == NULL) {
  681                 device_printf(dev, "cannot attach ixpnpe\n");
  682                 return EIO;             /* XXX */
  683         }
  684 
  685         /* MAC */
  686         if (!override_addr(dev, "mac", &macbase))
  687                 macbase = npeconfig[sc->sc_npeid].macbase;
  688         device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
  689         if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
  690                 device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
  691                     macbase, IXP425_REG_SIZE);
  692                 return ENOMEM;
  693         }
  694 
  695         /* PHY */
  696         if (!override_unit(dev, "phy", &phy, 0, MII_NPHY - 1))
  697                 phy = npeconfig[sc->sc_npeid].phy;
  698         if (!override_addr(dev, "mii", &miibase))
  699                 miibase = npeconfig[sc->sc_npeid].miibase;
  700         device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
  701         if (miibase != macbase) {
  702                 /*
   703                  * PHY is mapped through a different MAC; set up an
  704                  * additional mapping for frobbing the PHY registers.
  705                  */
  706                 if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
  707                         device_printf(dev,
  708                             "cannot map MII registers 0x%x:0x%x\n",
  709                             miibase, IXP425_REG_SIZE);
  710                         return ENOMEM;
  711                 }
  712         } else
  713                 sc->sc_miih = sc->sc_ioh;
  714 
  715         /*
  716          * Load NPE firmware and start it running.
  717          */
  718         error = ixpnpe_init(sc->sc_npe);
  719         if (error != 0) {
  720                 device_printf(dev, "cannot init NPE (error %d)\n", error);
  721                 return error;
  722         }
  723 
  724         /* attach PHY */
  725         error = mii_attach(dev, &sc->sc_mii, sc->sc_ifp, npe_ifmedia_update,
  726             npe_ifmedia_status, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
  727         if (error != 0) {
  728                 device_printf(dev, "attaching PHYs failed\n");
  729                 return error;
  730         }
  731 
  732         error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
  733         if (error != 0)
  734                 return error;
  735         error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
  736         if (error != 0)
  737                 return error;
  738 
  739         /* setup statistics block */
  740         error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
  741             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  742             sizeof(struct npestats), 1, sizeof(struct npestats), 0,
  743             busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
  744         if (error != 0) {
  745                 device_printf(sc->sc_dev, "unable to create stats tag, "
  746                      "error %u\n", error);
  747                 return error;
  748         }
   749         error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
   750             BUS_DMA_NOWAIT, &sc->sc_stats_map);
   751         if (error != 0) {
   752                 device_printf(sc->sc_dev, "unable to allocate memory for "
   753                      "stats block, error %u\n", error);
   754                 return error;
   755         }
   756         error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
   757             sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
   758         if (error != 0) {
   759                 device_printf(sc->sc_dev, "unable to load memory for "
   760                      "stats block, error %u\n", error);
   761                 return error;
   762         }
  763         sc->sc_stats_phys = sc->buf_phys;
  764 
   765         /*
   766          * Set up h/w rx/tx queues.  There are four q's:
   767          *   rx         inbound q of rx'd frames
   768          *   rx_free    pool of ixpbuf's for receiving frames
   769          *   tx         outbound q of frames to send
   770          *   tx_done    q of tx frames that have been processed
   771          *
   772          * The NPE handles the actual tx/rx process and the q manager
   773          * handles the queues.  The driver just writes entries to the
   774          * q manager mailboxes and gets callbacks when there are rx'd
   775          * frames to process or tx'd frames to reap.  These callbacks
   776          * are controlled by the q configurations; e.g. we get a
   777          * callback when tx_done has 2 or more frames to process and
   778          * when the rx q has at least one frame.  These settings can be
   779          * changed at the time the q is configured.
   780          */
  781         sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
  782         ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
  783                 IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
  784         sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
  785         ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
  786         /*
  787          * Setup the NPE to direct all traffic to rx_qid.
  788          * When QoS is enabled in the firmware there are
  789          * 8 traffic classes; otherwise just 4.
  790          */
  791         for (i = 0; i < 8; i++)
  792                 npe_setrxqosentry(sc, i, 0, sc->rx_qid);
  793 
  794         /* disable firewall mode just in case (should be off) */
  795         npe_setfirewallmode(sc, 0);
  796 
  797         sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
  798         sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
  799         ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
  800         if (tx_doneqid == -1) {
  801                 ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0,  2,
  802                         IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
  803                 tx_doneqid = sc->tx_doneqid;
  804         }
  805 
  806         KASSERT(npes[sc->sc_npeid] == NULL,
  807             ("npe %u already setup", sc->sc_npeid));
  808         npes[sc->sc_npeid] = sc;
  809 
  810         return 0;
  811 }
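
/*
 * NB: in the ixpqmgr_qconfig() calls above the fourth argument lines
 * up with the trigger levels described in the comment (1 frame for the
 * rx q, 2 for the shared tx_done q); the rx_free and tx q's pass a
 * NULL callback and are only written/read directly by the driver.
 */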
  812 
  813 static void
  814 npe_deactivate(device_t dev)
  815 {
  816         struct npe_softc *sc = device_get_softc(dev);
  817 
  818         npes[sc->sc_npeid] = NULL;
  819 
  820         /* XXX disable q's */
  821         if (sc->sc_npe != NULL) {
  822                 ixpnpe_stop(sc->sc_npe);
  823                 ixpnpe_detach(sc->sc_npe);
  824         }
  825         if (sc->sc_stats != NULL) {
  826                 bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
  827                 bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
  828                         sc->sc_stats_map);
  829         }
  830         if (sc->sc_stats_tag != NULL)
  831                 bus_dma_tag_destroy(sc->sc_stats_tag);
  832         npe_dma_destroy(sc, &sc->txdma);
  833         npe_dma_destroy(sc, &sc->rxdma);
  834         bus_generic_detach(sc->sc_dev);
  835         if (sc->sc_mii != NULL)
  836                 device_delete_child(sc->sc_dev, sc->sc_mii);
  837 }
  838 
  839 /*
  840  * Change media according to request.
  841  */
  842 static int
  843 npe_ifmedia_update(struct ifnet *ifp)
  844 {
  845         struct npe_softc *sc = ifp->if_softc;
  846         struct mii_data *mii;
  847 
  848         mii = device_get_softc(sc->sc_mii);
  849         NPE_LOCK(sc);
  850         mii_mediachg(mii);
  851         /* XXX push state ourself? */
  852         NPE_UNLOCK(sc);
  853         return (0);
  854 }
  855 
  856 /*
  857  * Notify the world which media we're using.
  858  */
  859 static void
  860 npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
  861 {
  862         struct npe_softc *sc = ifp->if_softc;
  863         struct mii_data *mii;
  864 
  865         mii = device_get_softc(sc->sc_mii);
  866         NPE_LOCK(sc);
  867         mii_pollstat(mii);
  868         ifmr->ifm_active = mii->mii_media_active;
  869         ifmr->ifm_status = mii->mii_media_status;
  870         NPE_UNLOCK(sc);
  871 }
  872 
  873 static void
  874 npe_addstats(struct npe_softc *sc)
  875 {
  876 #define NPEADD(x)       sc->sc_totals.x += be32toh(ns->x)
  877 #define MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
  878         struct ifnet *ifp = sc->sc_ifp;
  879         struct npestats *ns = sc->sc_stats;
  880 
  881         MIBADD(dot3StatsAlignmentErrors);
  882         MIBADD(dot3StatsFCSErrors);
  883         MIBADD(dot3StatsInternalMacReceiveErrors);
  884         NPEADD(RxOverrunDiscards);
  885         NPEADD(RxLearnedEntryDiscards);
  886         NPEADD(RxLargeFramesDiscards);
  887         NPEADD(RxSTPBlockedDiscards);
  888         NPEADD(RxVLANTypeFilterDiscards);
  889         NPEADD(RxVLANIdFilterDiscards);
  890         NPEADD(RxInvalidSourceDiscards);
  891         NPEADD(RxBlackListDiscards);
  892         NPEADD(RxWhiteListDiscards);
  893         NPEADD(RxUnderflowEntryDiscards);
  894         MIBADD(dot3StatsSingleCollisionFrames);
  895         MIBADD(dot3StatsMultipleCollisionFrames);
  896         MIBADD(dot3StatsDeferredTransmissions);
  897         MIBADD(dot3StatsLateCollisions);
  898         MIBADD(dot3StatsExcessiveCollisions);
  899         MIBADD(dot3StatsInternalMacTransmitErrors);
  900         MIBADD(dot3StatsCarrierSenseErrors);
  901         NPEADD(TxLargeFrameDiscards);
  902         NPEADD(TxVLANIdFilterDiscards);
  903 
  904         sc->mibdata.dot3StatsFrameTooLongs +=
  905               be32toh(ns->RxLargeFramesDiscards)
  906             + be32toh(ns->TxLargeFrameDiscards);
  907         sc->mibdata.dot3StatsMissedFrames +=
  908               be32toh(ns->RxOverrunDiscards)
  909             + be32toh(ns->RxUnderflowEntryDiscards);
  910 
  911         ifp->if_oerrors +=
  912                   be32toh(ns->dot3StatsInternalMacTransmitErrors)
  913                 + be32toh(ns->dot3StatsCarrierSenseErrors)
  914                 + be32toh(ns->TxVLANIdFilterDiscards)
  915                 ;
  916         ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
  917                 + be32toh(ns->dot3StatsInternalMacReceiveErrors)
  918                 + be32toh(ns->RxOverrunDiscards)
  919                 + be32toh(ns->RxUnderflowEntryDiscards)
  920                 ;
  921         ifp->if_collisions +=
  922                   be32toh(ns->dot3StatsSingleCollisionFrames)
  923                 + be32toh(ns->dot3StatsMultipleCollisionFrames)
  924                 ;
  925 #undef NPEADD
  926 #undef MIBADD
  927 }
  928 
  929 static void
  930 npe_tick(void *xsc)
  931 {
  932 #define ACK     (NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
  933         struct npe_softc *sc = xsc;
  934         struct mii_data *mii = device_get_softc(sc->sc_mii);
  935         uint32_t msg[2];
  936 
  937         NPE_ASSERT_LOCKED(sc);
  938 
  939         /*
  940          * NB: to avoid sleeping with the softc lock held we
  941          * split the NPE msg processing into two parts.  The
  942          * request for statistics is sent w/o waiting for a
  943          * reply and then on the next tick we retrieve the
  944          * results.  This works because npe_tick is the only
   945          * code that talks via the mailboxes (except at setup).
  946          * This likely can be handled better.
  947          */
  948         if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
  949                 bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
  950                     BUS_DMASYNC_POSTREAD);
  951                 npe_addstats(sc);
  952         }
  953         npe_updatestats(sc);
  954         mii_tick(mii);
  955 
  956         npewatchdog(sc);
  957 
  958         /* schedule next poll */
  959         callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
  960 #undef ACK
  961 }
  962 
  963 static void
  964 npe_setmac(struct npe_softc *sc, u_char *eaddr)
  965 {
  966         WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
  967         WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
  968         WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
  969         WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
  970         WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
  971         WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
  972 }
  973 
  974 static void
  975 npe_getmac(struct npe_softc *sc, u_char *eaddr)
  976 {
  977         /* NB: the unicast address appears to be loaded from EEPROM on reset */
  978         eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
  979         eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
  980         eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
  981         eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
  982         eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
  983         eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
  984 }
  985 
  986 struct txdone {
  987         struct npebuf *head;
  988         struct npebuf **tail;
  989         int count;
  990 };
  991 
  992 static __inline void
  993 npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
  994 {
  995         struct ifnet *ifp = sc->sc_ifp;
  996 
  997         NPE_LOCK(sc);
  998         *td->tail = sc->tx_free;
  999         sc->tx_free = td->head;
 1000         /*
 1001          * We're no longer busy, so clear the busy flag and call the
 1002          * start routine to xmit more packets.
 1003          */
 1004         ifp->if_opackets += td->count;
 1005         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1006         sc->npe_watchdog_timer = 0;
 1007         npestart_locked(ifp);
 1008         NPE_UNLOCK(sc);
 1009 }
 1010 
 1011 /*
 1012  * Q manager callback on tx done queue.  Reap mbufs
 1013  * and return tx buffers to the free list.  Finally
 1014  * restart output.  Note the microcode has only one
 1015  * txdone q wired into it so we must use the NPE ID
 1016  * returned with each npehwbuf to decide where to
 1017  * send buffers.
 1018  */
 1019 static void
 1020 npe_txdone(int qid, void *arg)
 1021 {
 1022 #define P2V(a, dma) \
 1023         &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
 1024         struct npe_softc *sc0 = arg;
 1025         struct npe_softc *sc;
 1026         struct npebuf *npe;
 1027         struct txdone *td, q[NPE_MAX];
 1028         uint32_t entry;
 1029 
 1030         q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
 1031         q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
 1032         q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
 1033         /* XXX max # at a time? */
 1034         while (ixpqmgr_qread(qid, &entry) == 0) {
 1035                 DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
 1036                     __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
 1037 
 1038                 sc = npes[NPE_QM_Q_NPE(entry)];
 1039                 npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
 1040                 m_freem(npe->ix_m);
 1041                 npe->ix_m = NULL;
 1042 
 1043                 td = &q[NPE_QM_Q_NPE(entry)];
 1044                 *td->tail = npe;
 1045                 td->tail = &npe->ix_next;
 1046                 td->count++;
 1047         }
 1048 
 1049         if (q[NPE_A].count)
 1050                 npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
 1051         if (q[NPE_B].count)
 1052                 npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
 1053         if (q[NPE_C].count)
 1054                 npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
 1055 #undef P2V
 1056 }
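
/*
 * NB: P2V above inverts the ix_neaddr computation in npe_dma_setup():
 * ((a) - buf_phys) / sizeof(struct npehwbuf) recovers the buffer
 * index, e.g. (hypothetically) a == buf_phys + 2 * sizeof(struct
 * npehwbuf) yields &dma->buf[2].
 */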
 1057 
 1058 static int
 1059 npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
 1060 {
 1061         bus_dma_segment_t segs[1];
 1062         struct npedma *dma = &sc->rxdma;
 1063         struct npehwbuf *hw;
 1064         int error, nseg;
 1065 
 1066         if (m == NULL) {
 1067                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1068                 if (m == NULL)
 1069                         return ENOBUFS;
 1070         }
 1071         KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
 1072                 ("ext_size %d", m->m_ext.ext_size));
 1073         m->m_pkthdr.len = m->m_len = 1536;
 1074         /* backload payload and align ip hdr */
 1075         m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
 1076         bus_dmamap_unload(dma->mtag, npe->ix_map);
 1077         error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
 1078                         segs, &nseg, 0);
 1079         if (error != 0) {
 1080                 m_freem(m);
 1081                 return error;
 1082         }
 1083         hw = npe->ix_hw;
 1084         hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
  1085         /* NB: NPE requires the length to be a multiple of 64 */
  1086         /* NB: buffer length goes in the upper 16 bits of the word */
 1087         hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
 1088         hw->ix_ne[0].next = 0;
 1089         bus_dmamap_sync(dma->buf_tag, dma->buf_map, 
 1090             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1091         npe->ix_m = m;
 1092         /* Flush the memory in the mbuf */
 1093         bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
 1094         return 0;
 1095 }
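
/*
 * NB: ix_ne[0].len is a packed word: the buffer length loaded above
 * occupies the upper 16 bits (hence the << 16), and on completion the
 * NPE returns the frame length in the lower 16 bits, which
 * npe_rxdone() extracts with "& 0xffff".
 */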
 1096 
 1097 /*
 1098  * RX q processing for a specific NPE.  Claim entries
 1099  * from the hardware queue and pass the frames up the
 1100  * stack. Pass the rx buffers to the free list.
 1101  */
 1102 static int
 1103 npe_rxdone(int qid, void *arg)
 1104 {
 1105 #define P2V(a, dma) \
 1106         &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
 1107         struct npe_softc *sc = arg;
 1108         struct npedma *dma = &sc->rxdma;
 1109         uint32_t entry;
 1110         int rx_npkts = 0;
 1111 
 1112         while (ixpqmgr_qread(qid, &entry) == 0) {
 1113                 struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
 1114                 struct mbuf *m;
 1115 
 1116                 bus_dmamap_sync(dma->buf_tag, dma->buf_map,
 1117                     BUS_DMASYNC_POSTREAD);
 1118                 DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
 1119                     __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
 1120                 /*
 1121                  * Allocate a new mbuf to replenish the rx buffer.
 1122                  * If doing so fails we drop the rx'd frame so we
 1123                  * can reuse the previous mbuf.  When we're able to
 1124                  * allocate a new mbuf dispatch the mbuf w/ rx'd
 1125                  * data up the stack and replace it with the newly
 1126                  * allocated one.
 1127                  */
 1128                 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1129                 if (m != NULL) {
 1130                         struct mbuf *mrx = npe->ix_m;
 1131                         struct npehwbuf *hw = npe->ix_hw;
 1132                         struct ifnet *ifp = sc->sc_ifp;
 1133 
 1134                         /* Flush mbuf memory for rx'd data */
 1135                         bus_dmamap_sync(dma->mtag, npe->ix_map,
 1136                             BUS_DMASYNC_POSTREAD);
 1137 
 1138                         /* set m_len etc. per rx frame size */
 1139                         mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
 1140                         mrx->m_pkthdr.len = mrx->m_len;
 1141                         mrx->m_pkthdr.rcvif = ifp;
 1142 
 1143                         ifp->if_ipackets++;
 1144                         ifp->if_input(ifp, mrx);
 1145                         rx_npkts++;
 1146                 } else {
 1147                         /* discard frame and re-use mbuf */
 1148                         m = npe->ix_m;
 1149                 }
 1150                 if (npe_rxbuf_init(sc, npe, m) == 0) {
 1151                         /* return npe buf to rx free list */
 1152                         ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
 1153                 } else {
 1154                         /* XXX should not happen */
 1155                 }
 1156         }
 1157         return rx_npkts;
 1158 #undef P2V
 1159 }
 1160 
 1161 #ifdef DEVICE_POLLING
 1162 static int
 1163 npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1164 {
 1165         struct npe_softc *sc = ifp->if_softc;
 1166         int rx_npkts = 0;
 1167 
 1168         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1169                 rx_npkts = npe_rxdone(sc->rx_qid, sc);
 1170                 npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */
 1171         }
 1172         return rx_npkts;
 1173 }
 1174 #endif /* DEVICE_POLLING */
 1175 
 1176 static void
 1177 npe_startxmit(struct npe_softc *sc)
 1178 {
 1179         struct npedma *dma = &sc->txdma;
 1180         int i;
 1181 
 1182         NPE_ASSERT_LOCKED(sc);
 1183         sc->tx_free = NULL;
 1184         for (i = 0; i < dma->nbuf; i++) {
 1185                 struct npebuf *npe = &dma->buf[i];
 1186                 if (npe->ix_m != NULL) {
 1187                         /* NB: should not happen */
 1188                         device_printf(sc->sc_dev,
 1189                             "%s: free mbuf at entry %u\n", __func__, i);
 1190                         m_freem(npe->ix_m);
 1191                 }
 1192                 npe->ix_m = NULL;
 1193                 npe->ix_next = sc->tx_free;
 1194                 sc->tx_free = npe;
 1195         }
 1196 }
 1197 
 1198 static void
 1199 npe_startrecv(struct npe_softc *sc)
 1200 {
 1201         struct npedma *dma = &sc->rxdma;
 1202         struct npebuf *npe;
 1203         int i;
 1204 
 1205         NPE_ASSERT_LOCKED(sc);
 1206         for (i = 0; i < dma->nbuf; i++) {
 1207                 npe = &dma->buf[i];
 1208                 npe_rxbuf_init(sc, npe, npe->ix_m);
 1209                 /* set npe buf on rx free list */
 1210                 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
 1211         }
 1212 }
 1213 
 1214 /*
 1215  * Reset and initialize the chip
 1216  */
 1217 static void
 1218 npeinit_locked(void *xsc)
 1219 {
 1220         struct npe_softc *sc = xsc;
 1221         struct ifnet *ifp = sc->sc_ifp;
 1222 
 1223         NPE_ASSERT_LOCKED(sc);
  1224         if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;        /* XXX */
 1225 
 1226         /*
 1227          * Reset MAC core.
 1228          */
 1229         npe_mac_reset(sc);
 1230 
  1231         /* disable transmitter and receiver in the MAC */
 1232         WR4(sc, NPE_MAC_RX_CNTRL1,
 1233             RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
 1234         WR4(sc, NPE_MAC_TX_CNTRL1,
 1235             RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
 1236 
 1237         /*
 1238          * Set the MAC core registers.
 1239          */
  1240         WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);   /* clock ratio: for ixp4xx */
 1241         WR4(sc, NPE_MAC_TX_CNTRL2,      0xf);   /* max retries */
 1242         WR4(sc, NPE_MAC_RANDOM_SEED,    0x8);   /* LFSR back-off seed */
 1243         /* thresholds determined by NPE firmware FS */
 1244         WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
 1245         WR4(sc, NPE_MAC_THRESH_P_FULL,  0x30);
 1246         WR4(sc, NPE_MAC_BUF_SIZE_TX,    0x8);   /* tx fifo threshold (bytes) */
 1247         WR4(sc, NPE_MAC_TX_DEFER,       0x15);  /* for single deferral */
 1248         WR4(sc, NPE_MAC_RX_DEFER,       0x16);  /* deferral on inter-frame gap*/
 1249         WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);   /* for 2-part deferral */
 1250         WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);   /* for 2-part deferral */
 1251         WR4(sc, NPE_MAC_SLOT_TIME,      0x80);  /* assumes MII mode */
 1252 
 1253         WR4(sc, NPE_MAC_TX_CNTRL1,
 1254                   NPE_TX_CNTRL1_RETRY           /* retry failed xmits */
 1255                 | NPE_TX_CNTRL1_FCS_EN          /* append FCS */
  1256                 | NPE_TX_CNTRL1_2DEFER          /* 2-part deferral */
 1257                 | NPE_TX_CNTRL1_PAD_EN);        /* pad runt frames */
 1258         /* XXX pad strip? */
  1259         /* enable pause frame handling */
 1260         WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
 1261         WR4(sc, NPE_MAC_RX_CNTRL2, 0);
 1262 
 1263         npe_setmac(sc, IF_LLADDR(ifp));
 1264         npe_setportaddress(sc, IF_LLADDR(ifp));
 1265         npe_setmcast(sc);
 1266 
 1267         npe_startxmit(sc);
 1268         npe_startrecv(sc);
 1269 
 1270         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1271         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1272         sc->npe_watchdog_timer = 0;             /* just in case */
 1273 
  1274         /* enable transmitter and receiver in the MAC */
 1275         WR4(sc, NPE_MAC_RX_CNTRL1,
 1276             RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
 1277         WR4(sc, NPE_MAC_TX_CNTRL1,
 1278             RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);
 1279 
 1280         callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
 1281 }
 1282 
 1283 static void
 1284 npeinit(void *xsc)
 1285 {
 1286         struct npe_softc *sc = xsc;
 1287         NPE_LOCK(sc);
 1288         npeinit_locked(sc);
 1289         NPE_UNLOCK(sc);
 1290 }
 1291 
 1292 /*
 1293  * Dequeue packets and place on the h/w transmit queue.
 1294  */
 1295 static void
 1296 npestart_locked(struct ifnet *ifp)
 1297 {
 1298         struct npe_softc *sc = ifp->if_softc;
 1299         struct npebuf *npe;
 1300         struct npehwbuf *hw;
 1301         struct mbuf *m, *n;
 1302         struct npedma *dma = &sc->txdma;
 1303         bus_dma_segment_t segs[NPE_MAXSEG];
 1304         int nseg, len, error, i;
 1305         uint32_t next;
 1306 
 1307         NPE_ASSERT_LOCKED(sc);
 1308         /* XXX can this happen? */
 1309         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
 1310                 return;
 1311 
 1312         while (sc->tx_free != NULL) {
 1313                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1314                 if (m == NULL) {
 1315                         /* XXX? */
 1316                         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1317                         return;
 1318                 }
 1319                 npe = sc->tx_free;
 1320                 bus_dmamap_unload(dma->mtag, npe->ix_map);
 1321                 error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
 1322                     m, segs, &nseg, 0);
 1323                 if (error == EFBIG) {
 1324                         n = m_collapse(m, M_NOWAIT, NPE_MAXSEG);
 1325                         if (n == NULL) {
 1326                                 if_printf(ifp, "%s: too many fragments %u\n",
 1327                                     __func__, nseg);
 1328                                 m_freem(m);
 1329                                 return; /* XXX? */
 1330                         }
 1331                         m = n;
 1332                         error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
 1333                             m, segs, &nseg, 0);
 1334                 }
 1335                 if (error != 0 || nseg == 0) {
 1336                         if_printf(ifp, "%s: error %u nseg %u\n",
 1337                             __func__, error, nseg);
 1338                         m_freem(m);
 1339                         return; /* XXX? */
 1340                 }
 1341                 sc->tx_free = npe->ix_next;
 1342 
 1343                 bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);
 1344
 1345                 /*
 1346                  * Tap off here if there is a bpf listener.
 1347                  */
 1348                 BPF_MTAP(ifp, m);
 1349 
 1350                 npe->ix_m = m;
 1351                 hw = npe->ix_hw;
 1352                 len = m->m_pkthdr.len;
 1353                 next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
 1354                 for (i = 0; i < nseg; i++) {
 1355                         hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
 1356                         hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
 1357                         hw->ix_ne[i].next = htobe32(next);
 1358 
 1359                         len = 0;                /* zero for segments > 1 */
 1360                         next += sizeof(hw->ix_ne[0]);
 1361                 }
 1362                 hw->ix_ne[i-1].next = 0;        /* zero last in chain */
 1363                 bus_dmamap_sync(dma->buf_tag, dma->buf_map,
 1364                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1365 
 1366                 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
 1367                     __func__, sc->tx_qid, npe->ix_neaddr,
 1368                     hw->ix_ne[0].data, hw->ix_ne[0].len);
 1369                 /* stick it on the tx q */
 1370                 /* XXX add vlan priority */
 1371                 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
 1372 
 1373                 sc->npe_watchdog_timer = 5;
 1374         }
 1375         if (sc->tx_free == NULL)
 1376                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1377 }
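
/*
 * Illustrative sketch (not part of the original driver): how the loop
 * in npestart_locked() packs the NPE tx descriptor length word.  The
 * high 16 bits carry the segment length; the low 16 bits carry the
 * total packet length, but only in the first descriptor of a chain
 * (len is zeroed after the first pass).  Words are stored big-endian
 * for the NPE.  The segment sizes below are hypothetical.
 */
#if 0
static void
npe_txdesc_pack_example(void)
{
        const uint16_t pktlen = 1514;   /* hypothetical total frame length */
        uint32_t w0, w1;

        w0 = htobe32((1000u << 16) | pktlen);   /* seg 0: 1000 bytes + pkt len */
        w1 = htobe32((514u << 16) | 0);         /* seg 1: 514 bytes, len 0 */
        printf("ne_len[0]=0x%08x ne_len[1]=0x%08x\n", be32toh(w0), be32toh(w1));
}
#endif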
 1378 
 1379 void
 1380 npestart(struct ifnet *ifp)
 1381 {
 1382         struct npe_softc *sc = ifp->if_softc;
 1383         NPE_LOCK(sc);
 1384         npestart_locked(ifp);
 1385         NPE_UNLOCK(sc);
 1386 }
 1387 
 1388 static void
 1389 npe_stopxmit(struct npe_softc *sc)
 1390 {
 1391         struct npedma *dma = &sc->txdma;
 1392         int i;
 1393 
 1394         NPE_ASSERT_LOCKED(sc);
 1395 
 1396         /* XXX qmgr */
 1397         for (i = 0; i < dma->nbuf; i++) {
 1398                 struct npebuf *npe = &dma->buf[i];
 1399 
 1400                 if (npe->ix_m != NULL) {
 1401                         bus_dmamap_unload(dma->mtag, npe->ix_map);
 1402                         m_freem(npe->ix_m);
 1403                         npe->ix_m = NULL;
 1404                 }
 1405         }
 1406 }
 1407 
 1408 static void
 1409 npe_stoprecv(struct npe_softc *sc)
 1410 {
 1411         struct npedma *dma = &sc->rxdma;
 1412         int i;
 1413 
 1414         NPE_ASSERT_LOCKED(sc);
 1415 
 1416         /* XXX qmgr */
 1417         for (i = 0; i < dma->nbuf; i++) {
 1418                 struct npebuf *npe = &dma->buf[i];
 1419 
 1420                 if (npe->ix_m != NULL) {
 1421                         bus_dmamap_unload(dma->mtag, npe->ix_map);
 1422                         m_freem(npe->ix_m);
 1423                         npe->ix_m = NULL;
 1424                 }
 1425         }
 1426 }
 1427 
 1428 /*
 1429  * Turn off interrupts, and stop the nic.
 1430  */
 1431 void
 1432 npestop(struct npe_softc *sc)
 1433 {
 1434         struct ifnet *ifp = sc->sc_ifp;
 1435 
 1436         /* disable transmitter and receiver in the MAC */
 1437         WR4(sc, NPE_MAC_RX_CNTRL1,
 1438             RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
 1439         WR4(sc, NPE_MAC_TX_CNTRL1,
 1440             RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
 1441 
 1442         sc->npe_watchdog_timer = 0;
 1443         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1444 
 1445         callout_stop(&sc->tick_ch);
 1446 
 1447         npe_stopxmit(sc);
 1448         npe_stoprecv(sc);
 1449         /* XXX go into loopback & drain q's? */
 1450         /* XXX but beware of disabling tx above */
 1451 
 1452         /*
 1453          * The MAC core rx/tx disable may leave the MAC hardware in an
 1454          * unpredictable state. A hw reset is executed before resetting
 1455          * all the MAC parameters to a known value.
 1456          */
 1457         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
 1458         DELAY(NPE_MAC_RESET_DELAY);
 1459         WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
 1460         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
 1461 }
 1462 
 1463 void
 1464 npewatchdog(struct npe_softc *sc)
 1465 {
 1466         NPE_ASSERT_LOCKED(sc);
 1467 
 1468         if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
 1469                 return;
 1470 
 1471         device_printf(sc->sc_dev, "watchdog timeout\n");
 1472         sc->sc_ifp->if_oerrors++;
 1473 
 1474         npeinit_locked(sc);
 1475 }
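
/*
 * Explanatory note (not original driver text): npe_watchdog_timer == 0
 * means "disarmed", so the test above fires only on the 1 -> 0
 * transition.  Arming the timer with N (npestart_locked() sets 5)
 * makes it expire after N ticks with no transmit progress.
 */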
 1476 
 1477 static int
 1478 npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1479 {
 1480         struct npe_softc *sc = ifp->if_softc;
 1481         struct mii_data *mii;
 1482         struct ifreq *ifr = (struct ifreq *)data;
 1483         int error = 0;
 1484 #ifdef DEVICE_POLLING
 1485         int mask;
 1486 #endif
 1487 
 1488         switch (cmd) {
 1489         case SIOCSIFFLAGS:
 1490                 NPE_LOCK(sc);
 1491                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1492                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1493                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1494                         npestop(sc);
 1495                 } else {
 1496                         /* reinitialize card on any parameter change */
 1497                         npeinit_locked(sc);
 1498                 }
 1499                 NPE_UNLOCK(sc);
 1500                 break;
 1501 
 1502         case SIOCADDMULTI:
 1503         case SIOCDELMULTI:
 1504                 /* update multicast filter list. */
 1505                 NPE_LOCK(sc);
 1506                 npe_setmcast(sc);
 1507                 NPE_UNLOCK(sc);
 1508                 error = 0;
 1509                 break;
 1510 
 1511         case SIOCSIFMEDIA:
 1512         case SIOCGIFMEDIA:
 1513                 mii = device_get_softc(sc->sc_mii);
 1514                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1515                 break;
 1516 
 1517 #ifdef DEVICE_POLLING
 1518         case SIOCSIFCAP:
 1519                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 1520                 if (mask & IFCAP_POLLING) {
 1521                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 1522                                 error = ether_poll_register(npe_poll, ifp);
 1523                                 if (error)
 1524                                         return error;
 1525                                 NPE_LOCK(sc);
 1526                                 /* disable callbacks XXX txdone is shared */
 1527                                 ixpqmgr_notify_disable(sc->rx_qid);
 1528                                 ixpqmgr_notify_disable(sc->tx_doneqid);
 1529                                 ifp->if_capenable |= IFCAP_POLLING;
 1530                                 NPE_UNLOCK(sc);
 1531                         } else {
 1532                                 error = ether_poll_deregister(ifp);
 1533                                 /* NB: always enable qmgr callbacks */
 1534                                 NPE_LOCK(sc);
 1535                                 /* enable qmgr callbacks */
 1536                                 ixpqmgr_notify_enable(sc->rx_qid,
 1537                                     IX_QMGR_Q_SOURCE_ID_NOT_E);
 1538                                 ixpqmgr_notify_enable(sc->tx_doneqid,
 1539                                     IX_QMGR_Q_SOURCE_ID_NOT_E);
 1540                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1541                                 NPE_UNLOCK(sc);
 1542                         }
 1543                 }
 1544                 break;
 1545 #endif
 1546         default:
 1547                 error = ether_ioctl(ifp, cmd, data);
 1548                 break;
 1549         }
 1550         return error;
 1551 }
 1552 
 1553 /*
 1554  * Setup a traffic class -> rx queue mapping.
 1555  */
 1556 static int
 1557 npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
 1558 {
 1559         uint32_t msg[2];
 1560 
 1561         msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
 1562         msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
 1563         return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
 1564 }
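
/*
 * Field layout implied by the shifts above (inferred from the code,
 * not from NPE firmware documentation):
 *
 *   msg[0]: [31:24] NPE_SETRXQOSENTRY  [23:20] NPE id  [7:0] class index
 *   msg[1]: [31:24] traffic class  [23] entry valid  [22:16] queue id
 *           [7:4] queue id (repeated)
 */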
 1565 
 1566 static int
 1567 npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
 1568 {
 1569         uint32_t msg[2];
 1570 
 1571         msg[0] = (NPE_SETPORTADDRESS << 24)
 1572                | (sc->sc_npeid << 20)
 1573                | (mac[0] << 8)
 1574                | (mac[1] << 0);
 1575         msg[1] = (mac[2] << 24)
 1576                | (mac[3] << 16)
 1577                | (mac[4] << 8)
 1578                | (mac[5] << 0);
 1579         return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
 1580 }
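
/*
 * Worked example (not original driver text): for the hypothetical
 * address 00:11:22:33:44:55 the code above yields
 *
 *   msg[0] = (NPE_SETPORTADDRESS << 24) | (npeid << 20) | 0x0011
 *   msg[1] = 0x22334455
 *
 * i.e. mac[0..1] occupy the low 16 bits of msg[0] and mac[2..5] fill
 * msg[1] most-significant byte first.
 */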
 1581 
 1582 static int
 1583 npe_setfirewallmode(struct npe_softc *sc, int onoff)
 1584 {
 1585         uint32_t msg[2];
 1586 
 1587         /* XXX honor onoff */
 1588         msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
 1589         msg[1] = 0;
 1590         return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
 1591 }
 1592 
 1593 /*
 1594  * Update and reset the statistics in the NPE.
 1595  */
 1596 static int
 1597 npe_updatestats(struct npe_softc *sc)
 1598 {
 1599         uint32_t msg[2];
 1600 
 1601         msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
 1602         msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
 1603         return ixpnpe_sendmsg_async(sc->sc_npe, msg);
 1604 }
 1605 
 1606 #if 0
 1607 /*
 1608  * Get the current statistics block.
 1609  */
 1610 static int
 1611 npe_getstats(struct npe_softc *sc)
 1612 {
 1613         uint32_t msg[2];
 1614 
 1615         msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
 1616         msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
 1617         return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
 1618 }
 1619 
 1620 /*
 1621  * Query the image id of the loaded firmware.
 1622  */
 1623 static uint32_t
 1624 npe_getimageid(struct npe_softc *sc)
 1625 {
 1626         uint32_t msg[2];
 1627 
 1628         msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
 1629         msg[1] = 0;
 1630         return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
 1631 }
 1632 
 1633 /*
 1634  * Enable/disable loopback.
 1635  */
 1636 static int
 1637 npe_setloopback(struct npe_softc *sc, int ena)
 1638 {
 1639         uint32_t msg[2];
 1640 
 1641         msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
 1642         msg[1] = 0;
 1643         return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
 1644 }
 1645 #endif
 1646 
 1647 static void
 1648 npe_child_detached(device_t dev, device_t child)
 1649 {
 1650         struct npe_softc *sc;
 1651 
 1652         sc = device_get_softc(dev);
 1653         if (child == sc->sc_mii)
 1654                 sc->sc_mii = NULL;
 1655 }
 1656 
 1657 /*
 1658  * MII bus support routines.
 1659  */
 1660 #define MII_RD4(sc, reg)        bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
 1661 #define MII_WR4(sc, reg, v) \
 1662         bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
 1663 
 1664 static uint32_t
 1665 npe_mii_mdio_read(struct npe_softc *sc, int reg)
 1666 {
 1667         uint32_t v;
 1668 
 1669         /* NB: registers are known to be sequential */
 1670         v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
 1671         v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
 1672         v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
 1673         v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
 1674         return v;
 1675 }
 1676 
 1677 static void
 1678 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
 1679 {
 1680         /* NB: registers are known to be sequential */
 1681         MII_WR4(sc, reg+0, cmd & 0xff);
 1682         MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
 1683         MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
 1684         MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
 1685 }
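
/*
 * Worked example (not original driver text): the MDIO command/status
 * word is spread across four byte-wide registers at 4-byte strides,
 * least-significant byte first.  Writing cmd = 0x12345678 stores
 *
 *   reg+0 = 0x78, reg+4 = 0x56, reg+8 = 0x34, reg+12 = 0x12
 *
 * and npe_mii_mdio_read() reassembles the same 0x12345678.
 */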
 1686 
 1687 static int
 1688 npe_mii_mdio_wait(struct npe_softc *sc)
 1689 {
 1690         uint32_t v;
 1691         int i;
 1692 
 1693         /* NB: typically this takes 25-30 trips */
 1694         for (i = 0; i < 1000; i++) {
 1695                 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
 1696                 if ((v & NPE_MII_GO) == 0)
 1697                         return 1;
 1698                 DELAY(1);
 1699         }
 1700         device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
 1701             __func__, v);
 1702         return 0;               /* NB: timeout */
 1703 }
 1704 
 1705 static int
 1706 npe_miibus_readreg(device_t dev, int phy, int reg)
 1707 {
 1708         struct npe_softc *sc = device_get_softc(dev);
 1709         uint32_t v;
 1710 
 1711         v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
 1712         npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
 1713         if (npe_mii_mdio_wait(sc))
 1714                 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
 1715         else
 1716                 v = 0xffff | NPE_MII_READ_FAIL;
 1717         return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
 1718 }
 1719 
 1720 static int
 1721 npe_miibus_writereg(device_t dev, int phy, int reg, int data)
 1722 {
 1723         struct npe_softc *sc = device_get_softc(dev);
 1724         uint32_t v;
 1725 
 1726         v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
 1727           | data | NPE_MII_WRITE
 1728           | NPE_MII_GO;
 1729         npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
 1730         /* XXX complain about timeout */
 1731         (void) npe_mii_mdio_wait(sc);
 1732         return (0);
 1733 }
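
/*
 * Note (not original driver text): both MII accessors build the same
 * command word: the PHY address at NPE_MII_ADDR_SHL, the register
 * number at NPE_MII_REG_SHL, and NPE_MII_GO to start the cycle; a
 * write additionally ORs in the 16-bit data and NPE_MII_WRITE.  The
 * shift and flag constants are defined in the driver's register
 * header.  A failed read (NPE_MII_READ_FAIL set, or a wait timeout)
 * is reported as 0xffff, the conventional "no device" value on MII
 * buses.
 */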
 1734 
 1735 static void
 1736 npe_miibus_statchg(device_t dev)
 1737 {
 1738         struct npe_softc *sc = device_get_softc(dev);
 1739         struct mii_data *mii = device_get_softc(sc->sc_mii);
 1740         uint32_t tx1, rx1;
 1741 
 1742         /* sync MAC duplex state */
 1743         tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
 1744         rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
 1745         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
 1746                 tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
 1747                 rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
 1748         } else {
 1749                 tx1 |= NPE_TX_CNTRL1_DUPLEX;
 1750                 rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
 1751         }
 1752         WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
 1753         WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
 1754 }
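
/*
 * Note (not original driver text): observe the polarity above: the
 * full-duplex branch clears NPE_TX_CNTRL1_DUPLEX and enables pause
 * frames, i.e. a set DUPLEX bit selects half-duplex operation in
 * this MAC.
 */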
 1755 
 1756 static device_method_t npe_methods[] = {
 1757         /* Device interface */
 1758         DEVMETHOD(device_probe,         npe_probe),
 1759         DEVMETHOD(device_attach,        npe_attach),
 1760         DEVMETHOD(device_detach,        npe_detach),
 1761 
 1762         /* Bus interface */
 1763         DEVMETHOD(bus_child_detached,   npe_child_detached),
 1764 
 1765         /* MII interface */
 1766         DEVMETHOD(miibus_readreg,       npe_miibus_readreg),
 1767         DEVMETHOD(miibus_writereg,      npe_miibus_writereg),
 1768         DEVMETHOD(miibus_statchg,       npe_miibus_statchg),
 1769 
 1770         { 0, 0 }
 1771 };
 1772 
 1773 static driver_t npe_driver = {
 1774         "npe",
 1775         npe_methods,
 1776         sizeof(struct npe_softc),
 1777 };
 1778 
 1779 DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
 1780 DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
 1781 MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
 1782 MODULE_DEPEND(npe, miibus, 1, 1, 1);
 1783 MODULE_DEPEND(npe, ether, 1, 1, 1);
