FreeBSD/Linux Kernel Cross Reference
sys/arm/xscale/ixp425/if_npe.c


/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
        struct npebuf   *ix_next;       /* chain to next buffer */
        void            *ix_m;          /* backpointer to mbuf */
        bus_dmamap_t    ix_map;         /* bus dma map for associated data */
        struct npehwbuf *ix_hw;         /* associated h/w block */
        uint32_t        ix_neaddr;      /* phys address of ix_hw */
};

struct npedma {
        const char*     name;
        int             nbuf;           /* # npebuf's allocated */
        bus_dma_tag_t   mtag;           /* bus dma tag for mbuf data */
        struct npehwbuf *hwbuf;         /* NPE h/w buffers */
        bus_dma_tag_t   buf_tag;        /* tag+map for NPE buffers */
        bus_dmamap_t    buf_map;
        bus_addr_t      buf_phys;       /* phys addr of buffers */
        struct npebuf   *buf;           /* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
        /* XXX mii requires this be first; do not move! */
        struct ifnet    *sc_ifp;        /* ifnet pointer */
        struct mtx      sc_mtx;         /* basically a perimeter lock */
        device_t        sc_dev;
        bus_space_tag_t sc_iot;
        bus_space_handle_t sc_ioh;      /* MAC register window */
        device_t        sc_mii;         /* child miibus */
        bus_space_handle_t sc_miih;     /* MII register window */
        struct ixpnpe_softc *sc_npe;    /* NPE support */
        int             sc_debug;       /* DPRINTF* control */
        int             sc_tickinterval;
        struct callout  tick_ch;        /* Tick callout */
        int             npe_watchdog_timer;
        struct npedma   txdma;
        struct npebuf   *tx_free;       /* list of free tx buffers */
        struct npedma   rxdma;
        bus_addr_t      buf_phys;       /* XXX for returning a value */
        int             rx_qid;         /* rx qid */
        int             rx_freeqid;     /* rx free buffers qid */
        int             tx_qid;         /* tx qid */
        int             tx_doneqid;     /* tx completed qid */
        int             sc_phy;         /* PHY id */
        struct ifmib_iso_8802_3 mibdata;
        bus_dma_tag_t   sc_stats_tag;   /* bus dma tag for stats block */
        struct npestats *sc_stats;
        bus_dmamap_t    sc_stats_map;
        bus_addr_t      sc_stats_phys;  /* phys addr of sc_stats */
};

/*
 * Per-unit static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
        const char      *desc;          /* device description */
        int             npeid;          /* NPE assignment */
        uint32_t        imageid;        /* NPE firmware image id */
        uint32_t        regbase;
        int             regsize;
        uint32_t        miibase;
        int             miisize;
        int             phy;            /* phy id */
        uint8_t         rx_qid;
        uint8_t         rx_freeqid;
        uint8_t         tx_qid;
        uint8_t         tx_doneqid;
} npeconfig[NPE_PORTS_MAX] = {
        { .desc         = "IXP NPE-B",
          .npeid        = NPE_B,
          .imageid      = IXP425_NPE_B_IMAGEID,
          .regbase      = IXP425_MAC_A_HWBASE,
          .regsize      = IXP425_MAC_A_SIZE,
          .miibase      = IXP425_MAC_A_HWBASE,
          .miisize      = IXP425_MAC_A_SIZE,
          .phy          = 0,
          .rx_qid       = 4,
          .rx_freeqid   = 27,
          .tx_qid       = 24,
          .tx_doneqid   = 31
        },
        { .desc         = "IXP NPE-C",
          .npeid        = NPE_C,
          .imageid      = IXP425_NPE_C_IMAGEID,
          .regbase      = IXP425_MAC_B_HWBASE,
          .regsize      = IXP425_MAC_B_SIZE,
          .miibase      = IXP425_MAC_A_HWBASE,
          .miisize      = IXP425_MAC_A_SIZE,
          .phy          = 1,
          .rx_qid       = 12,
          .rx_freeqid   = 28,
          .tx_qid       = 25,
          .tx_doneqid   = 31
        },
};
static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
        return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
#define NPE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
        mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
            MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
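
/*
 * NB: sc_mtx is a single perimeter lock (see the softc above); the usual
 * pattern is NPE_LOCK(sc); ... touch softc/hardware state ...;
 * NPE_UNLOCK(sc), with internal helpers documenting the requirement via
 * NPE_ASSERT_LOCKED().
 */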

static devclass_t npe_devclass;

static int      npe_activate(device_t dev);
static void     npe_deactivate(device_t dev);
static int      npe_ifmedia_update(struct ifnet *ifp);
static void     npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void     npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void     npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void     npe_txdone(int qid, void *arg);
static int      npe_rxbuf_init(struct npe_softc *, struct npebuf *,
                        struct mbuf *);
static void     npe_rxdone(int qid, void *arg);
static void     npeinit(void *);
static void     npestart_locked(struct ifnet *);
static void     npestart(struct ifnet *);
static void     npestop(struct npe_softc *);
static void     npewatchdog(struct npe_softc *);
static int      npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int      npe_setrxqosentry(struct npe_softc *, int classix,
                        int trafclass, int qid);
static int      npe_updatestats(struct npe_softc *);
#if 0
static int      npe_getstats(struct npe_softc *);
static uint32_t npe_getimageid(struct npe_softc *);
static int      npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
           0, "IXP425 NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define DPRINTF(sc, fmt, ...) do {                                      \
        if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);  \
} while (0)
#define DPRINTFn(n, sc, fmt, ...) do {                                  \
        if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
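
/*
 * Example usage (illustrative): DPRINTF(sc, "%s: entry 0x%x\n", __func__,
 * entry) prints only when the per-device debug sysctl (seeded from
 * hw.npe.debug) is non-zero; DPRINTFn(2, ...) additionally requires a
 * debug level of at least 2.
 */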
static int npe_tickinterval = 3;                /* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
            0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static  int npe_rxbuf = 64;             /* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
            0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static  int npe_txbuf = 128;            /* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
            0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
npe_probe(device_t dev)
{
        int unit = device_get_unit(dev);

        if (unit >= NPE_PORTS_MAX) {
                device_printf(dev, "unit %d not supported\n", unit);
                return EINVAL;
        }
        /* XXX check feature register to see if enabled */
        device_set_desc(dev, npeconfig[unit].desc);
        return 0;
}

static int
npe_attach(device_t dev)
{
        struct npe_softc *sc = device_get_softc(dev);
        struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(dev);
        struct ifnet *ifp = NULL;
        int error;
        u_char eaddr[6];

        sc->sc_dev = dev;
        sc->sc_iot = sa->sc_iot;
        NPE_LOCK_INIT(sc);
        callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
        sc->sc_debug = npe_debug;
        sc->sc_tickinterval = npe_tickinterval;

        sc->sc_npe = ixpnpe_attach(dev);
        if (sc->sc_npe == NULL) {
                error = EIO;            /* XXX */
                goto out;
        }

        error = npe_activate(dev);
        if (error)
                goto out;

        npe_getmac(sc, eaddr);

        sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
        error = mii_attach(dev, &sc->sc_mii, ifp, npe_ifmedia_update,
            npe_ifmedia_status, BMSR_DEFCAPMASK, sc->sc_phy, MII_OFFSET_ANY,
            0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto out;
        }

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = npestart;
        ifp->if_ioctl = npeioctl;
        ifp->if_init = npeinit;
        IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_linkmib = &sc->mibdata;
        ifp->if_linkmiblen = sizeof(sc->mibdata);
        sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
        SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
            CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

        ether_ifattach(ifp, eaddr);
        return 0;
out:
        npe_deactivate(dev);
        if (ifp != NULL)
                if_free(ifp);
        return error;
}

static int
npe_detach(device_t dev)
{
        struct npe_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
        if (ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif
        npestop(sc);
        if (ifp != NULL) {
                ether_ifdetach(ifp);
                if_free(ifp);
        }
        NPE_LOCK_DESTROY(sc);
        npe_deactivate(dev);
        if (sc->sc_npe != NULL)
                ixpnpe_detach(sc->sc_npe);
        return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
        int i;

        if (ifp->if_flags & IFF_PROMISC) {
                memset(mask, 0, ETHER_ADDR_LEN);
                memset(addr, 0, ETHER_ADDR_LEN);
        } else if (ifp->if_flags & IFF_ALLMULTI) {
                static const uint8_t allmulti[ETHER_ADDR_LEN] =
                    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
                memcpy(mask, allmulti, ETHER_ADDR_LEN);
                memcpy(addr, allmulti, ETHER_ADDR_LEN);
        } else {
                uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
                struct ifmultiaddr *ifma;
                const uint8_t *mac;

                memset(clr, 0, ETHER_ADDR_LEN);
                memset(set, 0xff, ETHER_ADDR_LEN);

                IF_ADDR_LOCK(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
                        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                                clr[i] |= mac[i];
                                set[i] &= mac[i];
                        }
                }
                IF_ADDR_UNLOCK(ifp);

                for (i = 0; i < ETHER_ADDR_LEN; i++) {
                        mask[i] = set[i] | ~clr[i];
                        addr[i] = set[i];
                }
        }
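
        /*
         * Example (illustrative): with multicast addresses 01:00:5e:00:00:01
         * and 01:00:5e:00:00:02, "set" keeps the bits common to both and
         * "clr" accumulates their union, so mask = set | ~clr is zero only
         * in bit positions where the addresses disagree; the MAC then
         * accepts any destination matching addr under that mask.
         */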

        /*
         * Write the mask and address registers.
         */
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
                WR4(sc, NPE_MAC_ADDR(i), addr[i]);
        }
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct npe_softc *sc;

        if (error != 0)
                return;
        sc = (struct npe_softc *)arg;
        sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
        const char *name, int nbuf, int maxseg)
{
        int error, i;

        memset(dma, 0, sizeof(*dma));

        dma->name = name;
        dma->nbuf = nbuf;

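        /*
         * NB: two dma tags are created below: mtag maps individual rx/tx
         * mbufs, while buf_tag covers the single contiguous block of
         * npehwbuf descriptors that is shared with the NPE.
         */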
        /* DMA tag for mapped mbufs  */
        error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES, maxseg, MCLBYTES, 0,
            busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
        if (error != 0) {
                device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
                     "error %u\n", dma->name, error);
                return error;
        }

        /* DMA tag and map for the NPE buffers */
        error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            nbuf * sizeof(struct npehwbuf), 1,
            nbuf * sizeof(struct npehwbuf), 0,
            busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "unable to create %s npebuf dma tag, error %u\n",
                    dma->name, error);
                return error;
        }
        /* XXX COHERENT for now */
        error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &dma->buf_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                     "unable to allocate memory for %s h/w buffers, error %u\n",
                     dma->name, error);
                return error;
        }
        /* XXX M_TEMP */
        dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
        if (dma->buf == NULL) {
                device_printf(sc->sc_dev,
                     "unable to allocate memory for %s s/w buffers\n",
                     dma->name);
                return ENOMEM;
        }
        error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
            dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
        if (error != 0) {
                device_printf(sc->sc_dev,
                     "unable to map memory for %s h/w buffers, error %u\n",
                     dma->name, error);
                return error;
        }
        dma->buf_phys = sc->buf_phys;
        for (i = 0; i < dma->nbuf; i++) {
                struct npebuf *npe = &dma->buf[i];
                struct npehwbuf *hw = &dma->hwbuf[i];

                /* calculate offset to shared area */
                npe->ix_neaddr = dma->buf_phys +
                        ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
                KASSERT((npe->ix_neaddr & 0x1f) == 0,
                    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
                error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
                                &npe->ix_map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                             "unable to create dmamap for %s buffer %u, "
                             "error %u\n", dma->name, i, error);
                        return error;
                }
                npe->ix_hw = hw;
        }
        bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
        return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
        int i;

        if (dma->hwbuf != NULL) {
                for (i = 0; i < dma->nbuf; i++) {
                        struct npebuf *npe = &dma->buf[i];
                        bus_dmamap_destroy(dma->mtag, npe->ix_map);
                }
                bus_dmamap_unload(dma->buf_tag, dma->buf_map);
                bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
                bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
        }
        if (dma->buf != NULL)
                free(dma->buf, M_TEMP);
        if (dma->buf_tag)
                bus_dma_tag_destroy(dma->buf_tag);
        if (dma->mtag)
                bus_dma_tag_destroy(dma->mtag);
        memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base, int *size)
{
        int unit = device_get_unit(dev);
        const char *resval;

        /* XXX warn for wrong hint type */
        if (resource_string_value("npe", unit, resname, &resval) != 0)
                return 0;
        switch (resval[0]) {
        case 'A':
                *base = IXP425_MAC_A_HWBASE;
                *size = IXP425_MAC_A_SIZE;
                break;
        case 'B':
                *base = IXP425_MAC_B_HWBASE;
                *size = IXP425_MAC_B_SIZE;
                break;
        default:
                device_printf(dev, "Warning, bad value %s for "
                    "npe.%d.%s ignored\n", resval, unit, resname);
                return 0;
        }
        if (bootverbose)
                device_printf(dev, "using npe.%d.%s=%s override\n",
                    unit, resname, resval);
        return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
        int unit = device_get_unit(dev);
        int resval;

        if (resource_int_value("npe", unit, resname, &resval) != 0)
                return 0;
        if (!(min <= resval && resval <= max)) {
                device_printf(dev, "Warning, bad value %d for npe.%d.%s "
                    "ignored (value must be [%d-%d])\n", resval, unit,
                    resname, min, max);
                return 0;
        }
        if (bootverbose)
                device_printf(dev, "using npe.%d.%s=%d override\n",
                    unit, resname, resval);
        *val = resval;
        return 1;
}

static int
npe_activate(device_t dev)
{
        struct npe_softc * sc = device_get_softc(dev);
        int unit = device_get_unit(dev);
        int error, i, regbase, regsize, miibase, miisize;
        uint32_t imageid;

        /*
         * Load NPE firmware and start it running.  We assume
         * that minor version bumps remain compatible so probe
         * the firmware image starting with the expected version
         * and then bump the minor version up to the max.
         */
        imageid = npeconfig[unit].imageid;
        for (;;) {
                error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
                if (error == 0)
                        break;
                /* ESRCH is returned when the requested image is not present */
                if (error != ESRCH)
                        return error;
                /* bump the minor version up to the max possible */
                if (NPEIMAGE_MINOR(imageid) == 0xff)
                        return error;
                imageid++;
        }
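
        /*
         * NB: imageid++ above steps the firmware image's minor revision
         * (the low bits, per the NPEIMAGE_MINOR() check); once the minor
         * field reads 0xff there is no newer revision left to try.
         */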

        if (!override_addr(dev, "mac", &regbase, &regsize)) {
                regbase = npeconfig[unit].regbase;
                regsize = npeconfig[unit].regsize;
        }
        if (bus_space_map(sc->sc_iot, regbase, regsize, 0, &sc->sc_ioh)) {
                device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
                    regbase, regsize);
                return ENOMEM;
        }

        if (!override_addr(dev, "mii", &miibase, &miisize)) {
                miibase = npeconfig[unit].miibase;
                miisize = npeconfig[unit].miisize;
        }
        if (miibase != regbase) {
                /*
                 * PHY is mapped through a different MAC, setup an
                 * additional mapping for frobbing the PHY registers.
                 */
                if (bus_space_map(sc->sc_iot, miibase, miisize, 0, &sc->sc_miih)) {
                        device_printf(dev,
                            "Cannot map MII registers 0x%x:0x%x\n",
                            miibase, miisize);
                        return ENOMEM;
                }
        } else
                sc->sc_miih = sc->sc_ioh;
        error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
        if (error != 0)
                return error;
        error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
        if (error != 0)
                return error;

        /* setup statistics block */
        error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct npestats), 1, sizeof(struct npestats), 0,
            busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
        if (error != 0) {
                device_printf(sc->sc_dev, "unable to create stats tag, "
                     "error %u\n", error);
                return error;
        }
        error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
            BUS_DMA_NOWAIT, &sc->sc_stats_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                     "unable to allocate memory for stats block, error %u\n",
                     error);
                return error;
        }
        error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
            sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
        if (error != 0) {
                device_printf(sc->sc_dev,
                     "unable to load memory for stats block, error %u\n",
                     error);
                return error;
        }
        sc->sc_stats_phys = sc->buf_phys;

        /* XXX disable half-bridge LEARNING+FILTERING feature */

        /*
         * Setup h/w rx/tx queues.  There are four q's:
         *   rx         inbound q of rx'd frames
         *   rx_free    pool of ixpbuf's for receiving frames
         *   tx         outbound q of frames to send
         *   tx_done    q of tx frames that have been processed
         *
         * The NPE handles the actual tx/rx process and the q manager
         * handles the queues.  The driver just writes entries to the
         * q manager mailboxes and gets callbacks when there are rx'd
         * frames to process or tx'd frames to reap.  These callbacks
         * are controlled by the q configurations; e.g. we get a
         * callback when tx_done has 2 or more frames to process and
         * when the rx q has at least one frame.  These settings can
         * be changed at the time the q is configured.
         */
        sc->rx_qid = npeconfig[unit].rx_qid;
        ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
                IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
        sc->rx_freeqid = npeconfig[unit].rx_freeqid;
        ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
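        /*
         * NB: per the block comment above, the numeric ixpqmgr_qconfig()
         * arguments size each q and set its callback threshold: the rx q
         * fires npe_rxdone at 1 entry, while tx_done (configured below)
         * batches at least 2 tx completions per callback.
         */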
        /* tell the NPE to direct all traffic to rx_qid */
#if 0
        for (i = 0; i < 8; i++)
#else
device_printf(sc->sc_dev, "remember to fix rx q setup\n");
        for (i = 0; i < 4; i++)
#endif
                npe_setrxqosentry(sc, i, 0, sc->rx_qid);

        sc->tx_qid = npeconfig[unit].tx_qid;
        sc->tx_doneqid = npeconfig[unit].tx_doneqid;
        ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
        if (tx_doneqid == -1) {
                ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0,  2,
                        IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
                tx_doneqid = sc->tx_doneqid;
        }

        /*
         * Setup phy port number.  We allow override via hints
         * to handle different board configs.
         */
        if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
                sc->sc_phy = npeconfig[unit].phy;

        KASSERT(npes[npeconfig[unit].npeid] == NULL,
            ("npe %u already setup", npeconfig[unit].npeid));
        npes[npeconfig[unit].npeid] = sc;

        return 0;
}

static void
npe_deactivate(device_t dev)
{
        struct npe_softc *sc = device_get_softc(dev);
        int unit = device_get_unit(dev);

        npes[npeconfig[unit].npeid] = NULL;

        /* XXX disable q's */
        if (sc->sc_npe != NULL)
                ixpnpe_stop(sc->sc_npe);
        if (sc->sc_stats != NULL) {
                bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
                bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
                        sc->sc_stats_map);
                bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
        }
        if (sc->sc_stats_tag != NULL)
                bus_dma_tag_destroy(sc->sc_stats_tag);
        npe_dma_destroy(sc, &sc->txdma);
        npe_dma_destroy(sc, &sc->rxdma);
        bus_generic_detach(sc->sc_dev);
        if (sc->sc_mii)
                device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
        /* XXX sc_ioh and sc_miih */
        if (sc->mem_res)
                bus_release_resource(dev, SYS_RES_IOPORT,
                    rman_get_rid(sc->mem_res), sc->mem_res);
        sc->mem_res = 0;
#endif
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
        struct npe_softc *sc = ifp->if_softc;
        struct mii_data *mii;

        mii = device_get_softc(sc->sc_mii);
        NPE_LOCK(sc);
        mii_mediachg(mii);
        /* XXX push state ourself? */
        NPE_UNLOCK(sc);
        return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct npe_softc *sc = ifp->if_softc;
        struct mii_data *mii;

        mii = device_get_softc(sc->sc_mii);
        NPE_LOCK(sc);
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define MIBADD(x)       sc->mibdata.x += be32toh(ns->x)
        struct ifnet *ifp = sc->sc_ifp;
        struct npestats *ns = sc->sc_stats;

        MIBADD(dot3StatsAlignmentErrors);
        MIBADD(dot3StatsFCSErrors);
        MIBADD(dot3StatsSingleCollisionFrames);
        MIBADD(dot3StatsMultipleCollisionFrames);
        MIBADD(dot3StatsDeferredTransmissions);
        MIBADD(dot3StatsLateCollisions);
        MIBADD(dot3StatsExcessiveCollisions);
        MIBADD(dot3StatsInternalMacTransmitErrors);
        MIBADD(dot3StatsCarrierSenseErrors);
        sc->mibdata.dot3StatsFrameTooLongs +=
              be32toh(ns->RxLargeFramesDiscards)
            + be32toh(ns->TxLargeFrameDiscards);
        MIBADD(dot3StatsInternalMacReceiveErrors);
        sc->mibdata.dot3StatsMissedFrames +=
              be32toh(ns->RxOverrunDiscards)
            + be32toh(ns->RxUnderflowEntryDiscards);

        ifp->if_oerrors +=
                  be32toh(ns->dot3StatsInternalMacTransmitErrors)
                + be32toh(ns->dot3StatsCarrierSenseErrors)
                + be32toh(ns->TxVLANIdFilterDiscards)
                ;
        ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
                + be32toh(ns->dot3StatsInternalMacReceiveErrors)
                + be32toh(ns->RxOverrunDiscards)
                + be32toh(ns->RxUnderflowEntryDiscards)
                ;
        ifp->if_collisions +=
                  be32toh(ns->dot3StatsSingleCollisionFrames)
                + be32toh(ns->dot3StatsMultipleCollisionFrames)
                ;
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define ACK     (NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
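/*
 * NB: ACK is the reply the NPE sends for a RESETSTATS request; the
 * message id is carried in the high bits of the first message word,
 * which is what msg[0] is compared against below.
 */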
        struct npe_softc *sc = xsc;
        struct mii_data *mii = device_get_softc(sc->sc_mii);
        uint32_t msg[2];

        NPE_ASSERT_LOCKED(sc);

        /*
         * NB: to avoid sleeping with the softc lock held we
         * split the NPE msg processing into two parts.  The
         * request for statistics is sent w/o waiting for a
         * reply and then on the next tick we retrieve the
         * results.  This works because npe_tick is the only
         * code that talks via the mailboxes (except at setup).
         * This likely can be handled better.
         */
        if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
                bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
                    BUS_DMASYNC_POSTREAD);
                npe_addstats(sc);
        }
        npe_updatestats(sc);
        mii_tick(mii);

        npewatchdog(sc);

        /* schedule next poll */
        callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
        WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
        WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
        WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
        WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
        WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
        WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
        /* NB: the unicast address appears to be loaded from EEPROM on reset */
        eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
        eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
        eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
        eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
        eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
        eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
        struct npebuf *head;
        struct npebuf **tail;
        int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
        struct ifnet *ifp = sc->sc_ifp;

        NPE_LOCK(sc);
        *td->tail = sc->tx_free;
        sc->tx_free = td->head;
        /*
         * We're no longer busy, so clear the busy flag and call the
         * start routine to xmit more packets.
         */
        ifp->if_opackets += td->count;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        sc->npe_watchdog_timer = 0;
        npestart_locked(ifp);
        NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define P2V(a, dma) \
        &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
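/* NB: P2V translates a h/w q entry's physical address to its s/w npebuf. */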
        struct npe_softc *sc0 = arg;
        struct npe_softc *sc;
        struct npebuf *npe;
        struct txdone *td, q[NPE_MAX];
        uint32_t entry;

        /* XXX no NPE-A support */
        q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
        q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
        /* XXX max # at a time? */
        while (ixpqmgr_qread(qid, &entry) == 0) {
                DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
                    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

                sc = npes[NPE_QM_Q_NPE(entry)];
                npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
                m_freem(npe->ix_m);
                npe->ix_m = NULL;

                td = &q[NPE_QM_Q_NPE(entry)];
                *td->tail = npe;
                td->tail = &npe->ix_next;
                td->count++;
        }

        if (q[NPE_B].count)
                npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
        if (q[NPE_C].count)
                npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
        bus_dma_segment_t segs[1];
        struct npedma *dma = &sc->rxdma;
        struct npehwbuf *hw;
        int error, nseg;

        if (m == NULL) {
                m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        return ENOBUFS;
        }
        KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
                ("ext_size %d", m->m_ext.ext_size));
        m->m_pkthdr.len = m->m_len = 1536;
        /* backload payload and align ip hdr */
        m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
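        /*
         * NB: placing the payload at the end of the cluster leaves the
         * 14-byte Ethernet header ETHER_ALIGN (2) bytes short of a word
         * boundary, so the IP header that follows ends up 32-bit aligned.
         */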
        error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
                        segs, &nseg, 0);
        if (error != 0) {
                m_freem(m);
                return error;
        }
        hw = npe->ix_hw;
        hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
        /* NB: NPE requires length be a multiple of 64 */
        /* NB: buffer length is shifted in word */
        hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
        hw->ix_ne[0].next = 0;
        npe->ix_m = m;
        /* Flush the memory in the mbuf */
        bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
        return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define P2V(a, dma) \
        &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
        struct npe_softc *sc = arg;
        struct npedma *dma = &sc->rxdma;
        uint32_t entry;

        while (ixpqmgr_qread(qid, &entry) == 0) {
                struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
                struct mbuf *m;

                DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
                    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
                /*
                 * Allocate a new mbuf to replenish the rx buffer.
                 * If doing so fails we drop the rx'd frame so we
                 * can reuse the previous mbuf.  When we're able to
                 * allocate a new mbuf dispatch the mbuf w/ rx'd
                 * data up the stack and replace it with the newly
                 * allocated one.
                 */
                m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m != NULL) {
                        struct mbuf *mrx = npe->ix_m;
                        struct npehwbuf *hw = npe->ix_hw;
                        struct ifnet *ifp = sc->sc_ifp;

                        /* Flush mbuf memory for rx'd data */
                        bus_dmamap_sync(dma->mtag, npe->ix_map,
                            BUS_DMASYNC_POSTREAD);

                        /* XXX flush hw buffer; works now 'cuz coherent */
                        /* set m_len etc. per rx frame size */
                        mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
                        mrx->m_pkthdr.len = mrx->m_len;
                        mrx->m_pkthdr.rcvif = ifp;
                        mrx->m_flags |= M_HASFCS;

                        ifp->if_ipackets++;
                        ifp->if_input(ifp, mrx);
                } else {
                        /* discard frame and re-use mbuf */
                        m = npe->ix_m;
                }
                if (npe_rxbuf_init(sc, npe, m) == 0) {
                        /* return npe buf to rx free list */
                        ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
                } else {
                        /* XXX should not happen */
                }
        }
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct npe_softc *sc = ifp->if_softc;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                npe_rxdone(sc->rx_qid, sc);
                npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */
        }
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
        struct npedma *dma = &sc->txdma;
        int i;

        NPE_ASSERT_LOCKED(sc);
        sc->tx_free = NULL;
        for (i = 0; i < dma->nbuf; i++) {
                struct npebuf *npe = &dma->buf[i];
                if (npe->ix_m != NULL) {
                        /* NB: should not happen */
                        device_printf(sc->sc_dev,
                            "%s: free mbuf at entry %u\n", __func__, i);
                        m_freem(npe->ix_m);
                }
                npe->ix_m = NULL;
                npe->ix_next = sc->tx_free;
                sc->tx_free = npe;
        }
}

static void
npe_startrecv(struct npe_softc *sc)
{
        struct npedma *dma = &sc->rxdma;
        struct npebuf *npe;
        int i;

        NPE_ASSERT_LOCKED(sc);
        for (i = 0; i < dma->nbuf; i++) {
                npe = &dma->buf[i];
                npe_rxbuf_init(sc, npe, npe->ix_m);
                /* set npe buf on rx free list */
                ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
        }
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
        struct npe_softc *sc = xsc;
        struct ifnet *ifp = sc->sc_ifp;

        NPE_ASSERT_LOCKED(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/

        /*
         * Reset MAC core.
         */
        WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
        DELAY(NPE_MAC_RESET_DELAY);
        /* configure MAC to generate MDC clock */
        WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

        /* disable transmitter and receiver in the MAC */
        WR4(sc, NPE_MAC_RX_CNTRL1,
            RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
        WR4(sc, NPE_MAC_TX_CNTRL1,
            RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

        /*
         * Set the MAC core registers.
         */
        WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);   /* clock ratio: for ixp4xx */
        WR4(sc, NPE_MAC_TX_CNTRL2,      0xf);   /* max retries */
        WR4(sc, NPE_MAC_RANDOM_SEED,    0x8);   /* LFSR back-off seed */
        /* thresholds determined by NPE firmware FS */
        WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
        WR4(sc, NPE_MAC_THRESH_P_FULL,  0x30);
        WR4(sc, NPE_MAC_BUF_SIZE_TX,    0x8);   /* tx fifo threshold (bytes) */
        WR4(sc, NPE_MAC_TX_DEFER,       0x15);  /* for single deferral */
        WR4(sc, NPE_MAC_RX_DEFER,       0x16);  /* deferral on inter-frame gap*/
        WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);   /* for 2-part deferral */
        WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);   /* for 2-part deferral */
        WR4(sc, NPE_MAC_SLOT_TIME,      0x80);  /* assumes MII mode */

        WR4(sc, NPE_MAC_TX_CNTRL1,
                  NPE_TX_CNTRL1_RETRY           /* retry failed xmits */
                | NPE_TX_CNTRL1_FCS_EN          /* append FCS */
                | NPE_TX_CNTRL1_2DEFER          /* 2-part deferral */
                | NPE_TX_CNTRL1_PAD_EN);        /* pad runt frames */
        /* XXX pad strip? */
        WR4(sc, NPE_MAC_RX_CNTRL1,
                  NPE_RX_CNTRL1_CRC_EN          /* include CRC/FCS */
                | NPE_RX_CNTRL1_PAUSE_EN);      /* ena pause frame handling */
        WR4(sc, NPE_MAC_RX_CNTRL2, 0);

        npe_setmac(sc, IF_LLADDR(ifp));
        npe_setmcast(sc);

        npe_startxmit(sc);
        npe_startrecv(sc);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        sc->npe_watchdog_timer = 0;             /* just in case */

        /* enable transmitter and receiver in the MAC */
        WR4(sc, NPE_MAC_RX_CNTRL1,
            RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
        WR4(sc, NPE_MAC_TX_CNTRL1,
            RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

        callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
        struct npe_softc *sc = xsc;
        NPE_LOCK(sc);
        npeinit_locked(sc);
        NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
        struct npe_softc *sc = ifp->if_softc;
        struct npebuf *npe;
        struct npehwbuf *hw;
        struct mbuf *m, *n;
        struct npedma *dma = &sc->txdma;
        bus_dma_segment_t segs[NPE_MAXSEG];
        int nseg, len, error, i;
        uint32_t next;

        NPE_ASSERT_LOCKED(sc);
        /* XXX can this happen? */
        if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
                return;

        while (sc->tx_free != NULL) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL) {
                        /* XXX? */
                        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                        return;
                }
                npe = sc->tx_free;
                error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
                    m, segs, &nseg, 0);
                if (error == EFBIG) {
                        n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
                        if (n == NULL) {
                                if_printf(ifp, "%s: too many fragments %u\n",
                                    __func__, nseg);
                                m_freem(m);
                                return; /* XXX? */
                        }
                        m = n;
                        error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
                            m, segs, &nseg, 0);
                }
                if (error != 0 || nseg == 0) {
                        if_printf(ifp, "%s: error %u nseg %u\n",
                            __func__, error, nseg);
                        m_freem(m);
                        return; /* XXX? */
                }
                sc->tx_free = npe->ix_next;

                bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

                /*
                 * Tap off here if there is a bpf listener.
                 */
                BPF_MTAP(ifp, m);

                npe->ix_m = m;
                hw = npe->ix_hw;
                len = m->m_pkthdr.len;
                next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
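                /*
                 * NB: the loop below builds the NPE scatter/gather chain:
                 * each ix_ne entry carries a segment's physical address, a
                 * length word (segment length in the high half; the first
                 * entry also carries the total frame length in the low
                 * half), and a link to the next entry; the final link is
                 * zeroed to terminate the chain.
                 */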
 1271                 for (i = 0; i < nseg; i++) {
 1272                         hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
 1273                         hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
 1274                         hw->ix_ne[i].next = htobe32(next);
 1275 
 1276                         len = 0;                /* zero for segments > 1 */
 1277                         next += sizeof(hw->ix_ne[0]);
 1278                 }
 1279                 hw->ix_ne[i-1].next = 0;        /* zero last in chain */
 1280                 /* XXX flush descriptor instead of using uncached memory */
 1281 
 1282                 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
 1283                     __func__, sc->tx_qid, npe->ix_neaddr,
 1284                     hw->ix_ne[0].data, hw->ix_ne[0].len);
 1285                 /* stick it on the tx q */
 1286                 /* XXX add vlan priority */
 1287                 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
 1288 
 1289                 sc->npe_watchdog_timer = 5;
 1290         }
 1291         if (sc->tx_free == NULL)
 1292                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1293 }
 1294 
 1295 void
 1296 npestart(struct ifnet *ifp)
 1297 {
 1298         struct npe_softc *sc = ifp->if_softc;
 1299         NPE_LOCK(sc);
 1300         npestart_locked(ifp);
 1301         NPE_UNLOCK(sc);
 1302 }
 1303 
 1304 static void
 1305 npe_stopxmit(struct npe_softc *sc)
 1306 {
 1307         struct npedma *dma = &sc->txdma;
 1308         int i;
 1309 
 1310         NPE_ASSERT_LOCKED(sc);
 1311 
 1312         /* XXX qmgr */
 1313         for (i = 0; i < dma->nbuf; i++) {
 1314                 struct npebuf *npe = &dma->buf[i];
 1315 
 1316                 if (npe->ix_m != NULL) {
 1317                         bus_dmamap_unload(dma->mtag, npe->ix_map);
 1318                         m_freem(npe->ix_m);
 1319                         npe->ix_m = NULL;
 1320                 }
 1321         }
 1322 }
 1323 
 1324 static void
 1325 npe_stoprecv(struct npe_softc *sc)
 1326 {
 1327         struct npedma *dma = &sc->rxdma;
 1328         int i;
 1329 
 1330         NPE_ASSERT_LOCKED(sc);
 1331 
 1332         /* XXX qmgr */
 1333         for (i = 0; i < dma->nbuf; i++) {
 1334                 struct npebuf *npe = &dma->buf[i];
 1335 
 1336                 if (npe->ix_m != NULL) {
 1337                         bus_dmamap_unload(dma->mtag, npe->ix_map);
 1338                         m_freem(npe->ix_m);
 1339                         npe->ix_m = NULL;
 1340                 }
 1341         }
 1342 }
 1343 
 1344 /*
 1345  * Turn off interrupts, and stop the nic.
 1346  */
 1347 void
 1348 npestop(struct npe_softc *sc)
 1349 {
 1350         struct ifnet *ifp = sc->sc_ifp;
 1351 
 1352         /* disable transmitter and receiver in the MAC */
 1353         WR4(sc, NPE_MAC_RX_CNTRL1,
 1354             RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
 1355         WR4(sc, NPE_MAC_TX_CNTRL1,
 1356             RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
 1357 
 1358         sc->npe_watchdog_timer = 0;
 1359         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1360 
 1361         callout_stop(&sc->tick_ch);
 1362 
 1363         npe_stopxmit(sc);
 1364         npe_stoprecv(sc);
 1365         /* XXX go into loopback & drain q's? */
 1366         /* XXX but beware of disabling tx above */
 1367 
 1368         /*
 1369          * Disabling the MAC core rx/tx may leave the hardware in an
 1370          * unpredictable state, so a h/w reset is executed before
 1371          * restoring all the MAC parameters to known values.
 1372          */
 1373         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
 1374         DELAY(NPE_MAC_RESET_DELAY);
 1375         WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
 1376         WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
 1377 }
 1378 
 1379 void
 1380 npewatchdog(struct npe_softc *sc)
 1381 {
 1382         NPE_ASSERT_LOCKED(sc);
 1383 
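              /*
               * npe_watchdog_timer counts down (presumably once per second
               * from the tick callout); zero means the watchdog is
               * disarmed, so only a 1->0 transition falls through to the
               * timeout handling below.
               */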
 1384         if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
 1385                 return;
 1386 
 1387         device_printf(sc->sc_dev, "watchdog timeout\n");
 1388         sc->sc_ifp->if_oerrors++;
 1389 
 1390         npeinit_locked(sc);
 1391 }
 1392 
 1393 static int
 1394 npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1395 {
 1396         struct npe_softc *sc = ifp->if_softc;
 1397         struct mii_data *mii;
 1398         struct ifreq *ifr = (struct ifreq *)data;       
 1399         int error = 0;
 1400 #ifdef DEVICE_POLLING
 1401         int mask;
 1402 #endif
 1403 
 1404         switch (cmd) {
 1405         case SIOCSIFFLAGS:
 1406                 NPE_LOCK(sc);
 1407                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1408                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1409                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1410                         npestop(sc);
 1411                 } else {
 1412                         /* reinitialize card on any parameter change */
 1413                         npeinit_locked(sc);
 1414                 }
 1415                 NPE_UNLOCK(sc);
 1416                 break;
 1417 
 1418         case SIOCADDMULTI:
 1419         case SIOCDELMULTI:
 1420                 /* update multicast filter list. */
 1421                 NPE_LOCK(sc);
 1422                 npe_setmcast(sc);
 1423                 NPE_UNLOCK(sc);
 1424                 error = 0;
 1425                 break;
 1426 
 1427         case SIOCSIFMEDIA:
 1428         case SIOCGIFMEDIA:
 1429                 mii = device_get_softc(sc->sc_mii);
 1430                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1431                 break;
 1432 
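              /*
               * IFCAP_POLLING switches between qmgr notifications and
               * polled operation: registering with ether_poll disables
               * the rx and tx-done queue callbacks, deregistering
               * re-enables them.
               */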
 1433 #ifdef DEVICE_POLLING
 1434         case SIOCSIFCAP:
 1435                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 1436                 if (mask & IFCAP_POLLING) {
 1437                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 1438                                 error = ether_poll_register(npe_poll, ifp);
 1439                                 if (error)
 1440                                         return error;
 1441                                 NPE_LOCK(sc);
 1442                                 /* disable callbacks XXX txdone is shared */
 1443                                 ixpqmgr_notify_disable(sc->rx_qid);
 1444                                 ixpqmgr_notify_disable(sc->tx_doneqid);
 1445                                 ifp->if_capenable |= IFCAP_POLLING;
 1446                                 NPE_UNLOCK(sc);
 1447                         } else {
 1448                                 error = ether_poll_deregister(ifp);
 1449                                 /* NB: always re-enable qmgr callbacks */
 1450                                 NPE_LOCK(sc);
 1452                                 ixpqmgr_notify_enable(sc->rx_qid,
 1453                                     IX_QMGR_Q_SOURCE_ID_NOT_E);
 1454                                 ixpqmgr_notify_enable(sc->tx_doneqid,
 1455                                     IX_QMGR_Q_SOURCE_ID_NOT_E);
 1456                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1457                                 NPE_UNLOCK(sc);
 1458                         }
 1459                 }
 1460                 break;
 1461 #endif
 1462         default:
 1463                 error = ether_ioctl(ifp, cmd, data);
 1464                 break;
 1465         }
 1466         return error;
 1467 }
 1468 
 1469 /*
 1470  * Setup a traffic class -> rx queue mapping.
 1471  */
 1472 static int
 1473 npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
 1474 {
 1475         int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
 1476         uint32_t msg[2];
 1477 
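              /*
               * Two-word NPE mailbox message; judging from the encoding
               * below, msg[0] carries the opcode in the top byte plus the
               * NPE id and class index, while msg[1] packs the traffic
               * class and the (duplicated) target queue id.
               */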
 1478         msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
 1479         msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
 1480         return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
 1481 }
 1482 
 1483 /*
 1484  * Update and reset the statistics in the NPE.
 1485  */
 1486 static int
 1487 npe_updatestats(struct npe_softc *sc)
 1488 {
 1489         uint32_t msg[2];
 1490 
 1491         msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
 1492         msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
 1493         return ixpnpe_sendmsg(sc->sc_npe, msg);         /* NB: no recv */
 1494 }
 1495 
 1496 #if 0
 1497 /*
 1498  * Get the current statistics block.
 1499  */
 1500 static int
 1501 npe_getstats(struct npe_softc *sc)
 1502 {
 1503         uint32_t msg[2];
 1504 
 1505         msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
 1506         msg[1] = sc->sc_stats_phys;     /* physical address of stat block */
 1507         return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
 1508 }
 1509 
 1510 /*
 1511  * Query the image id of the loaded firmware.
 1512  */
 1513 static uint32_t
 1514 npe_getimageid(struct npe_softc *sc)
 1515 {
 1516         uint32_t msg[2];
 1517 
 1518         msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
 1519         msg[1] = 0;
 1520         return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
 1521 }
 1522 
 1523 /*
 1524  * Enable/disable loopback.
 1525  */
 1526 static int
 1527 npe_setloopback(struct npe_softc *sc, int ena)
 1528 {
 1529         uint32_t msg[2];
 1530 
 1531         msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
 1532         msg[1] = 0;
 1533         return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
 1534 }
 1535 #endif
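      /*
       * NB: the helpers above are compiled out but kept as a record
       * of the remaining NPE message opcodes (GETSTATS, GETSTATUS,
       * SETLOOPBACK).
       */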
 1536 
 1537 static void
 1538 npe_child_detached(device_t dev, device_t child)
 1539 {
 1540         struct npe_softc *sc;
 1541 
 1542         sc = device_get_softc(dev);
 1543         if (child == sc->sc_mii)
 1544                 sc->sc_mii = NULL;
 1545 }
 1546 
 1547 /*
 1548  * MII bus support routines.
 1549  */
 1550 static uint32_t
 1551 npe_mii_mdio_read(struct npe_softc *sc, int reg)
 1552 {
 1553 #define MII_RD4(sc, reg)        bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
 1554         uint32_t v;
 1555 
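              /*
               * The MDIO value is spread across four consecutive 32-bit
               * MAC registers, one byte per register, so it is
               * reassembled here from reg+0 .. reg+12.
               */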
 1556         /* NB: registers are known to be sequential */
 1557         v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
 1558         v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
 1559         v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
 1560         v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
 1561         return v;
 1562 #undef MII_RD4
 1563 }
 1564 
 1565 static void
 1566 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
 1567 {
 1568 #define MII_WR4(sc, reg, v) \
 1569         bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
 1570 
 1571         /* NB: registers are known to be sequential */
 1572         MII_WR4(sc, reg+0, cmd & 0xff);
 1573         MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
 1574         MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
 1575         MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
 1576 #undef MII_WR4
 1577 }
 1578 
 1579 static int
 1580 npe_mii_mdio_wait(struct npe_softc *sc)
 1581 {
 1582 #define MAXTRIES        100     /* XXX */
 1583         uint32_t v;
 1584         int i;
 1585 
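              /*
               * Busy-wait for the h/w to clear the GO bit.  NB: there is
               * no DELAY() between polls, so MAXTRIES bounds iterations,
               * not elapsed time.
               */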
 1586         for (i = 0; i < MAXTRIES; i++) {
 1587                 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
 1588                 if ((v & NPE_MII_GO) == 0)
 1589                         return 1;
 1590         }
 1591         return 0;               /* NB: timeout */
 1592 #undef MAXTRIES
 1593 }
 1594 
 1595 static int
 1596 npe_miibus_readreg(device_t dev, int phy, int reg)
 1597 {
 1598         struct npe_softc *sc = device_get_softc(dev);
 1599         uint32_t v;
 1600 
 1601         v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
 1602           | NPE_MII_GO;
 1603         npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
 1604         if (npe_mii_mdio_wait(sc))
 1605                 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
 1606         else
 1607                 v = 0xffff | NPE_MII_READ_FAIL;
 1608         return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
 1610 }
 1611 
 1612 static void
 1613 npe_miibus_writereg(device_t dev, int phy, int reg, int data)
 1614 {
 1615         struct npe_softc *sc = device_get_softc(dev);
 1616         uint32_t v;
 1617 
 1618         v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
 1619           | data | NPE_MII_WRITE
 1620           | NPE_MII_GO;
 1621         npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
 1622         /* XXX complain about timeout */
 1623         (void) npe_mii_mdio_wait(sc);
 1624 }
 1625 
 1626 static void
 1627 npe_miibus_statchg(device_t dev)
 1628 {
 1629         struct npe_softc *sc = device_get_softc(dev);
 1630         struct mii_data *mii = device_get_softc(sc->sc_mii);
 1631         uint32_t tx1, rx1;
 1632 
 1633         /* sync MAC duplex state */
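              /*
               * NB: the sense of NPE_TX_CNTRL1_DUPLEX appears inverted
               * relative to its name: it is cleared for full duplex
               * (with rx pause enabled) and set for half duplex.
               */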
 1634         tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
 1635         rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
 1636         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
 1637                 tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
 1638                 rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
 1639         } else {
 1640                 tx1 |= NPE_TX_CNTRL1_DUPLEX;
 1641                 rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
 1642         }
 1643         WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
 1644         WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
 1645 }
 1646 
 1647 static device_method_t npe_methods[] = {
 1648         /* Device interface */
 1649         DEVMETHOD(device_probe,         npe_probe),
 1650         DEVMETHOD(device_attach,        npe_attach),
 1651         DEVMETHOD(device_detach,        npe_detach),
 1652 
 1653         /* Bus interface */
 1654         DEVMETHOD(bus_child_detached,   npe_child_detached),
 1655 
 1656         /* MII interface */
 1657         DEVMETHOD(miibus_readreg,       npe_miibus_readreg),
 1658         DEVMETHOD(miibus_writereg,      npe_miibus_writereg),
 1659         DEVMETHOD(miibus_statchg,       npe_miibus_statchg),
 1660 
 1661         { 0, 0 }
 1662 };
 1663 
 1664 static driver_t npe_driver = {
 1665         "npe",
 1666         npe_methods,
 1667         sizeof(struct npe_softc),
 1668 };
 1669 
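      /*
       * Newbus/module glue: attach npe devices on the ixp bus, hang a
       * miibus instance off each npe, and record run-time dependencies
       * on the ixpqmgr, miibus, and ether modules.
       */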
 1670 DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
 1671 DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
 1672 MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
 1673 MODULE_DEPEND(npe, miibus, 1, 1, 1);
 1674 MODULE_DEPEND(npe, ether, 1, 1, 1);
