FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_pcn.c


    1 /*      $NetBSD: if_pcn.c,v 1.22 2003/10/25 18:31:11 christos Exp $     */
    2 
    3 /*
    4  * Copyright (c) 2001 Wasabi Systems, Inc.
    5  * All rights reserved.
    6  *
    7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      Wasabi Systems, Inc.
   21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   22  *    or promote products derived from this software without specific prior
   23  *    written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 
   38 /*
   39  * Device driver for the AMD PCnet-PCI series of Ethernet
   40  * chips:
   41  *
   42  *      * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
   43  *        Local Bus
   44  *
   45  *      * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
   46  *        for PCI Local Bus
   47  *
   48  *      * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
   49  *        Ethernet Controller for PCI Local Bus
   50  *
   51  *      * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
   52  *        with OnNow Support
   53  *
   54  *      * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
   55  *        Ethernet Controller with Integrated PHY
   56  *
   57  * This also supports the virtual PCnet-PCI Ethernet interface found
   58  * in VMware.
   59  *
   60  * TODO:
   61  *
   62  *      * Split this into bus-specific and bus-independent portions.
   63  *        The core could also be used for the ILACC (Am79900) 32-bit
   64  *        Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
   65  */
   66 
   67 #include "opt_pcn.h"
   68 
   69 #include <sys/cdefs.h>
   70 __KERNEL_RCSID(0, "$NetBSD: if_pcn.c,v 1.22 2003/10/25 18:31:11 christos Exp $");
   71 
   72 #include "bpfilter.h"
   73 #include "rnd.h"
   74 
   75 #include <sys/param.h>
   76 #include <sys/systm.h>
   77 #include <sys/callout.h>
   78 #include <sys/mbuf.h>
   79 #include <sys/malloc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/socket.h>
   82 #include <sys/ioctl.h>
   83 #include <sys/errno.h> 
   84 #include <sys/device.h>
   85 #include <sys/queue.h>
   86 
   87 #if NRND > 0
   88 #include <sys/rnd.h>
   89 #endif
   90 
   91 #include <uvm/uvm_extern.h>             /* for PAGE_SIZE */
   92 
   93 #include <net/if.h>
   94 #include <net/if_dl.h>
   95 #include <net/if_media.h>
   96 #include <net/if_ether.h>
   97 
   98 #if NBPFILTER > 0
   99 #include <net/bpf.h>
  100 #endif
  101 
  102 #include <machine/bus.h>
  103 #include <machine/intr.h>
  104 #include <machine/endian.h>
  105 
  106 #include <dev/mii/mii.h>
  107 #include <dev/mii/miivar.h>
  108 
  109 #include <dev/ic/am79900reg.h>
  110 #include <dev/ic/lancereg.h>
  111 
  112 #include <dev/pci/pcireg.h>
  113 #include <dev/pci/pcivar.h>
  114 #include <dev/pci/pcidevs.h>
  115 
  116 #include <dev/pci/if_pcnreg.h>
  117 
  118 /*
  119  * Transmit descriptor list size.  This is arbitrary, but allocate
  120  * enough descriptors for 128 pending transmissions, and 4 segments
  121  * per packet.  This MUST work out to a power of 2.
  122  *
  123  * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
  124  *
  125  * So we play a little trick here.  We give each packet up to 16
  126  * DMA segments, but only allocate the max of 512 descriptors.  The
  127  * transmit logic can deal with this; we're just hoping to sneak by.
  128  */
  129 #define PCN_NTXSEGS             16
  130 
  131 #define PCN_TXQUEUELEN          128
  132 #define PCN_TXQUEUELEN_MASK     (PCN_TXQUEUELEN - 1)
  133 #define PCN_NTXDESC             512
  134 #define PCN_NTXDESC_MASK        (PCN_NTXDESC - 1)
  135 #define PCN_NEXTTX(x)           (((x) + 1) & PCN_NTXDESC_MASK)
  136 #define PCN_NEXTTXS(x)          (((x) + 1) & PCN_TXQUEUELEN_MASK)
  137 
  138 /* Tx interrupt every N + 1 packets. */
  139 #define PCN_TXINTR_MASK         7
  140 
  141 /*
  142  * Receive descriptor list size.  We have one Rx buffer per incoming
  143  * packet, so this logic is a little simpler.
  144  */
  145 #define PCN_NRXDESC             128
  146 #define PCN_NRXDESC_MASK        (PCN_NRXDESC - 1)
  147 #define PCN_NEXTRX(x)           (((x) + 1) & PCN_NRXDESC_MASK)
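
      /*
       * Illustrative sketch (not part of the original driver; the
       * PCN_EXAMPLES guard is hypothetical and never defined): because
       * the ring sizes above are powers of 2, PCN_NEXTTX(), PCN_NEXTTXS()
       * and PCN_NEXTRX() advance a ring index with a simple AND mask
       * instead of a modulo, so index 511 wraps back to 0 and so on.
       */
      #ifdef PCN_EXAMPLES
      static int
      pcn_example_ring_walk(void)
      {
              int idx = 0, steps = 0;

              /* Walk the Tx ring exactly once; the wrap back to 0 ends it. */
              do {
                      idx = PCN_NEXTTX(idx);
                      steps++;
              } while (idx != 0);

              return (steps);         /* == PCN_NTXDESC */
      }
      #endif /* PCN_EXAMPLES */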
  148 
  149 /*
  150  * Control structures are DMA'd to the PCnet chip.  We allocate them in
  151  * a single clump that maps to a single DMA segment to make several things
  152  * easier.
  153  */
  154 struct pcn_control_data {
  155         /* The transmit descriptors. */
  156         struct letmd pcd_txdescs[PCN_NTXDESC];
  157 
  158         /* The receive descriptors. */
  159         struct lermd pcd_rxdescs[PCN_NRXDESC];
  160 
  161         /* The init block. */
  162         struct leinit pcd_initblock;
  163 };
  164 
  165 #define PCN_CDOFF(x)    offsetof(struct pcn_control_data, x)
  166 #define PCN_CDTXOFF(x)  PCN_CDOFF(pcd_txdescs[(x)])
  167 #define PCN_CDRXOFF(x)  PCN_CDOFF(pcd_rxdescs[(x)])
  168 #define PCN_CDINITOFF   PCN_CDOFF(pcd_initblock)
  169 
  170 /*
  171  * Software state for transmit jobs.
  172  */
  173 struct pcn_txsoft {
  174         struct mbuf *txs_mbuf;          /* head of our mbuf chain */
  175         bus_dmamap_t txs_dmamap;        /* our DMA map */
  176         int txs_firstdesc;              /* first descriptor in packet */
  177         int txs_lastdesc;               /* last descriptor in packet */
  178 };
  179 
  180 /*
  181  * Software state for receive jobs.
  182  */
  183 struct pcn_rxsoft {
  184         struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
  185         bus_dmamap_t rxs_dmamap;        /* our DMA map */
  186 };
  187 
  188 /*
  189  * Description of Rx FIFO watermarks for various revisions.
  190  */
  191 const char * const pcn_79c970_rcvfw[] = {
  192         "16 bytes",
  193         "64 bytes",
  194         "128 bytes",
  195         NULL,
  196 };
  197 
  198 const char * const pcn_79c971_rcvfw[] = {
  199         "16 bytes",
  200         "64 bytes",
  201         "112 bytes",
  202         NULL,
  203 };
  204 
  205 /*
  206  * Description of Tx start points for various revisions.
  207  */
  208 const char * const pcn_79c970_xmtsp[] = {
  209         "8 bytes",
  210         "64 bytes",
  211         "128 bytes",
  212         "248 bytes",
  213 };
  214 
  215 const char * const pcn_79c971_xmtsp[] = {
  216         "20 bytes",
  217         "64 bytes",
  218         "128 bytes",
  219         "248 bytes",
  220 };
  221 
  222 const char * const pcn_79c971_xmtsp_sram[] = {
  223         "44 bytes",
  224         "64 bytes",
  225         "128 bytes",
  226         "store-and-forward",
  227 };
  228 
  229 /*
  230  * Description of Tx FIFO watermarks for various revisions.
  231  */
  232 const char * const pcn_79c970_xmtfw[] = {
  233         "16 bytes",
  234         "64 bytes",
  235         "128 bytes",
  236         NULL,
  237 };
  238 
  239 const char * const pcn_79c971_xmtfw[] = {
  240         "16 bytes",
  241         "64 bytes",
  242         "108 bytes",
  243         NULL,
  244 };
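
      /*
       * Illustrative sketch (not part of the original driver; PCN_EXAMPLES
       * is a hypothetical guard, never defined): the sc_rcvfw, sc_xmtsp and
       * sc_xmtfw indices kept in the softc select entries from the tables
       * above and are packed into CSR80, as pcn_txintr() does when it bumps
       * the transmit start point after an underrun.
       */
      #ifdef PCN_EXAMPLES
      static uint32_t
      pcn_example_csr80(int rcvfw, int xmtsp, int xmtfw)
      {

              return (LE_C80_RCVFW(rcvfw) | LE_C80_XMTSP(xmtsp) |
                  LE_C80_XMTFW(xmtfw));
      }
      #endif /* PCN_EXAMPLES */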
  245 
  246 /*
  247  * Software state per device.
  248  */
  249 struct pcn_softc {
  250         struct device sc_dev;           /* generic device information */
  251         bus_space_tag_t sc_st;          /* bus space tag */
  252         bus_space_handle_t sc_sh;       /* bus space handle */
  253         bus_dma_tag_t sc_dmat;          /* bus DMA tag */
  254         struct ethercom sc_ethercom;    /* Ethernet common data */
  255         void *sc_sdhook;                /* shutdown hook */
  256 
  257         /* Points to our media routines, etc. */
  258         const struct pcn_variant *sc_variant;
  259 
  260         void *sc_ih;                    /* interrupt cookie */
  261 
  262         struct mii_data sc_mii;         /* MII/media information */
  263 
  264         struct callout sc_tick_ch;      /* tick callout */
  265 
  266         bus_dmamap_t sc_cddmamap;       /* control data DMA map */
  267 #define sc_cddma        sc_cddmamap->dm_segs[0].ds_addr
  268 
  269         /* Software state for transmit and receive descriptors. */
  270         struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
  271         struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
  272 
  273         /* Control data structures */
  274         struct pcn_control_data *sc_control_data;
  275 #define sc_txdescs      sc_control_data->pcd_txdescs
  276 #define sc_rxdescs      sc_control_data->pcd_rxdescs
  277 #define sc_initblock    sc_control_data->pcd_initblock
  278 
  279 #ifdef PCN_EVENT_COUNTERS
  280         /* Event counters. */
  281         struct evcnt sc_ev_txsstall;    /* Tx stalled due to no txs */
  282         struct evcnt sc_ev_txdstall;    /* Tx stalled due to no txd */
  283         struct evcnt sc_ev_txintr;      /* Tx interrupts */
  284         struct evcnt sc_ev_rxintr;      /* Rx interrupts */
  285         struct evcnt sc_ev_babl;        /* BABL in pcn_intr() */
  286         struct evcnt sc_ev_miss;        /* MISS in pcn_intr() */
  287         struct evcnt sc_ev_merr;        /* MERR in pcn_intr() */
  288 
  289         struct evcnt sc_ev_txseg1;      /* Tx packets w/ 1 segment */
  290         struct evcnt sc_ev_txseg2;      /* Tx packets w/ 2 segments */
  291         struct evcnt sc_ev_txseg3;      /* Tx packets w/ 3 segments */
  292         struct evcnt sc_ev_txseg4;      /* Tx packets w/ 4 segments */
  293         struct evcnt sc_ev_txseg5;      /* Tx packets w/ 5 segments */
  294         struct evcnt sc_ev_txsegmore;   /* Tx packets w/ more than 5 segments */
  295         struct evcnt sc_ev_txcopy;      /* Tx copies required */
  296 #endif /* PCN_EVENT_COUNTERS */
  297 
  298         const char * const *sc_rcvfw_desc;      /* Rx FIFO watermark info */
  299         int sc_rcvfw;
  300 
  301         const char * const *sc_xmtsp_desc;      /* Tx start point info */
  302         int sc_xmtsp;
  303 
  304         const char * const *sc_xmtfw_desc;      /* Tx FIFO watermark info */
  305         int sc_xmtfw;
  306 
  307         int sc_flags;                   /* misc. flags; see below */
  308         int sc_swstyle;                 /* the software style in use */
  309 
  310         int sc_txfree;                  /* number of free Tx descriptors */
  311         int sc_txnext;                  /* next ready Tx descriptor */
  312 
  313         int sc_txsfree;                 /* number of free Tx jobs */
  314         int sc_txsnext;                 /* next free Tx job */
  315         int sc_txsdirty;                /* dirty Tx jobs */
  316 
  317         int sc_rxptr;                   /* next ready Rx descriptor/job */
  318 
  319         uint32_t sc_csr5;               /* prototype CSR5 register */
  320         uint32_t sc_mode;               /* prototype MODE register */
  321         int sc_phyaddr;                 /* PHY address */
  322 
  323 #if NRND > 0
  324         rndsource_element_t rnd_source; /* random source */
  325 #endif
  326 };
  327 
  328 /* sc_flags */
  329 #define PCN_F_HAS_MII           0x0001  /* has MII */
  330 
  331 #ifdef PCN_EVENT_COUNTERS
  332 #define PCN_EVCNT_INCR(ev)      (ev)->ev_count++
  333 #else
  334 #define PCN_EVCNT_INCR(ev)      /* nothing */
  335 #endif
  336 
  337 #define PCN_CDTXADDR(sc, x)     ((sc)->sc_cddma + PCN_CDTXOFF((x)))
  338 #define PCN_CDRXADDR(sc, x)     ((sc)->sc_cddma + PCN_CDRXOFF((x)))
  339 #define PCN_CDINITADDR(sc)      ((sc)->sc_cddma + PCN_CDINITOFF)
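
      /*
       * Illustrative sketch (not part of the original driver; PCN_EXAMPLES
       * is a hypothetical guard, never defined): the chip is handed bus
       * addresses, not kernel pointers, so a descriptor's device-visible
       * address is just the control-data clump's single DMA segment base
       * plus the offsetof()-derived offset -- exactly what the macros
       * above compute.
       */
      #ifdef PCN_EXAMPLES
      static bus_addr_t
      pcn_example_rxdesc_busaddr(struct pcn_softc *sc, int x)
      {

              return (sc->sc_cddma + PCN_CDRXOFF(x)); /* == PCN_CDRXADDR(sc, x) */
      }
      #endif /* PCN_EXAMPLES */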
  340 
  341 #define PCN_CDTXSYNC(sc, x, n, ops)                                     \
  342 do {                                                                    \
  343         int __x, __n;                                                   \
  344                                                                         \
  345         __x = (x);                                                      \
  346         __n = (n);                                                      \
  347                                                                         \
  348         /* If it will wrap around, sync to the end of the ring. */      \
  349         if ((__x + __n) > PCN_NTXDESC) {                                \
  350                 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,       \
  351                     PCN_CDTXOFF(__x), sizeof(struct letmd) *            \
  352                     (PCN_NTXDESC - __x), (ops));                        \
  353                 __n -= (PCN_NTXDESC - __x);                             \
  354                 __x = 0;                                                \
  355         }                                                               \
  356                                                                         \
  357         /* Now sync whatever is left. */                                \
  358         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  359             PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops));       \
  360 } while (/*CONSTCOND*/0)
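
      /*
       * Illustrative sketch (not part of the original driver; PCN_EXAMPLES
       * is a hypothetical guard, never defined): PCN_CDTXSYNC() above may
       * issue two bus_dmamap_sync() calls because a run of Tx descriptors
       * can wrap past the end of the ring; the helper below just mirrors
       * that arithmetic.
       */
      #ifdef PCN_EXAMPLES
      static int
      pcn_example_txsync_ranges(int x, int n)
      {

              /*
               * A run of `n' descriptors starting at `x' covers one
               * contiguous byte range in the clump, or two if it wraps.
               */
              return ((x + n > PCN_NTXDESC) ? 2 : 1);
      }
      #endif /* PCN_EXAMPLES */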
  361 
  362 #define PCN_CDRXSYNC(sc, x, ops)                                        \
  363         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  364             PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
  365 
  366 #define PCN_CDINITSYNC(sc, ops)                                         \
  367         bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
  368             PCN_CDINITOFF, sizeof(struct leinit), (ops))
  369 
  370 #define PCN_INIT_RXDESC(sc, x)                                          \
  371 do {                                                                    \
  372         struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];               \
  373         struct lermd *__rmd = &(sc)->sc_rxdescs[(x)];                   \
  374         struct mbuf *__m = __rxs->rxs_mbuf;                             \
  375                                                                         \
  376         /*                                                              \
  377          * Note: We scoot the packet forward 2 bytes in the buffer      \
  378          * so that the payload after the Ethernet header is aligned     \
  379          * to a 4-byte boundary.                                        \
  380          */                                                             \
  381         __m->m_data = __m->m_ext.ext_buf + 2;                           \
  382                                                                         \
  383         if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {              \
  384                 __rmd->rmd2 =                                           \
  385                     htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
  386                 __rmd->rmd0 = 0;                                        \
  387         } else {                                                        \
  388                 __rmd->rmd2 = 0;                                        \
  389                 __rmd->rmd0 =                                           \
  390                     htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
  391         }                                                               \
  392         __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES|                     \
  393             (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK));                 \
  394         PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
  395 } while(/*CONSTCOND*/0)
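
      /*
       * Illustrative sketch (not part of the original driver; PCN_EXAMPLES
       * is a hypothetical guard, never defined): the "+ 2" above is the
       * usual Ethernet alignment trick -- the 14-byte Ethernet header would
       * otherwise leave the IP header on a 2-byte boundary, so starting the
       * frame 2 bytes into the cluster puts the payload at offset 16, a
       * multiple of 4.
       */
      #ifdef PCN_EXAMPLES
      static int
      pcn_example_payload_is_aligned(void)
      {

              return (((2 + ETHER_HDR_LEN) & 3) == 0);        /* 16 % 4 == 0 */
      }
      #endif /* PCN_EXAMPLES */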
  396 
  397 void    pcn_start(struct ifnet *);
  398 void    pcn_watchdog(struct ifnet *);
  399 int     pcn_ioctl(struct ifnet *, u_long, caddr_t);
  400 int     pcn_init(struct ifnet *);
  401 void    pcn_stop(struct ifnet *, int);
  402 
  403 void    pcn_shutdown(void *);
  404 
  405 void    pcn_reset(struct pcn_softc *);
  406 void    pcn_rxdrain(struct pcn_softc *);
  407 int     pcn_add_rxbuf(struct pcn_softc *, int);
  408 void    pcn_tick(void *);
  409 
  410 void    pcn_spnd(struct pcn_softc *);
  411 
  412 void    pcn_set_filter(struct pcn_softc *);
  413 
  414 int     pcn_intr(void *);
  415 void    pcn_txintr(struct pcn_softc *);
  416 int     pcn_rxintr(struct pcn_softc *);
  417 
  418 int     pcn_mii_readreg(struct device *, int, int);
  419 void    pcn_mii_writereg(struct device *, int, int, int);
  420 void    pcn_mii_statchg(struct device *);
  421 
  422 void    pcn_79c970_mediainit(struct pcn_softc *);
  423 int     pcn_79c970_mediachange(struct ifnet *);
  424 void    pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
  425 
  426 void    pcn_79c971_mediainit(struct pcn_softc *);
  427 int     pcn_79c971_mediachange(struct ifnet *);
  428 void    pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
  429 
  430 /*
  431  * Description of a PCnet-PCI variant.  Used to select media access
  432  * method, mostly, and to print a nice description of the chip.
  433  */
  434 const struct pcn_variant {
  435         const char *pcv_desc;
  436         void (*pcv_mediainit)(struct pcn_softc *);
  437         uint16_t pcv_chipid;
  438 } pcn_variants[] = {
  439         { "Am79c970 PCnet-PCI",
  440           pcn_79c970_mediainit,
  441           PARTID_Am79c970 },
  442 
  443         { "Am79c970A PCnet-PCI II",
  444           pcn_79c970_mediainit,
  445           PARTID_Am79c970A },
  446 
  447         { "Am79c971 PCnet-FAST",
  448           pcn_79c971_mediainit,
  449           PARTID_Am79c971 },
  450 
  451         { "Am79c972 PCnet-FAST+",
  452           pcn_79c971_mediainit,
  453           PARTID_Am79c972 },
  454 
  455         { "Am79c973 PCnet-FAST III",
  456           pcn_79c971_mediainit,
  457           PARTID_Am79c973 },
  458 
  459         { "Am79c975 PCnet-FAST III",
  460           pcn_79c971_mediainit,
  461           PARTID_Am79c975 },
  462 
  463         { "Unknown PCnet-PCI variant",
  464           pcn_79c971_mediainit,
  465           0 },
  466 };
  467 
  468 int     pcn_copy_small = 0;
  469 
  470 int     pcn_match(struct device *, struct cfdata *, void *);
  471 void    pcn_attach(struct device *, struct device *, void *);
  472 
  473 CFATTACH_DECL(pcn, sizeof(struct pcn_softc),
  474     pcn_match, pcn_attach, NULL, NULL);
  475 
  476 /*
  477  * Routines to read and write the PCnet-PCI CSR/BCR space.
  478  */
  479 
  480 static __inline uint32_t
  481 pcn_csr_read(struct pcn_softc *sc, int reg)
  482 {
  483 
  484         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  485         return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
  486 }
  487 
  488 static __inline void
  489 pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
  490 {
  491 
  492         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  493         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
  494 }
  495 
  496 static __inline uint32_t
  497 pcn_bcr_read(struct pcn_softc *sc, int reg)
  498 {
  499 
  500         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  501         return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
  502 }
  503 
  504 static __inline void
  505 pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
  506 {
  507 
  508         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
  509         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
  510 }
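
      /*
       * Illustrative sketch (not part of the original driver; PCN_EXAMPLES
       * is a hypothetical guard, never defined): all CSR/BCR access is
       * indirect -- the register number goes to RAP, then data moves
       * through RDP (CSRs) or BDP (BCRs).  A read-modify-write using the
       * helpers above therefore looks like this:
       */
      #ifdef PCN_EXAMPLES
      static void
      pcn_example_csr_setbits(struct pcn_softc *sc, int reg, uint32_t bits)
      {

              pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | bits);
      }
      #endif /* PCN_EXAMPLES */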
  511 
  512 static const struct pcn_variant *
  513 pcn_lookup_variant(uint16_t chipid)
  514 {
  515         const struct pcn_variant *pcv;
  516 
  517         for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
  518                 if (chipid == pcv->pcv_chipid)
  519                         return (pcv);
  520         }
  521 
  522         /*
  523          * This covers unknown chips, which we simply treat like
  524          * a generic PCnet-FAST.
  525          */
  526         return (pcv);
  527 }
  528 
  529 int
  530 pcn_match(struct device *parent, struct cfdata *cf, void *aux)
  531 {
  532         struct pci_attach_args *pa = aux;
  533 
  534         if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD)
  535                 return (0);
  536 
  537         switch (PCI_PRODUCT(pa->pa_id)) {
  538         case PCI_PRODUCT_AMD_PCNET_PCI:
  539                 /* Beat if_le_pci.c */
  540                 return (10);
  541         }
  542 
  543         return (0);
  544 }
  545 
  546 void
  547 pcn_attach(struct device *parent, struct device *self, void *aux)
  548 {
  549         struct pcn_softc *sc = (struct pcn_softc *) self;
  550         struct pci_attach_args *pa = aux;
  551         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
  552         pci_chipset_tag_t pc = pa->pa_pc;
  553         pci_intr_handle_t ih;
  554         const char *intrstr = NULL;
  555         bus_space_tag_t iot, memt;
  556         bus_space_handle_t ioh, memh;
  557         bus_dma_segment_t seg;
  558         int ioh_valid, memh_valid;
  559         int i, rseg, error;
  560         pcireg_t pmode;
  561         uint32_t chipid, reg;
  562         uint8_t enaddr[ETHER_ADDR_LEN];
  563         int pmreg;
  564 
  565         callout_init(&sc->sc_tick_ch);
  566 
  567         printf(": AMD PCnet-PCI Ethernet\n");
  568 
  569         /*
  570          * Map the device.
  571          */
  572         ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
  573             &iot, &ioh, NULL, NULL) == 0);
  574         memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
  575             PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
  576             &memt, &memh, NULL, NULL) == 0);
  577 
  578         if (memh_valid) {
  579                 sc->sc_st = memt;
  580                 sc->sc_sh = memh;
  581         } else if (ioh_valid) {
  582                 sc->sc_st = iot;
  583                 sc->sc_sh = ioh;
  584         } else {
  585                 printf("%s: unable to map device registers\n",
  586                     sc->sc_dev.dv_xname);
  587                 return;
  588         }
  589 
  590         sc->sc_dmat = pa->pa_dmat;
  591 
  592         /* Make sure bus mastering is enabled. */
  593         pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
  594             pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
  595             PCI_COMMAND_MASTER_ENABLE);
  596 
  597         /* Get it out of power save mode, if needed. */
  598         if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
  599                 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
  600                     PCI_PMCSR_STATE_MASK;
  601                 if (pmode == PCI_PMCSR_STATE_D3) {
  602                         /*
  603                          * The card has lost all configuration data in
  604                          * this state, so punt.
  605                          */
  606                         printf("%s: unable to wake from power state D3\n",
  607                             sc->sc_dev.dv_xname);
  608                         return;
  609                 }
  610                 if (pmode != PCI_PMCSR_STATE_D0) {
  611                         printf("%s: waking up from power state D%d\n",
  612                             sc->sc_dev.dv_xname, pmode);
  613                         pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
  614                             PCI_PMCSR_STATE_D0);
  615                 }
  616         }
  617 
  618         /*
  619          * Reset the chip to a known state.  This also puts the
  620          * chip into 32-bit mode.
  621          */
  622         pcn_reset(sc);
  623 
  624 #if !defined(PCN_NO_PROM)
  625 
  626         /*
  627          * Read the Ethernet address from the EEPROM.
  628          */
  629         for (i = 0; i < ETHER_ADDR_LEN; i++)
  630                 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
  631                     PCN32_APROM + i);
  632 #else
  633         /*
  634          * The PROM is not used; instead we assume that the MAC address
  635          * has been programmed into the device's physical address
  636  * registers by the boot firmware.
  637          */
  638 
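              /*
               * Illustrative note: CSR12..CSR14 each hold two bytes of the
               * station address, low byte first, which is why each 16-bit
               * read below is split into two enaddr[] bytes.
               */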
  639         for (i = 0; i < 3; i++) {
  640                 uint32_t val;
  641                 val = pcn_csr_read(sc, LE_CSR12 + i);
  642                 enaddr[2*i] = val & 0x0ff;
  643                 enaddr[2*i+1] = (val >> 8) & 0x0ff;
  644         }
  645 #endif
  646 
  647         /*
  648          * Now that the device is mapped, attempt to figure out what
  649          * kind of chip we have.  Note that IDL has all 32 bits of
  650          * the chip ID when we're in 32-bit mode.
  651          */
  652         chipid = pcn_csr_read(sc, LE_CSR88);
  653         sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
  654 
  655         printf("%s: %s rev %d, Ethernet address %s\n",
  656             sc->sc_dev.dv_xname, sc->sc_variant->pcv_desc, CHIPID_VER(chipid),
  657             ether_sprintf(enaddr));
  658 
  659         /*
  660          * Map and establish our interrupt.
  661          */
  662         if (pci_intr_map(pa, &ih)) {
  663                 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
  664                 return;
  665         }
  666         intrstr = pci_intr_string(pc, ih);
  667         sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc);
  668         if (sc->sc_ih == NULL) {
  669                 printf("%s: unable to establish interrupt",
  670                     sc->sc_dev.dv_xname);
  671                 if (intrstr != NULL)
  672                         printf(" at %s", intrstr);
  673                 printf("\n");
  674                 return;
  675         }
  676         printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
  677 
  678         /*
  679          * Allocate the control data structures, and create and load the
  680          * DMA map for it.
  681          */
  682         if ((error = bus_dmamem_alloc(sc->sc_dmat,
  683              sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
  684              0)) != 0) {
  685                 printf("%s: unable to allocate control data, error = %d\n",
  686                     sc->sc_dev.dv_xname, error);
  687                 goto fail_0;
  688         }
  689 
  690         if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
  691              sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
  692              BUS_DMA_COHERENT)) != 0) {
  693                 printf("%s: unable to map control data, error = %d\n",
  694                     sc->sc_dev.dv_xname, error);
  695                 goto fail_1;
  696         }
  697 
  698         if ((error = bus_dmamap_create(sc->sc_dmat,
  699              sizeof(struct pcn_control_data), 1,
  700              sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
  701                 printf("%s: unable to create control data DMA map, "
  702                     "error = %d\n", sc->sc_dev.dv_xname, error);
  703                 goto fail_2;
  704         }
  705 
  706         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
  707              sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
  708              0)) != 0) {
  709                 printf("%s: unable to load control data DMA map, error = %d\n",
  710                     sc->sc_dev.dv_xname, error);
  711                 goto fail_3;
  712         }
  713 
  714         /* Create the transmit buffer DMA maps. */
  715         for (i = 0; i < PCN_TXQUEUELEN; i++) {
  716                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
  717                      PCN_NTXSEGS, MCLBYTES, 0, 0,
  718                      &sc->sc_txsoft[i].txs_dmamap)) != 0) {
  719                         printf("%s: unable to create tx DMA map %d, "
  720                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  721                         goto fail_4;
  722                 }
  723         }
  724 
  725         /* Create the receive buffer DMA maps. */
  726         for (i = 0; i < PCN_NRXDESC; i++) {
  727                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
  728                      MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
  729                         printf("%s: unable to create rx DMA map %d, "
  730                             "error = %d\n", sc->sc_dev.dv_xname, i, error);
  731                         goto fail_5;
  732                 }
  733                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
  734         }
  735 
  736         /* Initialize our media structures. */
  737         (*sc->sc_variant->pcv_mediainit)(sc);
  738 
  739         /*
  740          * Initialize FIFO watermark info.
  741          */
  742         switch (sc->sc_variant->pcv_chipid) {
  743         case PARTID_Am79c970:
  744         case PARTID_Am79c970A:
  745                 sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
  746                 sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
  747                 sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
  748                 break;
  749 
  750         default:
  751                 sc->sc_rcvfw_desc = pcn_79c971_rcvfw; 
  752                 /*
  753                  * Read BCR25 to determine how much SRAM is
  754  * on the board.  If > 0, then the chip
  755                  * uses different Start Point thresholds.
  756                  *
  757                  * Note BCR25 and BCR26 are loaded from the
  758                  * EEPROM on RST, and unaffected by S_RESET,
  759                  * so we don't really have to worry about
  760                  * them except for this.
  761                  */
  762                 reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
  763                 if (reg != 0)
  764                         sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
  765                 else
  766                         sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
  767                 sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
  768                 break;
  769         }
  770 
  771         /*
  772          * Set up defaults -- see the tables above for what these
  773          * values mean.
  774          *
  775          * XXX How should we tune RCVFW and XMTFW?
  776          */
  777         sc->sc_rcvfw = 1;       /* minimum for full-duplex */
  778         sc->sc_xmtsp = 1;
  779         sc->sc_xmtfw = 0;
  780 
  781         ifp = &sc->sc_ethercom.ec_if;
  782         strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
  783         ifp->if_softc = sc;
  784         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  785         ifp->if_ioctl = pcn_ioctl;
  786         ifp->if_start = pcn_start;
  787         ifp->if_watchdog = pcn_watchdog;
  788         ifp->if_init = pcn_init;
  789         ifp->if_stop = pcn_stop;
  790         IFQ_SET_READY(&ifp->if_snd);
  791 
  792         /* Attach the interface. */
  793         if_attach(ifp); 
  794         ether_ifattach(ifp, enaddr);
  795 #if NRND > 0
  796         rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
  797             RND_TYPE_NET, 0);
  798 #endif
  799 
  800 #ifdef PCN_EVENT_COUNTERS
  801         /* Attach event counters. */
  802         evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
  803             NULL, sc->sc_dev.dv_xname, "txsstall");
  804         evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
  805             NULL, sc->sc_dev.dv_xname, "txdstall");
  806         evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
  807             NULL, sc->sc_dev.dv_xname, "txintr");
  808         evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
  809             NULL, sc->sc_dev.dv_xname, "rxintr");
  810         evcnt_attach_dynamic(&sc->sc_ev_babl, EVCNT_TYPE_MISC,
  811             NULL, sc->sc_dev.dv_xname, "babl");
  812         evcnt_attach_dynamic(&sc->sc_ev_miss, EVCNT_TYPE_MISC,
  813             NULL, sc->sc_dev.dv_xname, "miss");
  814         evcnt_attach_dynamic(&sc->sc_ev_merr, EVCNT_TYPE_MISC,
  815             NULL, sc->sc_dev.dv_xname, "merr");
  816 
  817         evcnt_attach_dynamic(&sc->sc_ev_txseg1, EVCNT_TYPE_MISC,
  818             NULL, sc->sc_dev.dv_xname, "txseg1");
  819         evcnt_attach_dynamic(&sc->sc_ev_txseg2, EVCNT_TYPE_MISC,
  820             NULL, sc->sc_dev.dv_xname, "txseg2");
  821         evcnt_attach_dynamic(&sc->sc_ev_txseg3, EVCNT_TYPE_MISC,
  822             NULL, sc->sc_dev.dv_xname, "txseg3");
  823         evcnt_attach_dynamic(&sc->sc_ev_txseg4, EVCNT_TYPE_MISC,
  824             NULL, sc->sc_dev.dv_xname, "txseg4");
  825         evcnt_attach_dynamic(&sc->sc_ev_txseg5, EVCNT_TYPE_MISC,
  826             NULL, sc->sc_dev.dv_xname, "txseg5");
  827         evcnt_attach_dynamic(&sc->sc_ev_txsegmore, EVCNT_TYPE_MISC,
  828             NULL, sc->sc_dev.dv_xname, "txsegmore");
  829         evcnt_attach_dynamic(&sc->sc_ev_txcopy, EVCNT_TYPE_MISC,
  830             NULL, sc->sc_dev.dv_xname, "txcopy");
  831 #endif /* PCN_EVENT_COUNTERS */
  832 
  833         /* Make sure the interface is shutdown during reboot. */
  834         sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
  835         if (sc->sc_sdhook == NULL)
  836                 printf("%s: WARNING: unable to establish shutdown hook\n",
  837                     sc->sc_dev.dv_xname);
  838         return;
  839 
  840         /*
  841          * Free any resources we've allocated during the failed attach
  842          * attempt.  Do this in reverse order and fall through.
  843          */
  844  fail_5:
  845         for (i = 0; i < PCN_NRXDESC; i++) {
  846                 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
  847                         bus_dmamap_destroy(sc->sc_dmat,
  848                             sc->sc_rxsoft[i].rxs_dmamap);
  849         }
  850  fail_4:
  851         for (i = 0; i < PCN_TXQUEUELEN; i++) {
  852                 if (sc->sc_txsoft[i].txs_dmamap != NULL)
  853                         bus_dmamap_destroy(sc->sc_dmat,
  854                             sc->sc_txsoft[i].txs_dmamap);
  855         }
  856         bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
  857  fail_3:
  858         bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
  859  fail_2:
  860         bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
  861             sizeof(struct pcn_control_data));
  862  fail_1:
  863         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
  864  fail_0:
  865         return;
  866 }
  867 
  868 /*
  869  * pcn_shutdown:
  870  *
  871  *      Make sure the interface is stopped at reboot time.
  872  */
  873 void
  874 pcn_shutdown(void *arg)
  875 {
  876         struct pcn_softc *sc = arg;
  877 
  878         pcn_stop(&sc->sc_ethercom.ec_if, 1);
  879 }
  880 
  881 /*
  882  * pcn_start:           [ifnet interface function]
  883  *
  884  *      Start packet transmission on the interface.
  885  */
  886 void
  887 pcn_start(struct ifnet *ifp)
  888 {
  889         struct pcn_softc *sc = ifp->if_softc;
  890         struct mbuf *m0, *m;
  891         struct pcn_txsoft *txs;
  892         bus_dmamap_t dmamap;
  893         int error, nexttx, lasttx = -1, ofree, seg;
  894 
  895         if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
  896                 return;
  897 
  898         /*
  899          * Remember the previous number of free descriptors and
  900          * the first descriptor we'll use.
  901          */
  902         ofree = sc->sc_txfree;
  903 
  904         /*
  905          * Loop through the send queue, setting up transmit descriptors
  906          * until we drain the queue, or use up all available transmit
  907          * descriptors.
  908          */
  909         for (;;) {
  910                 /* Grab a packet off the queue. */
  911                 IFQ_POLL(&ifp->if_snd, m0);
  912                 if (m0 == NULL)
  913                         break;
  914                 m = NULL;
  915 
  916                 /* Get a work queue entry. */
  917                 if (sc->sc_txsfree == 0) {
  918                         PCN_EVCNT_INCR(&sc->sc_ev_txsstall);
  919                         break;
  920                 }
  921 
  922                 txs = &sc->sc_txsoft[sc->sc_txsnext];
  923                 dmamap = txs->txs_dmamap;
  924 
  925                 /*
  926                  * Load the DMA map.  If this fails, the packet either
  927                  * didn't fit in the allotted number of segments, or we
  928                  * were short on resources.  In this case, we'll copy
  929                  * and try again.
  930                  */
  931                 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
  932                     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
  933                         PCN_EVCNT_INCR(&sc->sc_ev_txcopy);
  934                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  935                         if (m == NULL) { 
  936                                 printf("%s: unable to allocate Tx mbuf\n",
  937                                     sc->sc_dev.dv_xname);
  938                                 break;
  939                         }
  940                         if (m0->m_pkthdr.len > MHLEN) {
  941                                 MCLGET(m, M_DONTWAIT);
  942                                 if ((m->m_flags & M_EXT) == 0) {
  943                                         printf("%s: unable to allocate Tx "
  944                                             "cluster\n", sc->sc_dev.dv_xname);
  945                                         m_freem(m);
  946                                         break;
  947                                 }
  948                         }
  949                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
  950                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  951                         error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
  952                             m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
  953                         if (error) {
  954                                 printf("%s: unable to load Tx buffer, "
  955                                     "error = %d\n", sc->sc_dev.dv_xname, error);
  956                                 break;
  957                         }
  958                 }
  959 
  960                 /*
  961                  * Ensure we have enough descriptors free to describe
  962                  * the packet.  Note, we always reserve one descriptor
  963                  * at the end of the ring as a termination point, to
  964                  * prevent wrap-around.
  965                  */
  966                 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
  967                         /*
  968                          * Not enough free descriptors to transmit this
  969                          * packet.  We haven't committed anything yet,
  970                          * so just unload the DMA map, put the packet
  971                          * back on the queue, and punt.  Notify the upper
  972                          * layer that there are not more slots left.
  973                          * layer that there are no more slots left.
  974                          * XXX We could allocate an mbuf and copy, but
  975                          * XXX is it worth it?
  976                          */
  977                         ifp->if_flags |= IFF_OACTIVE;
  978                         bus_dmamap_unload(sc->sc_dmat, dmamap);
  979                         if (m != NULL)
  980                                 m_freem(m);
  981                         PCN_EVCNT_INCR(&sc->sc_ev_txdstall);
  982                         break;
  983                 }
  984 
  985                 IFQ_DEQUEUE(&ifp->if_snd, m0);
  986                 if (m != NULL) {
  987                         m_freem(m0);
  988                         m0 = m;
  989                 }
  990 
  991                 /*
  992                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  993                  */
  994 
  995                 /* Sync the DMA map. */
  996                 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
  997                     BUS_DMASYNC_PREWRITE);
  998 
  999 #ifdef PCN_EVENT_COUNTERS
 1000                 switch (dmamap->dm_nsegs) {
 1001                 case 1:
 1002                         PCN_EVCNT_INCR(&sc->sc_ev_txseg1);
 1003                         break;
 1004                 case 2:
 1005                         PCN_EVCNT_INCR(&sc->sc_ev_txseg2);
 1006                         break;
 1007                 case 3:
 1008                         PCN_EVCNT_INCR(&sc->sc_ev_txseg3);
 1009                         break;
 1010                 case 4:
 1011                         PCN_EVCNT_INCR(&sc->sc_ev_txseg4);
 1012                         break;
 1013                 case 5:
 1014                         PCN_EVCNT_INCR(&sc->sc_ev_txseg5);
 1015                         break;
 1016                 default:
 1017                         PCN_EVCNT_INCR(&sc->sc_ev_txsegmore);
 1018                         break;
 1019                 }
 1020 #endif /* PCN_EVENT_COUNTERS */
 1021 
 1022                 /*
 1023                  * Initialize the transmit descriptors.
 1024                  */
 1025                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
 1026                         for (nexttx = sc->sc_txnext, seg = 0;
 1027                              seg < dmamap->dm_nsegs;
 1028                              seg++, nexttx = PCN_NEXTTX(nexttx)) {
 1029                                 /*
 1030                                  * If this is the first descriptor we're
 1031                                  * enqueueing, don't set the OWN bit just
 1032                                  * yet.  That could cause a race condition.
 1033                                  * We'll do it below.
 1034                                  */
 1035                                 sc->sc_txdescs[nexttx].tmd0 = 0;
 1036                                 sc->sc_txdescs[nexttx].tmd2 =
 1037                                     htole32(dmamap->dm_segs[seg].ds_addr);
 1038                                 sc->sc_txdescs[nexttx].tmd1 =
 1039                                     htole32(LE_T1_ONES |
 1040                                     (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
 1041                                     (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
 1042                                      LE_T1_BCNT_MASK));
 1043                                 lasttx = nexttx;
 1044                         }
 1045                 } else {
 1046                         for (nexttx = sc->sc_txnext, seg = 0;
 1047                              seg < dmamap->dm_nsegs;
 1048                              seg++, nexttx = PCN_NEXTTX(nexttx)) {
 1049                                 /*
 1050                                  * If this is the first descriptor we're
 1051                                  * enqueueing, don't set the OWN bit just
 1052                                  * yet.  That could cause a race condition.
 1053                                  * We'll do it below.
 1054                                  */
 1055                                 sc->sc_txdescs[nexttx].tmd0 =
 1056                                     htole32(dmamap->dm_segs[seg].ds_addr);
 1057                                 sc->sc_txdescs[nexttx].tmd2 = 0;
 1058                                 sc->sc_txdescs[nexttx].tmd1 =
 1059                                     htole32(LE_T1_ONES |
 1060                                     (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
 1061                                     (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
 1062                                      LE_T1_BCNT_MASK));
 1063                                 lasttx = nexttx;
 1064                         }
 1065                 }
 1066 
 1067                 KASSERT(lasttx != -1);
 1068                 /* Interrupt on the packet, if appropriate. */
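                      /*
                       * (With PCN_TXINTR_MASK == 7, only every 8th queued
                       * packet requests a Tx completion interrupt, which is
                       * why pcn_watchdog() sweeps with pcn_txintr() before
                       * declaring a timeout.)
                       */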
 1069                 if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
 1070                         sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
 1071 
 1072                 /* Set `start of packet' and `end of packet' appropriately. */
 1073                 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
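                      /*
                       * Ownership of the first (STP) descriptor is handed to
                       * the chip only now, with every later descriptor in the
                       * chain already filled in; this is what avoids the race
                       * mentioned above.
                       */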
 1074                 sc->sc_txdescs[sc->sc_txnext].tmd1 |=
 1075                     htole32(LE_T1_OWN|LE_T1_STP);
 1076 
 1077                 /* Sync the descriptors we're using. */
 1078                 PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
 1079                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1080 
 1081                 /* Kick the transmitter. */
 1082                 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
 1083 
 1084                 /*
 1085                  * Store a pointer to the packet so we can free it later,
 1086                  * and remember what txdirty will be once the packet is
 1087                  * done.
 1088                  */
 1089                 txs->txs_mbuf = m0;
 1090                 txs->txs_firstdesc = sc->sc_txnext;
 1091                 txs->txs_lastdesc = lasttx;
 1092 
 1093                 /* Advance the tx pointer. */
 1094                 sc->sc_txfree -= dmamap->dm_nsegs;
 1095                 sc->sc_txnext = nexttx;
 1096 
 1097                 sc->sc_txsfree--;
 1098                 sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
 1099 
 1100 #if NBPFILTER > 0
 1101                 /* Pass the packet to any BPF listeners. */
 1102                 if (ifp->if_bpf)
 1103                         bpf_mtap(ifp->if_bpf, m0);
 1104 #endif /* NBPFILTER > 0 */
 1105         }
 1106 
 1107         if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
 1108                 /* No more slots left; notify upper layer. */
 1109                 ifp->if_flags |= IFF_OACTIVE;
 1110         }
 1111 
 1112         if (sc->sc_txfree != ofree) {
 1113                 /* Set a watchdog timer in case the chip flakes out. */
 1114                 ifp->if_timer = 5;
 1115         }
 1116 }
 1117 
 1118 /*
 1119  * pcn_watchdog:        [ifnet interface function]
 1120  *
 1121  *      Watchdog timer handler.
 1122  */
 1123 void
 1124 pcn_watchdog(struct ifnet *ifp)
 1125 {
 1126         struct pcn_softc *sc = ifp->if_softc;
 1127 
 1128         /*
 1129          * Since we're not interrupting every packet, sweep
 1130          * up before we report an error.
 1131          */
 1132         pcn_txintr(sc);
 1133 
 1134         if (sc->sc_txfree != PCN_NTXDESC) {
 1135                 printf("%s: device timeout (txfree %d txsfree %d)\n",
 1136                     sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
 1137                 ifp->if_oerrors++;
 1138 
 1139                 /* Reset the interface. */
 1140                 (void) pcn_init(ifp);
 1141         }
 1142 
 1143         /* Try to get more packets going. */
 1144         pcn_start(ifp);
 1145 }
 1146 
 1147 /*
 1148  * pcn_ioctl:           [ifnet interface function]
 1149  *
 1150  *      Handle control requests from the operator.
 1151  */
 1152 int
 1153 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1154 {
 1155         struct pcn_softc *sc = ifp->if_softc;
 1156         struct ifreq *ifr = (struct ifreq *) data;
 1157         int s, error;
 1158 
 1159         s = splnet();
 1160 
 1161         switch (cmd) {
 1162         case SIOCSIFMEDIA:
 1163         case SIOCGIFMEDIA:
 1164                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
 1165                 break;
 1166 
 1167         default:
 1168                 error = ether_ioctl(ifp, cmd, data);
 1169                 if (error == ENETRESET) {
 1170                         /*
 1171                          * Multicast list has changed; set the hardware filter
 1172                          * accordingly.
 1173                          */
 1174                         error = pcn_init(ifp);
 1175                 }
 1176                 break;
 1177         }
 1178 
 1179         /* Try to get more packets going. */
 1180         pcn_start(ifp);
 1181 
 1182         splx(s);
 1183         return (error);
 1184 }
 1185 
 1186 /*
 1187  * pcn_intr:
 1188  *
 1189  *      Interrupt service routine.
 1190  */
 1191 int
 1192 pcn_intr(void *arg)
 1193 {
 1194         struct pcn_softc *sc = arg;
 1195         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1196         uint32_t csr0;
 1197         int wantinit, handled = 0;
 1198 
 1199         for (wantinit = 0; wantinit == 0;) {
 1200                 csr0 = pcn_csr_read(sc, LE_CSR0);
 1201                 if ((csr0 & LE_C0_INTR) == 0)
 1202                         break;
 1203 
 1204 #if NRND > 0
 1205                 if (RND_ENABLED(&sc->rnd_source))
 1206                         rnd_add_uint32(&sc->rnd_source, csr0);
 1207 #endif
 1208 
 1209                 /* ACK the bits and re-enable interrupts. */
 1210                 pcn_csr_write(sc, LE_CSR0, csr0 &
 1211                     (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
 1212                      LE_C0_TINT|LE_C0_IDON));
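                      /*
                       * (CSR0 status bits are cleared by writing them back as
                       * ones, so echoing the bits just read both acknowledges
                       * the events and, via INEA, keeps interrupts enabled.)
                       */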
 1213 
 1214                 handled = 1;
 1215 
 1216                 if (csr0 & LE_C0_RINT) {
 1217                         PCN_EVCNT_INCR(&sc->sc_ev_rxintr);
 1218                         wantinit = pcn_rxintr(sc);
 1219                 }
 1220 
 1221                 if (csr0 & LE_C0_TINT) {
 1222                         PCN_EVCNT_INCR(&sc->sc_ev_txintr);
 1223                         pcn_txintr(sc);
 1224                 }
 1225 
 1226                 if (csr0 & LE_C0_ERR) {
 1227                         if (csr0 & LE_C0_BABL) {
 1228                                 PCN_EVCNT_INCR(&sc->sc_ev_babl);
 1229                                 ifp->if_oerrors++;
 1230                         }
 1231                         if (csr0 & LE_C0_MISS) {
 1232                                 PCN_EVCNT_INCR(&sc->sc_ev_miss);
 1233                                 ifp->if_ierrors++;
 1234                         }
 1235                         if (csr0 & LE_C0_MERR) {
 1236                                 PCN_EVCNT_INCR(&sc->sc_ev_merr);
 1237                                 printf("%s: memory error\n",
 1238                                     sc->sc_dev.dv_xname);
 1239                                 wantinit = 1;
 1240                                 break;
 1241                         }
 1242                 }
 1243 
 1244                 if ((csr0 & LE_C0_RXON) == 0) {
 1245                         printf("%s: receiver disabled\n",
 1246                             sc->sc_dev.dv_xname);
 1247                         ifp->if_ierrors++;
 1248                         wantinit = 1;
 1249                 }
 1250 
 1251                 if ((csr0 & LE_C0_TXON) == 0) {
 1252                         printf("%s: transmitter disabled\n",
 1253                             sc->sc_dev.dv_xname);
 1254                         ifp->if_oerrors++;
 1255                         wantinit = 1;
 1256                 }
 1257         }
 1258 
 1259         if (handled) {
 1260                 if (wantinit)
 1261                         pcn_init(ifp);
 1262 
 1263                 /* Try to get more packets going. */
 1264                 pcn_start(ifp);
 1265         }
 1266 
 1267         return (handled);
 1268 }
 1269 
 1270 /*
 1271  * pcn_spnd:
 1272  *
 1273  *      Suspend the chip.
 1274  */
 1275 void
 1276 pcn_spnd(struct pcn_softc *sc)
 1277 {
 1278         int i;
 1279 
 1280         pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
 1281 
 1282         for (i = 0; i < 10000; i++) {
 1283                 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
 1284                         return;
 1285                 delay(5);
 1286         }
 1287 
 1288         printf("%s: WARNING: chip failed to enter suspended state\n",
 1289             sc->sc_dev.dv_xname);
 1290 }
 1291 
 1292 /*
 1293  * pcn_txintr:
 1294  *
 1295  *      Helper; handle transmit interrupts.
 1296  */
 1297 void
 1298 pcn_txintr(struct pcn_softc *sc)
 1299 {
 1300         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1301         struct pcn_txsoft *txs;
 1302         uint32_t tmd1, tmd2, tmd;
 1303         int i, j;
 1304 
 1305         ifp->if_flags &= ~IFF_OACTIVE;
 1306 
 1307         /*
 1308          * Go through our Tx list and free mbufs for those
 1309          * frames which have been transmitted.
 1310          */
 1311         for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
 1312              i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
 1313                 txs = &sc->sc_txsoft[i];
 1314 
 1315                 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
 1316                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1317 
 1318                 tmd1 = le32toh(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
 1319                 if (tmd1 & LE_T1_OWN)
 1320                         break;
 1321 
 1322                 /*
 1323                  * Slightly annoying -- we have to loop through the
 1324                  * descriptors we've used looking for ERR, since it
 1325                  * can appear on any descriptor in the chain.
 1326                  */
 1327                 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
 1328                         tmd = le32toh(sc->sc_txdescs[j].tmd1);
 1329                         if (tmd & LE_T1_ERR) {
 1330                                 ifp->if_oerrors++;
 1331                                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
 1332                                         tmd2 = le32toh(sc->sc_txdescs[j].tmd0);
 1333                                 else
 1334                                         tmd2 = le32toh(sc->sc_txdescs[j].tmd2);
 1335                                 if (tmd2 & LE_T2_UFLO) {
 1336                                         if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
 1337                                                 sc->sc_xmtsp++;
 1338                                                 printf("%s: transmit "
 1339                                                     "underrun; new threshold: "
 1340                                                     "%s\n",
 1341                                                     sc->sc_dev.dv_xname,
 1342                                                     sc->sc_xmtsp_desc[
 1343                                                     sc->sc_xmtsp]);
 1344                                                 pcn_spnd(sc);
 1345                                                 pcn_csr_write(sc, LE_CSR80,
 1346                                                     LE_C80_RCVFW(sc->sc_rcvfw) |
 1347                                                     LE_C80_XMTSP(sc->sc_xmtsp) |
 1348                                                     LE_C80_XMTFW(sc->sc_xmtfw));
 1349                                                 pcn_csr_write(sc, LE_CSR5,
 1350                                                     sc->sc_csr5);
 1351                                         } else {
 1352                                                 printf("%s: transmit "
 1353                                                     "underrun\n",
 1354                                                     sc->sc_dev.dv_xname);
 1355                                         }
 1356                                 } else if (tmd2 & LE_T2_BUFF) {
 1357                                         printf("%s: transmit buffer error\n",
 1358                                             sc->sc_dev.dv_xname);
 1359                                 }
 1360                                 if (tmd2 & LE_T2_LCOL)
 1361                                         ifp->if_collisions++;
 1362                                 if (tmd2 & LE_T2_RTRY)
 1363                                         ifp->if_collisions += 16;
 1364                                 goto next_packet;
 1365                         }
 1366                         if (j == txs->txs_lastdesc)
 1367                                 break;
 1368                 }
 1369                 if (tmd1 & LE_T1_ONE)
 1370                         ifp->if_collisions++;
 1371                 else if (tmd & LE_T1_MORE) {
 1372                         /* Real number is unknown. */
 1373                         ifp->if_collisions += 2;
 1374                 }
 1375                 ifp->if_opackets++;
 1376  next_packet:
 1377                 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
 1378                 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
 1379                     0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 1380                 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 1381                 m_freem(txs->txs_mbuf);
 1382                 txs->txs_mbuf = NULL;
 1383         }
 1384 
 1385         /* Update the dirty transmit buffer pointer. */
 1386         sc->sc_txsdirty = i;
 1387 
 1388         /*
 1389          * If there are no more pending transmissions, cancel the watchdog
 1390          * timer.
 1391          */
 1392         if (sc->sc_txsfree == PCN_TXQUEUELEN)
 1393                 ifp->if_timer = 0;
 1394 }
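
/*
 * Editor's sketch (not part of if_pcn.c): the reclaim loop in pcn_txintr()
 * walks the software ring from sc_txsdirty, stopping at the first
 * descriptor the chip still owns (LE_T1_OWN) and advancing the index
 * modulo the ring size.  The stand-alone program below shows the same
 * pattern with hypothetical names (struct slot, reclaim()); it is an
 * illustration only, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define QLEN    8
#define NEXT(i) (((i) + 1) % QLEN)      /* analogous to PCN_NEXTTXS() */

struct slot {
        bool owned_by_hw;       /* stands in for LE_T1_OWN */
        bool in_use;
};

/*
 * Reclaim completed slots starting at *dirty, stopping at the first slot
 * still owned by the hardware.  Returns the number reclaimed.
 */
static int
reclaim(struct slot ring[QLEN], int *dirty, int *nfree)
{
        int reclaimed = 0;
        int i;

        for (i = *dirty; *nfree != QLEN; i = NEXT(i), (*nfree)++) {
                if (ring[i].owned_by_hw)
                        break;
                ring[i].in_use = false;
                reclaimed++;
        }
        *dirty = i;
        return reclaimed;
}

int
main(void)
{
        struct slot ring[QLEN] = {{ false, false }};
        int dirty = 0, nfree = QLEN - 3, n;

        /* Three frames in flight; the hardware has finished two of them. */
        ring[0].in_use = ring[1].in_use = ring[2].in_use = true;
        ring[2].owned_by_hw = true;

        n = reclaim(ring, &dirty, &nfree);
        printf("reclaimed %d, dirty now %d, free now %d\n", n, dirty, nfree);
        return 0;
}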
 1395 
 1396 /*
 1397  * pcn_rxintr:
 1398  *
 1399  *      Helper; handle receive interrupts.
 1400  */
 1401 int
 1402 pcn_rxintr(struct pcn_softc *sc)
 1403 {
 1404         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1405         struct pcn_rxsoft *rxs;
 1406         struct mbuf *m;
 1407         uint32_t rmd1;
 1408         int i, len;
 1409 
 1410         for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
 1411                 rxs = &sc->sc_rxsoft[i];
 1412 
 1413                 PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1414 
 1415                 rmd1 = le32toh(sc->sc_rxdescs[i].rmd1);
 1416 
 1417                 if (rmd1 & LE_R1_OWN)
 1418                         break;
 1419 
 1420                 /*
 1421                  * Check for errors and make sure the packet fit into
 1422                  * a single buffer.  We have structured this block of
 1423                  * code the way it is in order to compress it into
 1424                  * one test in the common case (no error).
 1425                  */
 1426                 if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
 1427                     (LE_R1_STP|LE_R1_ENP))) {
 1428                         /* Make sure the packet is in a single buffer. */
 1429                         if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
 1430                             (LE_R1_STP|LE_R1_ENP)) {
 1431                                 printf("%s: packet spilled into next buffer\n",
 1432                                     sc->sc_dev.dv_xname);
 1433                                 return (1);     /* pcn_intr() will re-init */
 1434                         }
 1435 
 1436                         /*
 1437                          * If the packet had an error, simply recycle the
 1438                          * buffer.
 1439                          */
 1440                         if (rmd1 & LE_R1_ERR) {
 1441                                 ifp->if_ierrors++;
 1442                                 /*
 1443                                  * If we got an overflow error, chances
 1444                                  * are there will be a CRC error.  In
 1445                                  * this case, just print the overflow
 1446                                  * error, and skip the others.
 1447                                  */
 1448                                 if (rmd1 & LE_R1_OFLO)
 1449                                         printf("%s: overflow error\n",
 1450                                             sc->sc_dev.dv_xname);
 1451                                 else {
 1452 #define PRINTIT(x, str)                                                 \
 1453                                         if (rmd1 & (x))                 \
 1454                                                 printf("%s: %s\n",      \
 1455                                                     sc->sc_dev.dv_xname, str);
 1456                                         PRINTIT(LE_R1_FRAM, "framing error");
 1457                                         PRINTIT(LE_R1_CRC, "CRC error");
 1458                                         PRINTIT(LE_R1_BUFF, "buffer error");
 1459                                 }
 1460 #undef PRINTIT
 1461                                 PCN_INIT_RXDESC(sc, i);
 1462                                 continue;
 1463                         }
 1464                 }
 1465 
 1466                 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1467                     rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
 1468 
 1469                 /*
 1470                  * No errors; receive the packet.
 1471                  */
 1472                 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
 1473                         len = le32toh(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
 1474                 else
 1475                         len = le32toh(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;
 1476 
 1477                 /*
 1478                  * The LANCE family includes the CRC with every packet;
 1479                  * trim it off here.
 1480                  */
 1481                 len -= ETHER_CRC_LEN;
 1482 
 1483                 /*
 1484                  * If the packet is small enough to fit in a
 1485                  * single header mbuf, allocate one and copy
 1486                  * the data into it.  This greatly reduces
 1487                  * memory consumption when we receive lots
 1488                  * of small packets.
 1489                  *
 1490                  * Otherwise, we add a new buffer to the receive
 1491                  * chain.  If this fails, we drop the packet and
 1492                  * recycle the old buffer.
 1493                  */
 1494                 if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
 1495                         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1496                         if (m == NULL)
 1497                                 goto dropit;
 1498                         m->m_data += 2;
 1499                         memcpy(mtod(m, caddr_t),
 1500                             mtod(rxs->rxs_mbuf, caddr_t), len);
 1501                         PCN_INIT_RXDESC(sc, i);
 1502                         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1503                             rxs->rxs_dmamap->dm_mapsize,
 1504                             BUS_DMASYNC_PREREAD);
 1505                 } else {
 1506                         m = rxs->rxs_mbuf;
 1507                         if (pcn_add_rxbuf(sc, i) != 0) {
 1508  dropit:
 1509                                 ifp->if_ierrors++;
 1510                                 PCN_INIT_RXDESC(sc, i);
 1511                                 bus_dmamap_sync(sc->sc_dmat,
 1512                                     rxs->rxs_dmamap, 0,
 1513                                     rxs->rxs_dmamap->dm_mapsize,
 1514                                     BUS_DMASYNC_PREREAD);
 1515                                 continue;
 1516                         }
 1517                 }
 1518 
 1519                 m->m_pkthdr.rcvif = ifp;
 1520                 m->m_pkthdr.len = m->m_len = len;
 1521 
 1522 #if NBPFILTER > 0
 1523                 /* Pass this up to any BPF listeners. */
 1524                 if (ifp->if_bpf)
 1525                         bpf_mtap(ifp->if_bpf, m);
 1526 #endif /* NBPFILTER > 0 */
 1527 
 1528                 /* Pass it on. */
 1529                 (*ifp->if_input)(ifp, m);
 1530                 ifp->if_ipackets++;
 1531         }
 1532 
 1533         /* Update the receive pointer. */
 1534         sc->sc_rxptr = i;
 1535         return (0);
 1536 }
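
/*
 * Editor's sketch (not part of if_pcn.c): pcn_rxintr() uses two receive
 * strategies -- small frames are copied into a fresh header mbuf so the
 * DMA buffer stays on the ring, while large frames are handed up directly
 * and a replacement buffer is attached (dropping the frame if the
 * replacement cannot be allocated).  The stand-alone sketch below mirrors
 * that decision with plain malloc(); receive_frame(), SMALL_THRESHOLD and
 * BIG_BUFSIZE are hypothetical names.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define SMALL_THRESHOLD 128     /* stands in for (MHLEN - 2) */
#define BIG_BUFSIZE     2048    /* stands in for an mbuf cluster */

/*
 * Deliver a received frame of `len' bytes.  Returns the buffer to pass up
 * the stack, or NULL if the frame had to be dropped; in either case
 * *ring_slot still holds a valid buffer afterwards.
 */
static void *
receive_frame(void **ring_slot, size_t len)
{
        void *deliver;

        if (len <= SMALL_THRESHOLD) {
                /* Copy path: the ring slot keeps its buffer. */
                deliver = malloc(len);
                if (deliver == NULL)
                        return NULL;            /* drop, recycle slot */
                memcpy(deliver, *ring_slot, len);
        } else {
                /* Swap path: pass the big buffer up, refill the slot. */
                void *fresh = malloc(BIG_BUFSIZE);
                if (fresh == NULL)
                        return NULL;            /* drop, recycle slot */
                deliver = *ring_slot;
                *ring_slot = fresh;
        }
        return deliver;
}

int
main(void)
{
        void *slot = malloc(BIG_BUFSIZE);
        void *m;

        memset(slot, 0, BIG_BUFSIZE);
        m = receive_frame(&slot, 64);           /* copy path */
        free(m);
        m = receive_frame(&slot, 1500);         /* swap path */
        free(m);
        free(slot);
        return 0;
}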
 1537 
 1538 /*
 1539  * pcn_tick:
 1540  *
 1541  *      One second timer, used to tick the MII.
 1542  */
 1543 void
 1544 pcn_tick(void *arg)
 1545 {
 1546         struct pcn_softc *sc = arg;
 1547         int s;
 1548 
 1549         s = splnet();
 1550         mii_tick(&sc->sc_mii);
 1551         splx(s);
 1552 
 1553         callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
 1554 }
 1555 
 1556 /*
 1557  * pcn_reset:
 1558  *
 1559  *      Perform a soft reset on the PCnet-PCI.
 1560  */
 1561 void
 1562 pcn_reset(struct pcn_softc *sc)
 1563 {
 1564 
 1565         /*
 1566          * The PCnet-PCI chip is reset by reading from the
 1567          * RESET register.  Note that while the NE2100 LANCE
 1568          * boards require a write after the read, the PCnet-PCI
 1569          * chips do not require this.
 1570          *
 1571          * Since we don't know if we're in 16-bit or 32-bit
 1572          * mode right now, issue both (it's safe) in the
 1573          * hopes that one will succeed.
 1574          */
 1575         (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
 1576         (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
 1577 
 1578         /* Wait 1ms for it to finish. */
 1579         delay(1000);
 1580 
 1581         /*
 1582          * Select 32-bit I/O mode by issuing a 32-bit write to the
 1583          * RDP.  Since the RAP is 0 after a reset, writing a 0
 1584          * to RDP is safe (since it simply clears CSR0).
 1585          */
 1586         bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
 1587 }
 1588 
 1589 /*
 1590  * pcn_init:            [ifnet interface function]
 1591  *
 1592  *      Initialize the interface.  Must be called at splnet().
 1593  */
 1594 int
 1595 pcn_init(struct ifnet *ifp)
 1596 {
 1597         struct pcn_softc *sc = ifp->if_softc;
 1598         struct pcn_rxsoft *rxs;
 1599         uint8_t *enaddr = LLADDR(ifp->if_sadl);
 1600         int i, error = 0;
 1601         uint32_t reg;
 1602 
 1603         /* Cancel any pending I/O. */
 1604         pcn_stop(ifp, 0);
 1605 
 1606         /* Reset the chip to a known state. */
 1607         pcn_reset(sc);
 1608 
 1609         /*
 1610          * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
 1611          * else.
 1612          *
 1613          * XXX It'd be really nice to use SSTYLE 2 on all the chips,
 1614          * because the structure layout is compatible with ILACC,
 1615          * but the burst mode is only available in SSTYLE 3, and
 1616          * burst mode should provide some performance enhancement.
 1617          */
 1618         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
 1619                 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
 1620         else
 1621                 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
 1622         pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);
 1623 
 1624         /* Initialize the transmit descriptor ring. */
 1625         memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
 1626         PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
 1627             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1628         sc->sc_txfree = PCN_NTXDESC;
 1629         sc->sc_txnext = 0;
 1630 
 1631         /* Initialize the transmit job descriptors. */
 1632         for (i = 0; i < PCN_TXQUEUELEN; i++)
 1633                 sc->sc_txsoft[i].txs_mbuf = NULL;
 1634         sc->sc_txsfree = PCN_TXQUEUELEN;
 1635         sc->sc_txsnext = 0;
 1636         sc->sc_txsdirty = 0;
 1637 
 1638         /*
 1639          * Initialize the receive descriptor and receive job
 1640          * descriptor rings.
 1641          */
 1642         for (i = 0; i < PCN_NRXDESC; i++) {
 1643                 rxs = &sc->sc_rxsoft[i];
 1644                 if (rxs->rxs_mbuf == NULL) {
 1645                         if ((error = pcn_add_rxbuf(sc, i)) != 0) {
 1646                                 printf("%s: unable to allocate or map rx "
 1647                                     "buffer %d, error = %d\n",
 1648                                     sc->sc_dev.dv_xname, i, error);
 1649                                 /*
 1650                                  * XXX Should attempt to run with fewer receive
 1651                                  * XXX buffers instead of just failing.
 1652                                  */
 1653                                 pcn_rxdrain(sc);
 1654                                 goto out;
 1655                         }
 1656                 } else
 1657                         PCN_INIT_RXDESC(sc, i);
 1658         }
 1659         sc->sc_rxptr = 0;
 1660 
 1661         /* Initialize MODE for the initialization block. */
 1662         sc->sc_mode = 0;
 1663         if (ifp->if_flags & IFF_PROMISC)
 1664                 sc->sc_mode |= LE_C15_PROM;
 1665         if ((ifp->if_flags & IFF_BROADCAST) == 0)
 1666                 sc->sc_mode |= LE_C15_DRCVBC;
 1667 
 1668         /*
 1669          * If we have MII, simply select MII in the MODE register,
 1670          * and clear ASEL.  Otherwise, let ASEL stand (for now),
 1671          * and leave PORTSEL alone (it is ignored when ASEL is set).
 1672          */
 1673         if (sc->sc_flags & PCN_F_HAS_MII) {
 1674                 pcn_bcr_write(sc, LE_BCR2,
 1675                     pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
 1676                 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);
 1677 
 1678                 /*
 1679                  * Disable MII auto-negotiation.  We handle that in
 1680                  * our own MII layer.
 1681                  */
 1682                 pcn_bcr_write(sc, LE_BCR32,
 1683                     pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
 1684         }
 1685 
 1686         /*
 1687          * Set the Tx and Rx descriptor ring addresses in the init
 1688          * block, along with the TLEN and RLEN fields of the init
 1689          * block MODE register.
 1690          */
 1691         sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
 1692         sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
 1693         sc->sc_initblock.init_mode = htole32(sc->sc_mode |
 1694             ((ffs(PCN_NTXDESC) - 1) << 28) |
 1695             ((ffs(PCN_NRXDESC) - 1) << 20));
 1696 
 1697         /* Set the station address in the init block. */
 1698         sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
 1699             (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
 1700         sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
 1701             (enaddr[5] << 8));
 1702 
 1703         /* Set the multicast filter in the init block. */
 1704         pcn_set_filter(sc);
 1705 
 1706         /* Initialize CSR3. */
 1707         pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);
 1708 
 1709         /* Initialize CSR4. */
 1710         pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
 1711             LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);
 1712 
 1713         /* Initialize CSR5. */
 1714         sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
 1715         pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);
 1716 
 1717         /*
 1718          * If we have an Am79c971 or greater, initialize CSR7.
 1719          *
 1720          * XXX Might be nice to use the MII auto-poll interrupt someday.
 1721          */
 1722         switch (sc->sc_variant->pcv_chipid) {
 1723         case PARTID_Am79c970:
 1724         case PARTID_Am79c970A:
 1725                 /* Not available on these chips. */
 1726                 break;
 1727 
 1728         default:
 1729                 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
 1730                 break;
 1731         }
 1732 
 1733         /*
 1734          * On the Am79c970A and greater, initialize BCR18 to
 1735          * enable burst mode.
 1736          *
 1737          * Also enable the "no underflow" option on the Am79c971 and
 1738          * higher, which prevents the chip from generating transmit
 1739          * underflows, yet still provides decent performance.  Note that
 1740          * if the chip is not connected to external SRAM, then we still
 1741          * have to handle underflow errors (the NOUFLO bit is ignored in
 1742          * that case).
 1743          */
 1744         reg = pcn_bcr_read(sc, LE_BCR18);
 1745         switch (sc->sc_variant->pcv_chipid) {
 1746         case PARTID_Am79c970:
 1747                 break;
 1748 
 1749         case PARTID_Am79c970A:
 1750                 reg |= LE_B18_BREADE|LE_B18_BWRITE;
 1751                 break;
 1752 
 1753         default:
 1754                 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
 1755                 break;
 1756         }
 1757         pcn_bcr_write(sc, LE_BCR18, reg);
 1758 
 1759         /*
 1760          * Initialize CSR80 (FIFO thresholds for Tx and Rx).
 1761          */
 1762         pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
 1763             LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));
 1764 
 1765         /*
 1766          * Send the init block to the chip, and wait for it
 1767          * to be processed.
 1768          */
 1769         PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
 1770         pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
 1771         pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
 1772         pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
 1773         delay(100);
 1774         for (i = 0; i < 10000; i++) {
 1775                 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
 1776                         break;
 1777                 delay(10);
 1778         }
 1779         PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
 1780         if (i == 10000) {
 1781                 printf("%s: timeout processing init block\n",
 1782                     sc->sc_dev.dv_xname);
 1783                 error = EIO;
 1784                 goto out;
 1785         }
 1786 
 1787         /* Set the media. */
 1788         (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
 1789 
 1790         /* Enable interrupts and external activity (and ACK IDON). */
 1791         pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);
 1792 
 1793         if (sc->sc_flags & PCN_F_HAS_MII) {
 1794                 /* Start the one second MII clock. */
 1795                 callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
 1796         }
 1797 
 1798         /* ...all done! */
 1799         ifp->if_flags |= IFF_RUNNING;
 1800         ifp->if_flags &= ~IFF_OACTIVE;
 1801 
 1802  out:
 1803         if (error)
 1804                 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
 1805         return (error);
 1806 }
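
/*
 * Editor's note (not part of if_pcn.c): in pcn_init() the ring lengths are
 * encoded into the init block MODE word as powers of two -- ffs(n) - 1
 * equals log2(n) when n is a power of two, and the result is shifted into
 * the TLEN (bits 28-31) and RLEN (bits 20-23) fields.  The stand-alone
 * check below assumes example ring sizes of 256 and 128; the real
 * PCN_NTXDESC/PCN_NRXDESC values are defined elsewhere in the driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

int
main(void)
{
        int ntxdesc = 256, nrxdesc = 128;       /* example sizes only */
        uint32_t mode = 0;

        /* Same encoding as the init_mode assignment in pcn_init(). */
        mode |= (uint32_t)(ffs(ntxdesc) - 1) << 28;     /* TLEN */
        mode |= (uint32_t)(ffs(nrxdesc) - 1) << 20;     /* RLEN */

        printf("TLEN = %u, RLEN = %u, mode = 0x%08x\n",
            (unsigned)((mode >> 28) & 0xf), (unsigned)((mode >> 20) & 0xf),
            (unsigned)mode);
        return 0;
}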
 1807 
 1808 /*
 1809  * pcn_rxdrain:
 1810  *
 1811  *      Drain the receive queue.
 1812  */
 1813 void
 1814 pcn_rxdrain(struct pcn_softc *sc)
 1815 {
 1816         struct pcn_rxsoft *rxs;
 1817         int i;
 1818 
 1819         for (i = 0; i < PCN_NRXDESC; i++) {
 1820                 rxs = &sc->sc_rxsoft[i];
 1821                 if (rxs->rxs_mbuf != NULL) {
 1822                         bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 1823                         m_freem(rxs->rxs_mbuf);
 1824                         rxs->rxs_mbuf = NULL;
 1825                 }
 1826         }
 1827 }
 1828 
 1829 /*
 1830  * pcn_stop:            [ifnet interface function]
 1831  *
 1832  *      Stop transmission on the interface.
 1833  */
 1834 void
 1835 pcn_stop(struct ifnet *ifp, int disable)
 1836 {
 1837         struct pcn_softc *sc = ifp->if_softc;
 1838         struct pcn_txsoft *txs;
 1839         int i;
 1840 
 1841         if (sc->sc_flags & PCN_F_HAS_MII) {
 1842                 /* Stop the one second clock. */
 1843                 callout_stop(&sc->sc_tick_ch);
 1844 
 1845                 /* Down the MII. */
 1846                 mii_down(&sc->sc_mii);
 1847         }
 1848 
 1849         /* Stop the chip. */
 1850         pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
 1851 
 1852         /* Release any queued transmit buffers. */
 1853         for (i = 0; i < PCN_TXQUEUELEN; i++) {
 1854                 txs = &sc->sc_txsoft[i];
 1855                 if (txs->txs_mbuf != NULL) {
 1856                         bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 1857                         m_freem(txs->txs_mbuf);
 1858                         txs->txs_mbuf = NULL;
 1859                 }
 1860         }
 1861 
 1862         if (disable)
 1863                 pcn_rxdrain(sc);
 1864 
 1865         /* Mark the interface as down and cancel the watchdog timer. */
 1866         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);  
 1867         ifp->if_timer = 0;
 1868 }
 1869 
 1870 /*
 1871  * pcn_add_rxbuf:
 1872  *
 1873  *      Add a receive buffer to the indicated descriptor.
 1874  */
 1875 int
 1876 pcn_add_rxbuf(struct pcn_softc *sc, int idx)
 1877 {
 1878         struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
 1879         struct mbuf *m;
 1880         int error;
 1881 
 1882         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1883         if (m == NULL)
 1884                 return (ENOBUFS);
 1885 
 1886         MCLGET(m, M_DONTWAIT);
 1887         if ((m->m_flags & M_EXT) == 0) {  
 1888                 m_freem(m);
 1889                 return (ENOBUFS); 
 1890         }
 1891 
 1892         if (rxs->rxs_mbuf != NULL)
 1893                 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 1894 
 1895         rxs->rxs_mbuf = m;
 1896 
 1897         error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
 1898             m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
 1899             BUS_DMA_READ|BUS_DMA_NOWAIT);
 1900         if (error) {
 1901                 printf("%s: can't load rx DMA map %d, error = %d\n",
 1902                     sc->sc_dev.dv_xname, idx, error);
 1903                 panic("pcn_add_rxbuf");
 1904         }
 1905 
 1906         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 1907             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 1908 
 1909         PCN_INIT_RXDESC(sc, idx);
 1910 
 1911         return (0);
 1912 }
 1913 
 1914 /*
 1915  * pcn_set_filter:
 1916  *
 1917  *      Set up the receive filter.
 1918  */
 1919 void
 1920 pcn_set_filter(struct pcn_softc *sc)
 1921 {
 1922         struct ethercom *ec = &sc->sc_ethercom;
 1923         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1924         struct ether_multi *enm;
 1925         struct ether_multistep step;
 1926         uint32_t crc;
 1927 
 1928         /*
 1929          * Set up the multicast address filter by passing all multicast
 1930          * addresses through a CRC generator, and then using the high
 1931          * order 6 bits as an index into the 64-bit logical address
 1932          * filter.  The high order bits select the word, while the rest
 1933          * of the bits select the bit within the word.
 1934          */
 1935         
 1936         if (ifp->if_flags & IFF_PROMISC)
 1937                 goto allmulti;
 1938 
 1939         sc->sc_initblock.init_ladrf[0] =
 1940             sc->sc_initblock.init_ladrf[1] =
 1941             sc->sc_initblock.init_ladrf[2] =
 1942             sc->sc_initblock.init_ladrf[3] = 0;
 1943         
 1944         ETHER_FIRST_MULTI(step, ec, enm);
 1945         while (enm != NULL) { 
 1946                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 1947                         /*
 1948                          * We must listen to a range of multicast addresses.
 1949                          * For now, just accept all multicasts, rather than
 1950                          * trying to set only those filter bits needed to match
 1951                          * the range.  (At this time, the only use of address
 1952                          * ranges is for IP multicast routing, for which the
 1953                          * range is big enough to require all bits set.)
 1954                          */
 1955                         goto allmulti;
 1956                 }
 1957 
 1958                 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
 1959 
 1960                 /* Just want the 6 most significant bits. */
 1961                 crc >>= 26;
 1962 
 1963                 /* Set the corresponding bit in the filter. */
 1964                 sc->sc_initblock.init_ladrf[crc >> 4] |=
 1965                     htole16(1 << (crc & 0xf));
 1966 
 1967                 ETHER_NEXT_MULTI(step, enm);
 1968         }
 1969 
 1970         ifp->if_flags &= ~IFF_ALLMULTI;
 1971         return;
 1972 
 1973  allmulti:
 1974         ifp->if_flags |= IFF_ALLMULTI;
 1975         sc->sc_initblock.init_ladrf[0] =
 1976             sc->sc_initblock.init_ladrf[1] =
 1977             sc->sc_initblock.init_ladrf[2] =
 1978             sc->sc_initblock.init_ladrf[3] = 0xffff;
 1979 }
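
/*
 * Editor's sketch (not part of if_pcn.c): pcn_set_filter() hashes each
 * multicast address with the LSB-first Ethernet CRC-32 and uses the top
 * six bits of the result as an index into the 64-bit logical address
 * filter -- the upper bits of the index select one of the four 16-bit
 * words, the lower four bits select the bit within it.  The stand-alone
 * program below reimplements that selection; crc32_le() is a bitwise
 * equivalent of ether_crc32_le() (polynomial 0xedb88320, initial value
 * 0xffffffff, no final inversion), and the address used is an arbitrary
 * example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETHER_ADDR_LEN  6

static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xffffffff;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : crc >> 1;
        }
        return crc;
}

int
main(void)
{
        /* Example multicast address (IPv4 all-hosts group). */
        const uint8_t addr[ETHER_ADDR_LEN] =
            { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint16_t ladrf[4];
        uint32_t crc;

        memset(ladrf, 0, sizeof(ladrf));

        /* Same bit selection as pcn_set_filter(). */
        crc = crc32_le(addr, ETHER_ADDR_LEN);
        crc >>= 26;                             /* keep the 6 MSBs */
        ladrf[crc >> 4] |= 1 << (crc & 0xf);

        printf("hash bit %u -> ladrf[%u] = 0x%04x\n",
            (unsigned)crc, (unsigned)(crc >> 4), ladrf[crc >> 4]);
        return 0;
}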
 1980 
 1981 /*
 1982  * pcn_79c970_mediainit:
 1983  *
 1984  *      Initialize media for the Am79c970.
 1985  */
 1986 void
 1987 pcn_79c970_mediainit(struct pcn_softc *sc)
 1988 {
 1989         const char *sep = "";
 1990 
 1991         ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
 1992             pcn_79c970_mediastatus);
 1993 
 1994 #define ADD(str, m, d)                                                  \
 1995 do {                                                                    \
 1996         printf("%s%s", sep, str);                                       \
 1997         ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);   \
 1998         sep = ", ";                                                     \
 1999 } while (/*CONSTCOND*/0)
 2000 
 2001         printf("%s: ", sc->sc_dev.dv_xname);
 2002         ADD("10base5", IFM_10_5, PORTSEL_AUI);
 2003         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 2004                 ADD("10base5-FDX", IFM_10_5|IFM_FDX, PORTSEL_AUI);
 2005         ADD("10baseT", IFM_10_T, PORTSEL_10T);
 2006         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 2007                 ADD("10baseT-FDX", IFM_10_T|IFM_FDX, PORTSEL_10T);
 2008         ADD("auto", IFM_AUTO, 0);
 2009         if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
 2010                 ADD("auto-FDX", IFM_AUTO|IFM_FDX, 0);
 2011         printf("\n");
 2012 
 2013         ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
 2014 }
 2015 
 2016 /*
 2017  * pcn_79c970_mediastatus:      [ifmedia interface function]
 2018  *
 2019  *      Get the current interface media status (Am79c970 version).
 2020  */
 2021 void
 2022 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2023 {
 2024         struct pcn_softc *sc = ifp->if_softc;
 2025 
 2026         /*
 2027          * The currently selected media is always the active media.
 2028          * Note: We have no way to determine what media the AUTO
 2029          * process picked.
 2030          */
 2031         ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
 2032 }
 2033 
 2034 /*
 2035  * pcn_79c970_mediachange:      [ifmedia interface function]
 2036  *
 2037  *      Set hardware to newly-selected media (Am79c970 version).
 2038  */
 2039 int
 2040 pcn_79c970_mediachange(struct ifnet *ifp)
 2041 {
 2042         struct pcn_softc *sc = ifp->if_softc;
 2043         uint32_t reg;
 2044 
 2045         if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
 2046                 /*
 2047                  * CSR15:PORTSEL doesn't matter.  Just set BCR2:ASEL.
 2048                  */
 2049                 reg = pcn_bcr_read(sc, LE_BCR2);
 2050                 reg |= LE_B2_ASEL;
 2051                 pcn_bcr_write(sc, LE_BCR2, reg);
 2052         } else {
 2053                 /*
 2054                  * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
 2055                  */
 2056                 reg = pcn_bcr_read(sc, LE_BCR2);
 2057                 reg &= ~LE_B2_ASEL;
 2058                 pcn_bcr_write(sc, LE_BCR2, reg);
 2059 
 2060                 reg = pcn_csr_read(sc, LE_CSR15);
 2061                 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
 2062                     LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
 2063                 pcn_csr_write(sc, LE_CSR15, reg);
 2064         }
 2065 
 2066         if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
 2067                 reg = LE_B9_FDEN;
 2068                 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
 2069                         reg |= LE_B9_AUIFD;
 2070                 pcn_bcr_write(sc, LE_BCR9, reg);
 2071         } else
 2072                 pcn_bcr_write(sc, LE_BCR9, 0);
 2073 
 2074         return (0);
 2075 }
 2076 
 2077 /*
 2078  * pcn_79c971_mediainit:
 2079  *
 2080  *      Initialize media for the Am79c971.
 2081  */
 2082 void
 2083 pcn_79c971_mediainit(struct pcn_softc *sc)
 2084 {
 2085         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 2086 
 2087         /* We have MII. */
 2088         sc->sc_flags |= PCN_F_HAS_MII;
 2089 
 2090         /*
 2091          * The built-in 10BASE-T interface is mapped to the MII
 2092          * on the PCNet-FAST.  Unfortunately, there's no EEPROM
 2093          * word that tells us which PHY to use.  Since the 10BASE-T
 2094          * interface is always at PHY 31, we make a note of the
 2095          * first PHY that responds, and disallow any PHYs after
 2096          * it.  This is all handled in the MII read routine.
 2097          */
 2098         sc->sc_phyaddr = -1;
 2099 
 2100         /* Initialize our media structures and probe the MII. */
 2101         sc->sc_mii.mii_ifp = ifp;
 2102         sc->sc_mii.mii_readreg = pcn_mii_readreg;
 2103         sc->sc_mii.mii_writereg = pcn_mii_writereg;
 2104         sc->sc_mii.mii_statchg = pcn_mii_statchg;
 2105         ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
 2106             pcn_79c971_mediastatus);
 2107 
 2108         mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
 2109             MII_OFFSET_ANY, 0);
 2110         if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
 2111                 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
 2112                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
 2113         } else
 2114                 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
 2115 }
 2116 
 2117 /*
 2118  * pcn_79c971_mediastatus:      [ifmedia interface function]
 2119  *
 2120  *      Get the current interface media status (Am79c971 version).
 2121  */
 2122 void
 2123 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 2124 {
 2125         struct pcn_softc *sc = ifp->if_softc;
 2126 
 2127         mii_pollstat(&sc->sc_mii);
 2128         ifmr->ifm_status = sc->sc_mii.mii_media_status;
 2129         ifmr->ifm_active = sc->sc_mii.mii_media_active;
 2130 }
 2131 
 2132 /*
 2133  * pcn_79c971_mediachange:      [ifmedia interface function]
 2134  *
 2135  *      Set hardware to newly-selected media (Am79c971 version).
 2136  */
 2137 int
 2138 pcn_79c971_mediachange(struct ifnet *ifp)
 2139 {
 2140         struct pcn_softc *sc = ifp->if_softc;
 2141 
 2142         if (ifp->if_flags & IFF_UP)
 2143                 mii_mediachg(&sc->sc_mii);
 2144         return (0);
 2145 }
 2146 
 2147 /*
 2148  * pcn_mii_readreg:     [mii interface function]
 2149  *
 2150  *      Read a PHY register on the MII.
 2151  */
 2152 int
 2153 pcn_mii_readreg(struct device *self, int phy, int reg)
 2154 {
 2155         struct pcn_softc *sc = (void *) self;
 2156         uint32_t rv;
 2157 
 2158         if (sc->sc_phyaddr != -1 && phy != sc->sc_phyaddr)
 2159                 return (0);
 2160 
 2161         pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
 2162         rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
 2163         if (rv == 0xffff)
 2164                 return (0);
 2165 
 2166         if (sc->sc_phyaddr == -1)
 2167                 sc->sc_phyaddr = phy;
 2168 
 2169         return (rv);
 2170 }
 2171 
 2172 /*
 2173  * pcn_mii_writereg:    [mii interface function]
 2174  *
 2175  *      Write a PHY register on the MII.
 2176  */
 2177 void
 2178 pcn_mii_writereg(struct device *self, int phy, int reg, int val)
 2179 {
 2180         struct pcn_softc *sc = (void *) self;
 2181 
 2182         pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
 2183         pcn_bcr_write(sc, LE_BCR34, val);
 2184 }
 2185 
 2186 /*
 2187  * pcn_mii_statchg:     [mii interface function]
 2188  *
 2189  *      Callback from MII layer when media changes.
 2190  */
 2191 void
 2192 pcn_mii_statchg(struct device *self)
 2193 {
 2194         struct pcn_softc *sc = (void *) self;
 2195 
 2196         if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
 2197                 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
 2198         else
 2199                 pcn_bcr_write(sc, LE_BCR9, 0);
 2200 }
