The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System, Second Edition

[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_wm.c

Version: -  FREEBSD  -  FREEBSD10  -  FREEBSD9  -  FREEBSD92  -  FREEBSD91  -  FREEBSD90  -  FREEBSD8  -  FREEBSD82  -  FREEBSD81  -  FREEBSD80  -  FREEBSD7  -  FREEBSD74  -  FREEBSD73  -  FREEBSD72  -  FREEBSD71  -  FREEBSD70  -  FREEBSD6  -  FREEBSD64  -  FREEBSD63  -  FREEBSD62  -  FREEBSD61  -  FREEBSD60  -  FREEBSD5  -  FREEBSD55  -  FREEBSD54  -  FREEBSD53  -  FREEBSD52  -  FREEBSD51  -  FREEBSD50  -  FREEBSD4  -  FREEBSD3  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: if_wm.c,v 1.162.4.15 2011/03/07 04:14:19 riz Exp $     */
    2 
    3 /*
    4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
    5  * All rights reserved.
    6  *
    7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      Wasabi Systems, Inc.
   21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   22  *    or promote products derived from this software without specific prior
   23  *    written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 
   38 /*******************************************************************************
   39 
   40   Copyright (c) 2001-2005, Intel Corporation 
   41   All rights reserved.
   42   
   43   Redistribution and use in source and binary forms, with or without 
   44   modification, are permitted provided that the following conditions are met:
   45   
   46    1. Redistributions of source code must retain the above copyright notice, 
   47       this list of conditions and the following disclaimer.
   48   
   49    2. Redistributions in binary form must reproduce the above copyright 
   50       notice, this list of conditions and the following disclaimer in the 
   51       documentation and/or other materials provided with the distribution.
   52   
   53    3. Neither the name of the Intel Corporation nor the names of its 
   54       contributors may be used to endorse or promote products derived from 
   55       this software without specific prior written permission.
   56   
   57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
   59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
   60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
   61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
   62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
   63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
   64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
   65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
   66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   67   POSSIBILITY OF SUCH DAMAGE.
   68 
   69 *******************************************************************************/
   70 /*
   71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
   72  *
   73  * TODO (in order of importance):
   74  *
   75  *      - Rework how parameters are loaded from the EEPROM.
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.162.4.15 2011/03/07 04:14:19 riz Exp $");
   80 
   81 #include "bpfilter.h"
   82 #include "rnd.h"
   83 
   84 #include <sys/param.h>
   85 #include <sys/systm.h>
   86 #include <sys/callout.h>
   87 #include <sys/mbuf.h>
   88 #include <sys/malloc.h>
   89 #include <sys/kernel.h>
   90 #include <sys/socket.h>
   91 #include <sys/ioctl.h>
   92 #include <sys/errno.h>
   93 #include <sys/device.h>
   94 #include <sys/queue.h>
   95 #include <sys/syslog.h>
   96 
   97 #include <uvm/uvm_extern.h>             /* for PAGE_SIZE */
   98 
   99 #if NRND > 0
  100 #include <sys/rnd.h>
  101 #endif
  102 
  103 #include <net/if.h>
  104 #include <net/if_dl.h>
  105 #include <net/if_media.h>
  106 #include <net/if_ether.h>
  107 
  108 #if NBPFILTER > 0
  109 #include <net/bpf.h>
  110 #endif
  111 
  112 #include <netinet/in.h>                 /* XXX for struct ip */
  113 #include <netinet/in_systm.h>           /* XXX for struct ip */
  114 #include <netinet/ip.h>                 /* XXX for struct ip */
  115 #include <netinet/ip6.h>                /* XXX for struct ip6_hdr */
  116 #include <netinet/tcp.h>                /* XXX for struct tcphdr */
  117 
  118 #include <sys/bus.h>
  119 #include <sys/intr.h>
  120 #include <machine/endian.h>
  121 
  122 #include <dev/mii/mii.h>
  123 #include <dev/mii/miivar.h>
  124 #include <dev/mii/miidevs.h>
  125 #include <dev/mii/mii_bitbang.h>
  126 #include <dev/mii/ikphyreg.h>
  127 #include <dev/mii/igphyreg.h>
  128 #include <dev/mii/igphyvar.h>
  129 #include <dev/mii/inbmphyreg.h>
  130 
  131 #include <dev/pci/pcireg.h>
  132 #include <dev/pci/pcivar.h>
  133 #include <dev/pci/pcidevs.h>
  134 
  135 #include <dev/pci/if_wmreg.h>
  136 #include <dev/pci/if_wmvar.h>
  137 
  138 #ifdef WM_DEBUG
  139 #define WM_DEBUG_LINK           0x01
  140 #define WM_DEBUG_TX             0x02
  141 #define WM_DEBUG_RX             0x04
  142 #define WM_DEBUG_GMII           0x08
  143 int     wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
  144 
  145 #define DPRINTF(x, y)   if (wm_debug & (x)) printf y
  146 #else
  147 #define DPRINTF(x, y)   /* nothing */
  148 #endif /* WM_DEBUG */
  149 
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS              256     /* max DMA segments per packet */
#define WM_IFQUEUELEN           256     /* if_snd queue length */
#define WM_TXQUEUELEN_MAX       64      /* max Tx jobs managed at once */
#define WM_TXQUEUELEN_MAX_82547 16      /* i82547 uses a shorter job queue */
#define WM_TXQUEUELEN(sc)       ((sc)->sc_txnum)
/* Masking works because sc_txnum/sc_ntxdesc must be powers of two. */
#define WM_TXQUEUELEN_MASK(sc)  (WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)       (WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542        256     /* ring size on < 82544 (errata) */
#define WM_NTXDESC_82544        4096    /* ring size on >= 82544 */
#define WM_NTXDESC(sc)          ((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)     (WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)       (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
/* Ring-index successor operations (wrap via power-of-two mask). */
#define WM_NEXTTX(sc, x)        (((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)       (((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA             round_page(IP_MAXPACKET) /* for TSO */
  177 
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC              256
#define WM_NRXDESC_MASK         (WM_NRXDESC - 1)
/* Ring-index arithmetic; relies on WM_NRXDESC being a power of two. */
#define WM_NEXTRX(x)            (((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)            (((x) - 1) & WM_NRXDESC_MASK)
  188 
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 *
 * NOTE: this layout is shared with the hardware via DMA; do not reorder
 * or pad the members.
 */
struct wm_control_data_82544 {
        /*
         * The receive descriptors.
         */
        wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

        /*
         * The transmit descriptors.  Put these at the end, because
         * we might use a smaller number of them.
         */
        wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};
  206 
/*
 * Same layout as wm_control_data_82544, but with the smaller Tx ring
 * used on < 82544 parts (see WM_NTXDESC_82542 errata note above).
 */
struct wm_control_data_82542 {
        wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
        wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offset of descriptor (x) within the control-data clump. */
#define WM_CDOFF(x)     offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)   WM_CDOFF(wcd_txdescs[(x)])
#define WM_CDRXOFF(x)   WM_CDOFF(wcd_rxdescs[(x)])
  215 
/*
 * Software state for transmit jobs.  One job may span several hardware
 * descriptors (txs_firstdesc .. txs_lastdesc).
 */
struct wm_txsoft {
        struct mbuf *txs_mbuf;          /* head of our mbuf chain */
        bus_dmamap_t txs_dmamap;        /* our DMA map */
        int txs_firstdesc;              /* first descriptor in packet */
        int txs_lastdesc;               /* last descriptor in packet */
        int txs_ndesc;                  /* # of descriptors used */
};
  226 
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
        struct mbuf *rxs_mbuf;          /* head of our mbuf chain */
        bus_dmamap_t rxs_dmamap;        /* our DMA map */
};
  236 
  237 #define WM_LINKUP_TIMEOUT       50
  238 
  239 static uint16_t swfwphysem[] = {
  240         SWFW_PHY0_SM,
  241         SWFW_PHY1_SM,
  242         SWFW_PHY2_SM,
  243         SWFW_PHY3_SM
  244 };
  245 
/*
 * Software state per device.  One instance is allocated per attached
 * i8254x MAC by autoconfiguration (see CFATTACH_DECL_NEW below).
 */
struct wm_softc {
        device_t sc_dev;                /* generic device information */
        bus_space_tag_t sc_st;          /* bus space tag */
        bus_space_handle_t sc_sh;       /* bus space handle */
        bus_space_tag_t sc_iot;         /* I/O space tag */
        bus_space_handle_t sc_ioh;      /* I/O space handle */
        bus_space_tag_t sc_flasht;      /* flash registers space tag */
        bus_space_handle_t sc_flashh;   /* flash registers space handle */
        bus_dma_tag_t sc_dmat;          /* bus DMA tag */
        bus_dmamap_t sc_cddmamap;       /* control data DMA map */
/* Convenience alias: bus address of the control-data clump. */
#define sc_cddma        sc_cddmamap->dm_segs[0].ds_addr

        struct ethercom sc_ethercom;    /* ethernet common data */
        struct mii_data sc_mii;         /* MII/media information */

        pci_chipset_tag_t sc_pc;        /* PCI chipset tag */
        pcitag_t sc_pcitag;             /* PCI tag for this device */
        int sc_bus_speed;               /* PCI/PCIX bus speed */
        int sc_pcixe_capoff;            /* PCI[Xe] capability register offset */

        wm_chip_type sc_type;           /* MAC type */
        int sc_rev;                     /* MAC revision */
        wm_phy_type sc_phytype;         /* PHY type */
        int sc_funcid;                  /* unit number of the chip (0 to 3) */
        int sc_flags;                   /* flags; see below */
        int sc_if_flags;                /* last if_flags */
        int sc_flowflags;               /* 802.3x flow control flags */
        int sc_align_tweak;             /* Rx buffer "scoot" offset; see
                                           WM_INIT_RXDESC below */

        void *sc_ih;                    /* interrupt cookie */
        callout_t sc_tick_ch;           /* tick callout */

        int sc_ee_addrbits;             /* EEPROM address bits */
        int sc_ich8_flash_base;         /* ICH8 NVM flash base offset */
        int sc_ich8_flash_bank_size;    /* ICH8 NVM flash bank size */
        int sc_nvm_k1_enabled;          /* K1 enable state; presumably read
                                           from NVM -- see the *_k1_*
                                           workaround routines */

        /*
         * Software state for the transmit and receive descriptors.
         */
        int                     sc_txnum;       /* must be a power of two */
        struct wm_txsoft        sc_txsoft[WM_TXQUEUELEN_MAX];
        struct wm_rxsoft        sc_rxsoft[WM_NRXDESC];

        /*
         * Control data structures.
         */
        int                     sc_ntxdesc;     /* must be a power of two */
        struct wm_control_data_82544 *sc_control_data;
/* Shorthand accessors for the descriptor rings inside the clump. */
#define sc_txdescs      sc_control_data->wcd_txdescs
#define sc_rxdescs      sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
        /* Event counters. */
        struct evcnt sc_ev_txsstall;    /* Tx stalled due to no txs */
        struct evcnt sc_ev_txdstall;    /* Tx stalled due to no txd */
        struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
        struct evcnt sc_ev_txdw;        /* Tx descriptor interrupts */
        struct evcnt sc_ev_txqe;        /* Tx queue empty interrupts */
        struct evcnt sc_ev_rxintr;      /* Rx interrupts */
        struct evcnt sc_ev_linkintr;    /* Link interrupts */

        struct evcnt sc_ev_rxipsum;     /* IP checksums checked in-bound */
        struct evcnt sc_ev_rxtusum;     /* TCP/UDP cksums checked in-bound */
        struct evcnt sc_ev_txipsum;     /* IP checksums comp. out-bound */
        struct evcnt sc_ev_txtusum;     /* TCP/UDP cksums comp. out-bound */
        struct evcnt sc_ev_txtusum6;    /* TCP/UDP v6 cksums comp. out-bound */
        struct evcnt sc_ev_txtso;       /* TCP seg offload out-bound (IPv4) */
        struct evcnt sc_ev_txtso6;      /* TCP seg offload out-bound (IPv6) */
        struct evcnt sc_ev_txtsopain;   /* painful header manip. for TSO */

        struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
        struct evcnt sc_ev_txdrop;      /* Tx packets dropped (too many segs) */

        struct evcnt sc_ev_tu;          /* Tx underrun */

        struct evcnt sc_ev_tx_xoff;     /* Tx PAUSE(!0) frames */
        struct evcnt sc_ev_tx_xon;      /* Tx PAUSE(0) frames */
        struct evcnt sc_ev_rx_xoff;     /* Rx PAUSE(!0) frames */
        struct evcnt sc_ev_rx_xon;      /* Rx PAUSE(0) frames */
        struct evcnt sc_ev_rx_macctl;   /* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

        bus_addr_t sc_tdt_reg;          /* offset of TDT register */

        int     sc_txfree;              /* number of free Tx descriptors */
        int     sc_txnext;              /* next ready Tx descriptor */

        int     sc_txsfree;             /* number of free Tx jobs */
        int     sc_txsnext;             /* next free Tx job */
        int     sc_txsdirty;            /* dirty Tx jobs */

        /* These 5 variables are used only on the 82547. */
        int     sc_txfifo_size;         /* Tx FIFO size */
        int     sc_txfifo_head;         /* current head of FIFO */
        uint32_t sc_txfifo_addr;        /* internal address of start of FIFO */
        int     sc_txfifo_stall;        /* Tx FIFO is stalled */
        callout_t sc_txfifo_ch;         /* Tx FIFO stall work-around timer */

        bus_addr_t sc_rdt_reg;          /* offset of RDT register */

        int     sc_rxptr;               /* next ready Rx descriptor/queue ent */
        int     sc_rxdiscard;           /* presumably: discarding remainder of
                                           current Rx packet -- confirm in
                                           wm_rxintr() */
        int     sc_rxlen;               /* length of Rx chain being built;
                                           reset by WM_RXCHAIN_RESET */
        struct mbuf *sc_rxhead;         /* head of in-progress Rx mbuf chain */
        struct mbuf *sc_rxtail;         /* tail of that chain */
        struct mbuf **sc_rxtailp;       /* where to link the next mbuf; see
                                           WM_RXCHAIN_LINK below */

        uint32_t sc_ctrl;               /* prototype CTRL register */
#if 0
        uint32_t sc_ctrl_ext;           /* prototype CTRL_EXT register */
#endif
        uint32_t sc_icr;                /* prototype interrupt bits */
        uint32_t sc_itr;                /* prototype intr throttling reg */
        uint32_t sc_tctl;               /* prototype TCTL register */
        uint32_t sc_rctl;               /* prototype RCTL register */
        uint32_t sc_txcw;               /* prototype TXCW register */
        uint32_t sc_tipg;               /* prototype TIPG register */
        uint32_t sc_fcrtl;              /* prototype FCRTL register */
        uint32_t sc_pba;                /* prototype PBA register */

        int sc_tbi_linkup;              /* TBI link status */
        int sc_tbi_anegticks;           /* autonegotiation ticks */
        int sc_tbi_ticks;               /* tbi ticks */
        int sc_tbi_nrxcfg;              /* count of ICR_RXCFG */
        int sc_tbi_lastnrxcfg;          /* count of ICR_RXCFG (on last tick) */

        int sc_mchash_type;             /* multicast filter offset */

#if NRND > 0
        rndsource_element_t rnd_source; /* random source */
#endif
};
  382 
/* Reset the in-progress Rx mbuf chain: empty, tail pointer at the head. */
#define WM_RXCHAIN_RESET(sc)                                            \
do {                                                                    \
        (sc)->sc_rxtailp = &(sc)->sc_rxhead;                            \
        *(sc)->sc_rxtailp = NULL;                                       \
        (sc)->sc_rxlen = 0;                                             \
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the Rx chain and advance the tail pointer. */
#define WM_RXCHAIN_LINK(sc, m)                                          \
do {                                                                    \
        *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);                      \
        (sc)->sc_rxtailp = &(m)->m_next;                                \
} while (/*CONSTCOND*/0)

/* Event-counter helpers; compile to nothing without WM_EVENT_COUNTERS. */
#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)       (ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)   (ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)       /* nothing */
#define WM_EVCNT_ADD(ev, val)   /* nothing */
#endif
  403 
/* 32-bit CSR access through the main register window. */
#define CSR_READ(sc, reg)                                               \
        bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)                                         \
        bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Read STATUS back to flush posted writes out to the device. */
#define CSR_WRITE_FLUSH(sc)                                             \
        (void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 NVM flash window access, 32-bit and 16-bit. */
#define ICH8_FLASH_READ32(sc, reg) \
        bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
        bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
        bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
        bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
  420 
/* Bus (DMA) address of Tx/Rx descriptor (x) in the control-data clump. */
#define WM_CDTXADDR(sc, x)      ((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)      ((sc)->sc_cddma + WM_CDRXOFF((x)))

/* Low/high 32 bits of those addresses; high half is 0 when bus_addr_t
 * is 32 bits wide. */
#define WM_CDTXADDR_LO(sc, x)   (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x)                                           \
        (sizeof(bus_addr_t) == 8 ?                                      \
         (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)   (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x)                                           \
        (sizeof(bus_addr_t) == 8 ?                                      \
         (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
  433 
/*
 * Sync (n) Tx descriptors starting at index (x) for CPU/device access,
 * splitting the bus_dmamap_sync into two calls when the range wraps
 * past the end of the ring.
 */
#define WM_CDTXSYNC(sc, x, n, ops)                                      \
do {                                                                    \
        int __x, __n;                                                   \
                                                                        \
        __x = (x);                                                      \
        __n = (n);                                                      \
                                                                        \
        /* If it will wrap around, sync to the end of the ring. */      \
        if ((__x + __n) > WM_NTXDESC(sc)) {                             \
                bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,       \
                    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *         \
                    (WM_NTXDESC(sc) - __x), (ops));                     \
                __n -= (WM_NTXDESC(sc) - __x);                          \
                __x = 0;                                                \
        }                                                               \
                                                                        \
        /* Now sync whatever is left. */                                \
        bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
            WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));    \
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor (x); the Rx range never needs to wrap. */
#define WM_CDRXSYNC(sc, x, ops)                                         \
do {                                                                    \
        bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,               \
           WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));           \
} while (/*CONSTCOND*/0)
  460 
/*
 * (Re)initialize Rx descriptor (x): point it at its mbuf's buffer
 * (offset by sc_align_tweak), clear the status fields, sync it for
 * device access, and hand it to the chip by writing the ring tail
 * (RDT) register.
 */
#define WM_INIT_RXDESC(sc, x)                                           \
do {                                                                    \
        struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];                \
        wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];               \
        struct mbuf *__m = __rxs->rxs_mbuf;                             \
                                                                        \
        /*                                                              \
         * Note: We scoot the packet forward 2 bytes in the buffer      \
         * so that the payload after the Ethernet header is aligned     \
         * to a 4-byte boundary.                                        \
         *                                                              \
         * XXX BRAINDAMAGE ALERT!                                       \
         * The stupid chip uses the same size for every buffer, which   \
         * is set in the Receive Control register.  We are using the 2K \
         * size option, but what we REALLY want is (2K - 2)!  For this  \
         * reason, we can't "scoot" packets longer than the standard    \
         * Ethernet MTU.  On strict-alignment platforms, if the total   \
         * size exceeds (2K - 2) we set align_tweak to 0 and let        \
         * the upper layer copy the headers.                            \
         */                                                             \
        __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;        \
                                                                        \
        wm_set_dma_addr(&__rxd->wrx_addr,                               \
            __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
        __rxd->wrx_len = 0;                                             \
        __rxd->wrx_cksum = 0;                                           \
        __rxd->wrx_status = 0;                                          \
        __rxd->wrx_errors = 0;                                          \
        __rxd->wrx_special = 0;                                         \
        WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
                                                                        \
        CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));                         \
} while (/*CONSTCOND*/0)
  494 
/* Network-interface (ifnet) entry points. */
static void     wm_start(struct ifnet *);
static void     wm_watchdog(struct ifnet *);
static int      wm_ioctl(struct ifnet *, u_long, void *);
static int      wm_init(struct ifnet *);
static void     wm_stop(struct ifnet *, int);

/* Reset, Rx-buffer, and EEPROM helpers. */
static void     wm_reset(struct wm_softc *);
static void     wm_rxdrain(struct wm_softc *);
static int      wm_add_rxbuf(struct wm_softc *, int);
static int      wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int      wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int      wm_validate_eeprom_checksum(struct wm_softc *);
static void     wm_tick(void *);

static void     wm_set_filter(struct wm_softc *);

/* Interrupt service routine and per-source handlers. */
static int      wm_intr(void *);
static void     wm_txintr(struct wm_softc *);
static void     wm_rxintr(struct wm_softc *);
static void     wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X fiber) media handling. */
static void     wm_tbi_mediainit(struct wm_softc *);
static int      wm_tbi_mediachange(struct ifnet *);
static void     wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void     wm_tbi_set_linkled(struct wm_softc *);
static void     wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) register access, one pair per MAC generation. */
static void     wm_gmii_reset(struct wm_softc *);

static int      wm_gmii_i82543_readreg(device_t, int, int);
static void     wm_gmii_i82543_writereg(device_t, int, int, int);

static int      wm_gmii_i82544_readreg(device_t, int, int);
static void     wm_gmii_i82544_writereg(device_t, int, int, int);

static int      wm_gmii_i80003_readreg(device_t, int, int);
static void     wm_gmii_i80003_writereg(device_t, int, int, int);
static int      wm_gmii_bm_readreg(device_t, int, int);
static void     wm_gmii_bm_writereg(device_t, int, int, int);
static int      wm_gmii_hv_readreg(device_t, int, int);
static void     wm_gmii_hv_writereg(device_t, int, int, int);
static int      wm_sgmii_readreg(device_t, int, int);
static void     wm_sgmii_writereg(device_t, int, int, int);

static void     wm_gmii_statchg(device_t);

static void     wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int      wm_gmii_mediachange(struct ifnet *);
static void     wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (KMRN) interface register access. */
static int      wm_kmrn_readreg(struct wm_softc *, int);
static void     wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, NVM, and hardware-semaphore helpers. */
static void     wm_set_spiaddrbits(struct wm_softc *);
static int      wm_match(device_t, cfdata_t, void *);
static void     wm_attach(device_t, device_t, void *);
static int      wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void     wm_get_auto_rd_done(struct wm_softc *);
static void     wm_lan_init_done(struct wm_softc *);
static void     wm_get_cfg_done(struct wm_softc *);
static int      wm_get_swsm_semaphore(struct wm_softc *);
static void     wm_put_swsm_semaphore(struct wm_softc *);
static int      wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int      wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void     wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int      wm_get_swfwhw_semaphore(struct wm_softc *);
static void     wm_put_swfwhw_semaphore(struct wm_softc *);

/* ICH8 flash (NVM) access and chip-specific workarounds. */
static int      wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t  wm_ich8_cycle_init(struct wm_softc *);
static int32_t  wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t  wm_read_ich8_data(struct wm_softc *, uint32_t,
                     uint32_t, uint16_t *);
static int32_t  wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t  wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void     wm_82547_txfifo_stall(void *);
static int      wm_check_mng_mode(struct wm_softc *);
static int      wm_check_mng_mode_ich8lan(struct wm_softc *);
static int      wm_check_mng_mode_82574(struct wm_softc *);
static int      wm_check_mng_mode_generic(struct wm_softc *);
static int      wm_check_reset_block(struct wm_softc *);
static void     wm_get_hw_control(struct wm_softc *);
static int      wm_check_for_link(struct wm_softc *);
static void     wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void     wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void     wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void     wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void     wm_configure_k1_ich8lan(struct wm_softc *, int);
static void     wm_set_pcie_completion_timeout(struct wm_softc *);
static void     wm_reset_init_script_82575(struct wm_softc *);
  586 
/* Autoconfiguration glue: registers wm_match/wm_attach for this driver. */
CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
  589 
/*
 * Devices supported by this driver.
 *
 * wm_lookup() scans this table linearly and stops at the NULL-name
 * terminator, so new entries must be added before the final sentinel.
 * NOTE(review): the #if 0 entries reference WMP_F_SERDES, which is not
 * defined in this visible region — they stay disabled until SERDES media
 * support exists.
 */
static const struct wm_product {
        pci_vendor_id_t         wmp_vendor;     /* PCI vendor ID to match */
        pci_product_id_t        wmp_product;    /* PCI product ID to match */
        const char              *wmp_name;      /* description printed at attach */
        wm_chip_type            wmp_type;       /* chip family (WM_T_*) */
        int                     wmp_flags;      /* media flags, see below */
#define WMP_F_1000X             0x01            /* 1000BASE-X (fiber) media */
#define WMP_F_1000T             0x02            /* 1000BASE-T (copper) media */
} wm_products[] = {
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82542,
          "Intel i82542 1000BASE-X Ethernet",
          WM_T_82542_2_1,       WMP_F_1000X },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82543GC_FIBER,
          "Intel i82543GC 1000BASE-X Ethernet",
          WM_T_82543,           WMP_F_1000X },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82543GC_COPPER,
          "Intel i82543GC 1000BASE-T Ethernet",
          WM_T_82543,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82544EI_COPPER,
          "Intel i82544EI 1000BASE-T Ethernet",
          WM_T_82544,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82544EI_FIBER,
          "Intel i82544EI 1000BASE-X Ethernet",
          WM_T_82544,           WMP_F_1000X },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82544GC_COPPER,
          "Intel i82544GC 1000BASE-T Ethernet",
          WM_T_82544,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82544GC_LOM,
          "Intel i82544GC (LOM) 1000BASE-T Ethernet",
          WM_T_82544,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82540EM,
          "Intel i82540EM 1000BASE-T Ethernet",
          WM_T_82540,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82540EM_LOM,
          "Intel i82540EM (LOM) 1000BASE-T Ethernet",
          WM_T_82540,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82540EP_LOM,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82540EP,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82540EP_LP,
          "Intel i82540EP 1000BASE-T Ethernet",
          WM_T_82540,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82545EM_COPPER,
          "Intel i82545EM 1000BASE-T Ethernet",
          WM_T_82545,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82545GM_COPPER,
          "Intel i82545GM 1000BASE-T Ethernet",
          WM_T_82545_3,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82545GM_FIBER,
          "Intel i82545GM 1000BASE-X Ethernet",
          WM_T_82545_3,         WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82545GM_SERDES,
          "Intel i82545GM Gigabit Ethernet (SERDES)",
          WM_T_82545_3,         WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_COPPER,
          "Intel i82546EB 1000BASE-T Ethernet",
          WM_T_82546,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
          "Intel i82546EB 1000BASE-T Ethernet",
          WM_T_82546,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82545EM_FIBER,
          "Intel i82545EM 1000BASE-X Ethernet",
          WM_T_82545,           WMP_F_1000X },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_FIBER,
          "Intel i82546EB 1000BASE-X Ethernet",
          WM_T_82546,           WMP_F_1000X },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_COPPER,
          "Intel i82546GB 1000BASE-T Ethernet",
          WM_T_82546_3,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_FIBER,
          "Intel i82546GB 1000BASE-X Ethernet",
          WM_T_82546_3,         WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_SERDES,
          "Intel i82546GB Gigabit Ethernet (SERDES)",
          WM_T_82546_3,         WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
          "i82546GB quad-port Gigabit Ethernet",
          WM_T_82546_3,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
          "i82546GB quad-port Gigabit Ethernet (KSP3)",
          WM_T_82546_3,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546GB_PCIE,
          "Intel PRO/1000MT (82546GB)",
          WM_T_82546_3,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541EI,
          "Intel i82541EI 1000BASE-T Ethernet",
          WM_T_82541,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541ER_LOM,
          "Intel i82541ER (LOM) 1000BASE-T Ethernet",
          WM_T_82541,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541EI_MOBILE,
          "Intel i82541EI Mobile 1000BASE-T Ethernet",
          WM_T_82541,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541ER,
          "Intel i82541ER 1000BASE-T Ethernet",
          WM_T_82541_2,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541GI,
          "Intel i82541GI 1000BASE-T Ethernet",
          WM_T_82541_2,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541GI_MOBILE,
          "Intel i82541GI Mobile 1000BASE-T Ethernet",
          WM_T_82541_2,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82541PI,
          "Intel i82541PI 1000BASE-T Ethernet",
          WM_T_82541_2,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82547EI,
          "Intel i82547EI 1000BASE-T Ethernet",
          WM_T_82547,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82547EI_MOBILE,
          "Intel i82547EI Mobile 1000BASE-T Ethernet",
          WM_T_82547,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82547GI,
          "Intel i82547GI 1000BASE-T Ethernet",
          WM_T_82547_2,         WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82571EB_COPPER,
          "Intel PRO/1000 PT (82571EB)",
          WM_T_82571,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82571EB_FIBER,
          "Intel PRO/1000 PF (82571EB)",
          WM_T_82571,           WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82571EB_SERDES,
          "Intel PRO/1000 PB (82571EB)",
          WM_T_82571,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
          "Intel PRO/1000 QT (82571EB)",
          WM_T_82571,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82572EI_COPPER,
          "Intel i82572EI 1000baseT Ethernet",
          WM_T_82572,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
          "Intel® PRO/1000 PT Quad Port Server Adapter",
          WM_T_82571,           WMP_F_1000T, },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82572EI_FIBER,
          "Intel i82572EI 1000baseX Ethernet",
          WM_T_82572,           WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82572EI_SERDES,
          "Intel i82572EI Gigabit Ethernet (SERDES)",
          WM_T_82572,           WMP_F_SERDES },
#endif

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82572EI,
          "Intel i82572EI 1000baseT Ethernet",
          WM_T_82572,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82573E,
          "Intel i82573E",
          WM_T_82573,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82573E_IAMT,
          "Intel i82573E IAMT",
          WM_T_82573,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82573L,
          "Intel i82573L Gigabit Ethernet",
          WM_T_82573,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82574L,
          "Intel i82574L",
          WM_T_82574,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82583V,
          "Intel i82583V",
          WM_T_82583,           WMP_F_1000T },

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
          "i80003 dual 1000baseT Ethernet",
          WM_T_80003,           WMP_F_1000T },

        /*
         * NOTE(review): fiber (1000baseX) part but flagged WMP_F_1000T;
         * looks intentional upstream — confirm before changing.
         */
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
          "i80003 dual 1000baseX Ethernet",
          WM_T_80003,           WMP_F_1000T },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
          "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
          WM_T_80003,           WMP_F_SERDES },
#endif

        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
          "Intel i80003 1000baseT Ethernet",
          WM_T_80003,           WMP_F_1000T },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
          "Intel i80003 Gigabit Ethernet (SERDES)",
          WM_T_80003,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_M_AMT,
          "Intel i82801H (M_AMT) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_AMT,
          "Intel i82801H (AMT) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_LAN,
          "Intel i82801H LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_IFE_LAN,
          "Intel i82801H (IFE) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_M_LAN,
          "Intel i82801H (M) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_IFE_GT,
          "Intel i82801H IFE (GT) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_IFE_G,
          "Intel i82801H IFE (G) LAN Controller",
          WM_T_ICH8,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IGP_AMT,
          "82801I (AMT) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IFE,
          "82801I LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IFE_G,
          "82801I (G) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IFE_GT,
          "82801I (GT) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IGP_C,
          "82801I (C) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IGP_M,
          "82801I mobile LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801H_IGP_M_V,
          "82801I mobile (V) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
          "82801I mobile (AMT) LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_BM,
          "82567LM-4 LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801I_82567V_3,
          "82567V-3 LAN Controller",
          WM_T_ICH9,            WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801J_R_BM_LM,
          "82567LM-2 LAN Controller",
          WM_T_ICH10,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801J_R_BM_LF,
          "82567LF-2 LAN Controller",
          WM_T_ICH10,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801J_D_BM_LM,
          "82567LM-3 LAN Controller",
          WM_T_ICH10,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801J_D_BM_LF,
          "82567LF-3 LAN Controller",
          WM_T_ICH10,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82801J_R_BM_V,
          "82567V-2 LAN Controller",
          WM_T_ICH10,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_PCH_M_LM,
          "PCH LAN (82578LM) Controller",
          WM_T_PCH,             WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_PCH_M_LC,
          "PCH LAN (82578LC) Controller",
          WM_T_PCH,             WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_PCH_D_DM,
          "PCH LAN (82578DM) Controller",
          WM_T_PCH,             WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_PCH_D_DC,
          "PCH LAN (82578DC) Controller",
          WM_T_PCH,             WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82575EB_COPPER,
          "82575EB dual-1000baseT Ethernet",
          WM_T_82575,           WMP_F_1000T },
#if 0
        /*
         * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
         * disabled for now ...
         */
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
          "82575EB dual-1000baseX Ethernet (SERDES)",
          WM_T_82575,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
          "82575GB quad-1000baseT Ethernet",
          WM_T_82575,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
          "82575GB quad-1000baseT Ethernet (PM)",
          WM_T_82575,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_COPPER,
          "82576 1000BaseT Ethernet",
          WM_T_82576,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_FIBER,
          "82576 1000BaseX Ethernet",
          WM_T_82576,           WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_SERDES,
          "82576 gigabit Ethernet (SERDES)",
          WM_T_82576,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
          "82576 quad-1000BaseT Ethernet",
          WM_T_82576,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_NS,
          "82576 gigabit Ethernet",
          WM_T_82576,           WMP_F_1000T },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_NS_SERDES,
          "82576 gigabit Ethernet (SERDES)",
          WM_T_82576,           WMP_F_SERDES },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
          "82576 quad-gigabit Ethernet (SERDES)",
          WM_T_82576,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_COPPER,
          "82580 1000BaseT Ethernet",
          WM_T_82580,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_FIBER,
          "82580 1000BaseX Ethernet",
          WM_T_82580,           WMP_F_1000X },
#if 0
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_SERDES,
          "82580 1000BaseT Ethernet (SERDES)",
          WM_T_82580,           WMP_F_SERDES },
#endif
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_SGMII,
          "82580 gigabit Ethernet (SGMII)",
          WM_T_82580,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
          "82580 dual-1000BaseT Ethernet",
          WM_T_82580,           WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_ER,
          "82580 1000BaseT Ethernet",
          WM_T_82580ER,         WMP_F_1000T },
        { PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82580_ER_DUAL,
          "82580 dual-1000BaseT Ethernet",
          WM_T_82580ER,         WMP_F_1000T },
        /* Terminator: wm_lookup() stops on the NULL name. */
        { 0,                    0,
          NULL,
          0,                    0 },
};
  972 
#ifdef WM_EVENT_COUNTERS
/* Per-Tx-segment event counter names ("txseg1".."txsegN"), filled at attach. */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
  976 
#if 0 /* Not currently used */
/*
 * Indirect I/O-space register read: write the register offset to the
 * address window at I/O offset 0, then read the value back from the
 * data window at I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
        return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
  986 
/*
 * Indirect I/O-space register write: the register offset goes to the
 * address window at I/O offset 0, the value to the data window at
 * I/O offset 4.  Order matters; the address write must come first.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
  994 
  995 static inline void
  996 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
  997     uint32_t data)
  998 {
  999         uint32_t regval;
 1000         int i;
 1001 
 1002         regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
 1003 
 1004         CSR_WRITE(sc, reg, regval);
 1005 
 1006         for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
 1007                 delay(5);
 1008                 if (CSR_READ(sc, reg) & SCTL_CTL_READY)
 1009                         break;
 1010         }
 1011         if (i == SCTL_CTL_POLL_TIMEOUT) {
 1012                 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
 1013                     device_xname(sc->sc_dev), reg);
 1014         }
 1015 }
 1016 
 1017 static inline void
 1018 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
 1019 {
 1020         wa->wa_low = htole32(v & 0xffffffffU);
 1021         if (sizeof(bus_addr_t) == 8)
 1022                 wa->wa_high = htole32((uint64_t) v >> 32);
 1023         else
 1024                 wa->wa_high = 0;
 1025 }
 1026 
 1027 static void
 1028 wm_set_spiaddrbits(struct wm_softc *sc)
 1029 {
 1030         uint32_t reg;
 1031 
 1032         sc->sc_flags |= WM_F_EEPROM_SPI;
 1033         reg = CSR_READ(sc, WMREG_EECD);
 1034         sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
 1035 }
 1036 
 1037 static const struct wm_product *
 1038 wm_lookup(const struct pci_attach_args *pa)
 1039 {
 1040         const struct wm_product *wmp;
 1041 
 1042         for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
 1043                 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
 1044                     PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
 1045                         return wmp;
 1046         }
 1047         return NULL;
 1048 }
 1049 
 1050 static int
 1051 wm_match(device_t parent, cfdata_t cf, void *aux)
 1052 {
 1053         struct pci_attach_args *pa = aux;
 1054 
 1055         if (wm_lookup(pa) != NULL)
 1056                 return 1;
 1057 
 1058         return 0;
 1059 }
 1060 
 1061 static void
 1062 wm_attach(device_t parent, device_t self, void *aux)
 1063 {
 1064         struct wm_softc *sc = device_private(self);
 1065         struct pci_attach_args *pa = aux;
 1066         prop_dictionary_t dict;
 1067         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 1068         pci_chipset_tag_t pc = pa->pa_pc;
 1069         pci_intr_handle_t ih;
 1070         size_t cdata_size;
 1071         const char *intrstr = NULL;
 1072         const char *eetype, *xname;
 1073         bus_space_tag_t memt;
 1074         bus_space_handle_t memh;
 1075         bus_dma_segment_t seg;
 1076         int memh_valid;
 1077         int i, rseg, error;
 1078         const struct wm_product *wmp;
 1079         prop_data_t ea;
 1080         prop_number_t pn;
 1081         uint8_t enaddr[ETHER_ADDR_LEN];
 1082         uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
 1083         pcireg_t preg, memtype;
 1084         uint32_t reg;
 1085 
 1086         sc->sc_dev = self;
 1087         callout_init(&sc->sc_tick_ch, 0);
 1088 
 1089         wmp = wm_lookup(pa);
 1090         if (wmp == NULL) {
 1091                 printf("\n");
 1092                 panic("wm_attach: impossible");
 1093         }
 1094 
 1095         sc->sc_pc = pa->pa_pc;
 1096         sc->sc_pcitag = pa->pa_tag;
 1097 
 1098         if (pci_dma64_available(pa))
 1099                 sc->sc_dmat = pa->pa_dmat64;
 1100         else
 1101                 sc->sc_dmat = pa->pa_dmat;
 1102 
 1103         sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
 1104         aprint_naive(": Ethernet controller\n");
 1105         aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
 1106 
 1107         sc->sc_type = wmp->wmp_type;
 1108         if (sc->sc_type < WM_T_82543) {
 1109                 if (sc->sc_rev < 2) {
 1110                         aprint_error_dev(sc->sc_dev,
 1111                             "i82542 must be at least rev. 2\n");
 1112                         return;
 1113                 }
 1114                 if (sc->sc_rev < 3)
 1115                         sc->sc_type = WM_T_82542_2_0;
 1116         }
 1117 
 1118         if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
 1119             || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
 1120           sc->sc_flags |= WM_F_NEWQUEUE;
 1121 
 1122         /* Set device properties (mactype) */
 1123         dict = device_properties(sc->sc_dev);
 1124         prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
 1125 
 1126         /*
 1127          * Map the device.  All devices support memory-mapped acccess,
 1128          * and it is really required for normal operation.
 1129          */
 1130         memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
 1131         switch (memtype) {
 1132         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
 1133         case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
 1134                 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
 1135                     memtype, 0, &memt, &memh, NULL, NULL) == 0);
 1136                 break;
 1137         default:
 1138                 memh_valid = 0;
 1139                 break;
 1140         }
 1141 
 1142         if (memh_valid) {
 1143                 sc->sc_st = memt;
 1144                 sc->sc_sh = memh;
 1145         } else {
 1146                 aprint_error_dev(sc->sc_dev,
 1147                     "unable to map device registers\n");
 1148                 return;
 1149         }
 1150 
 1151         /*
 1152          * In addition, i82544 and later support I/O mapped indirect
 1153          * register access.  It is not desirable (nor supported in
 1154          * this driver) to use it for normal operation, though it is
 1155          * required to work around bugs in some chip versions.
 1156          */
 1157         if (sc->sc_type >= WM_T_82544) {
 1158                 /* First we have to find the I/O BAR. */
 1159                 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
 1160                         if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
 1161                             PCI_MAPREG_TYPE_IO)
 1162                                 break;
 1163                 }
 1164                 if (i == PCI_MAPREG_END)
 1165                         aprint_error_dev(sc->sc_dev,
 1166                             "WARNING: unable to find I/O BAR\n");
 1167                 else {
 1168                         /*
 1169                          * The i8254x doesn't apparently respond when the
 1170                          * I/O BAR is 0, which looks somewhat like it's not
 1171                          * been configured.
 1172                          */
 1173                         preg = pci_conf_read(pc, pa->pa_tag, i);
 1174                         if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
 1175                                 aprint_error_dev(sc->sc_dev,
 1176                                     "WARNING: I/O BAR at zero.\n");
 1177                         } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
 1178                                         0, &sc->sc_iot, &sc->sc_ioh,
 1179                                         NULL, NULL) == 0) {
 1180                                 sc->sc_flags |= WM_F_IOH_VALID;
 1181                         } else {
 1182                                 aprint_error_dev(sc->sc_dev,
 1183                                     "WARNING: unable to map I/O space\n");
 1184                         }
 1185                 }
 1186 
 1187         }
 1188 
 1189         /* Enable bus mastering.  Disable MWI on the i82542 2.0. */
 1190         preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
 1191         preg |= PCI_COMMAND_MASTER_ENABLE;
 1192         if (sc->sc_type < WM_T_82542_2_1)
 1193                 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
 1194         pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
 1195 
 1196         /* power up chip */
 1197         if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
 1198             NULL)) && error != EOPNOTSUPP) {
 1199                 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
 1200                 return;
 1201         }
 1202 
 1203         /*
 1204          * Map and establish our interrupt.
 1205          */
 1206         if (pci_intr_map(pa, &ih)) {
 1207                 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
 1208                 return;
 1209         }
 1210         intrstr = pci_intr_string(pc, ih);
 1211         sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
 1212         if (sc->sc_ih == NULL) {
 1213                 aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
 1214                 if (intrstr != NULL)
 1215                         aprint_error(" at %s", intrstr);
 1216                 aprint_error("\n");
 1217                 return;
 1218         }
 1219         aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
 1220 
 1221         /*
 1222          * Check the function ID (unit number of the chip).
 1223          */
 1224         if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
 1225             || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
 1226             || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
 1227                 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
 1228                     >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
 1229         else
 1230                 sc->sc_funcid = 0;
 1231 
 1232         /*
 1233          * Determine a few things about the bus we're connected to.
 1234          */
 1235         if (sc->sc_type < WM_T_82543) {
 1236                 /* We don't really know the bus characteristics here. */
 1237                 sc->sc_bus_speed = 33;
 1238         } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
 1239                 /*
 1240                  * CSA (Communication Streaming Architecture) is about as fast
 1241                  * a 32-bit 66MHz PCI Bus.
 1242                  */
 1243                 sc->sc_flags |= WM_F_CSA;
 1244                 sc->sc_bus_speed = 66;
 1245                 aprint_verbose_dev(sc->sc_dev,
 1246                     "Communication Streaming Architecture\n");
 1247                 if (sc->sc_type == WM_T_82547) {
 1248                         callout_init(&sc->sc_txfifo_ch, 0);
 1249                         callout_setfunc(&sc->sc_txfifo_ch,
 1250                                         wm_82547_txfifo_stall, sc);
 1251                         aprint_verbose_dev(sc->sc_dev,
 1252                             "using 82547 Tx FIFO stall work-around\n");
 1253                 }
 1254         } else if (sc->sc_type >= WM_T_82571) {
 1255                 sc->sc_flags |= WM_F_PCIE;
 1256                 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
 1257                     && (sc->sc_type != WM_T_ICH10)
 1258                     && (sc->sc_type != WM_T_PCH)) {
 1259                         sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
 1260                         /* ICH* and PCH have no PCIe capability registers */
 1261                         if (pci_get_capability(pa->pa_pc, pa->pa_tag,
 1262                                 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
 1263                                 NULL) == 0)
 1264                                 aprint_error_dev(sc->sc_dev,
 1265                                     "unable to find PCIe capability\n");
 1266                 }
 1267                 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
 1268         } else {
 1269                 reg = CSR_READ(sc, WMREG_STATUS);
 1270                 if (reg & STATUS_BUS64)
 1271                         sc->sc_flags |= WM_F_BUS64;
 1272                 if ((reg & STATUS_PCIX_MODE) != 0) {
 1273                         pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
 1274 
 1275                         sc->sc_flags |= WM_F_PCIX;
 1276                         if (pci_get_capability(pa->pa_pc, pa->pa_tag,
 1277                                 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
 1278                                 aprint_error_dev(sc->sc_dev,
 1279                                     "unable to find PCIX capability\n");
 1280                         else if (sc->sc_type != WM_T_82545_3 &&
 1281                                  sc->sc_type != WM_T_82546_3) {
 1282                                 /*
 1283                                  * Work around a problem caused by the BIOS
 1284                                  * setting the max memory read byte count
 1285                                  * incorrectly.
 1286                                  */
 1287                                 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
 1288                                     sc->sc_pcixe_capoff + PCI_PCIX_CMD);
 1289                                 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
 1290                                     sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
 1291 
 1292                                 bytecnt =
 1293                                     (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
 1294                                     PCI_PCIX_CMD_BYTECNT_SHIFT;
 1295                                 maxb =
 1296                                     (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
 1297                                     PCI_PCIX_STATUS_MAXB_SHIFT;
 1298                                 if (bytecnt > maxb) {
 1299                                         aprint_verbose_dev(sc->sc_dev,
 1300                                             "resetting PCI-X MMRBC: %d -> %d\n",
 1301                                             512 << bytecnt, 512 << maxb);
 1302                                         pcix_cmd = (pcix_cmd &
 1303                                             ~PCI_PCIX_CMD_BYTECNT_MASK) |
 1304                                            (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
 1305                                         pci_conf_write(pa->pa_pc, pa->pa_tag,
 1306                                             sc->sc_pcixe_capoff + PCI_PCIX_CMD,
 1307                                             pcix_cmd);
 1308                                 }
 1309                         }
 1310                 }
 1311                 /*
 1312                  * The quad port adapter is special; it has a PCIX-PCIX
 1313                  * bridge on the board, and can run the secondary bus at
 1314                  * a higher speed.
 1315                  */
 1316                 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
 1317                         sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
 1318                                                                       : 66;
 1319                 } else if (sc->sc_flags & WM_F_PCIX) {
 1320                         switch (reg & STATUS_PCIXSPD_MASK) {
 1321                         case STATUS_PCIXSPD_50_66:
 1322                                 sc->sc_bus_speed = 66;
 1323                                 break;
 1324                         case STATUS_PCIXSPD_66_100:
 1325                                 sc->sc_bus_speed = 100;
 1326                                 break;
 1327                         case STATUS_PCIXSPD_100_133:
 1328                                 sc->sc_bus_speed = 133;
 1329                                 break;
 1330                         default:
 1331                                 aprint_error_dev(sc->sc_dev,
 1332                                     "unknown PCIXSPD %d; assuming 66MHz\n",
 1333                                     reg & STATUS_PCIXSPD_MASK);
 1334                                 sc->sc_bus_speed = 66;
 1335                                 break;
 1336                         }
 1337                 } else
 1338                         sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
 1339                 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
 1340                     (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
 1341                     (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
 1342         }
 1343 
 1344         /*
 1345          * Allocate the control data structures, and create and load the
 1346          * DMA map for it.
 1347          *
 1348          * NOTE: All Tx descriptors must be in the same 4G segment of
 1349          * memory.  So must Rx descriptors.  We simplify by allocating
 1350          * both sets within the same 4G segment.
 1351          */
 1352         WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
 1353             WM_NTXDESC_82542 : WM_NTXDESC_82544;
 1354         cdata_size = sc->sc_type < WM_T_82544 ?
 1355             sizeof(struct wm_control_data_82542) :
 1356             sizeof(struct wm_control_data_82544);
 1357         if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
 1358                     (bus_size_t) 0x100000000ULL, &seg, 1, &rseg, 0)) != 0) {
 1359                 aprint_error_dev(sc->sc_dev,
 1360                     "unable to allocate control data, error = %d\n",
 1361                     error);
 1362                 goto fail_0;
 1363         }
 1364 
 1365         if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
 1366                     (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
 1367                 aprint_error_dev(sc->sc_dev,
 1368                     "unable to map control data, error = %d\n", error);
 1369                 goto fail_1;
 1370         }
 1371 
 1372         if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
 1373                     0, 0, &sc->sc_cddmamap)) != 0) {
 1374                 aprint_error_dev(sc->sc_dev,
 1375                     "unable to create control data DMA map, error = %d\n",
 1376                     error);
 1377                 goto fail_2;
 1378         }
 1379 
 1380         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
 1381                     sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
 1382                 aprint_error_dev(sc->sc_dev,
 1383                     "unable to load control data DMA map, error = %d\n",
 1384                     error);
 1385                 goto fail_3;
 1386         }
 1387 
 1388         /*
 1389          * Create the transmit buffer DMA maps.
 1390          */
 1391         WM_TXQUEUELEN(sc) =
 1392             (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
 1393             WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
 1394         for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
 1395                 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
 1396                             WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
 1397                             &sc->sc_txsoft[i].txs_dmamap)) != 0) {
 1398                         aprint_error_dev(sc->sc_dev,
 1399                             "unable to create Tx DMA map %d, error = %d\n",
 1400                             i, error);
 1401                         goto fail_4;
 1402                 }
 1403         }
 1404 
 1405         /*
 1406          * Create the receive buffer DMA maps.
 1407          */
 1408         for (i = 0; i < WM_NRXDESC; i++) {
 1409                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
 1410                             MCLBYTES, 0, 0,
 1411                             &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
 1412                         aprint_error_dev(sc->sc_dev,
 1413                             "unable to create Rx DMA map %d error = %d\n",
 1414                             i, error);
 1415                         goto fail_5;
 1416                 }
 1417                 sc->sc_rxsoft[i].rxs_mbuf = NULL;
 1418         }
 1419 
 1420         /* clear interesting stat counters */
 1421         CSR_READ(sc, WMREG_COLC);
 1422         CSR_READ(sc, WMREG_RXERRC);
 1423 
 1424         /*
 1425          * Reset the chip to a known state.
 1426          */
 1427         wm_reset(sc);
 1428 
 1429         switch (sc->sc_type) {
 1430         case WM_T_82571:
 1431         case WM_T_82572:
 1432         case WM_T_82573:
 1433         case WM_T_82574:
 1434         case WM_T_82583:
 1435         case WM_T_80003:
 1436         case WM_T_ICH8:
 1437         case WM_T_ICH9:
 1438         case WM_T_ICH10:
 1439         case WM_T_PCH:
 1440                 if (wm_check_mng_mode(sc) != 0)
 1441                         wm_get_hw_control(sc);
 1442                 break;
 1443         default:
 1444                 break;
 1445         }
 1446 
 1447         /*
 1448          * Get some information about the EEPROM.
 1449          */
 1450         switch (sc->sc_type) {
 1451         case WM_T_82542_2_0:
 1452         case WM_T_82542_2_1:
 1453         case WM_T_82543:
 1454         case WM_T_82544:
 1455                 /* Microwire */
 1456                 sc->sc_ee_addrbits = 6;
 1457                 break;
 1458         case WM_T_82540:
 1459         case WM_T_82545:
 1460         case WM_T_82545_3:
 1461         case WM_T_82546:
 1462         case WM_T_82546_3:
 1463                 /* Microwire */
 1464                 reg = CSR_READ(sc, WMREG_EECD);
 1465                 if (reg & EECD_EE_SIZE)
 1466                         sc->sc_ee_addrbits = 8;
 1467                 else
 1468                         sc->sc_ee_addrbits = 6;
 1469                 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
 1470                 break;
 1471         case WM_T_82541:
 1472         case WM_T_82541_2:
 1473         case WM_T_82547:
 1474         case WM_T_82547_2:
 1475                 reg = CSR_READ(sc, WMREG_EECD);
 1476                 if (reg & EECD_EE_TYPE) {
 1477                         /* SPI */
 1478                         wm_set_spiaddrbits(sc);
 1479                 } else
 1480                         /* Microwire */
 1481                         sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
 1482                 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
 1483                 break;
 1484         case WM_T_82571:
 1485         case WM_T_82572:
 1486                 /* SPI */
 1487                 wm_set_spiaddrbits(sc);
 1488                 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
 1489                 break;
 1490         case WM_T_82573:
 1491         case WM_T_82574:
 1492         case WM_T_82583:
 1493                 if (wm_is_onboard_nvm_eeprom(sc) == 0)
 1494                         sc->sc_flags |= WM_F_EEPROM_FLASH;
 1495                 else {
 1496                         /* SPI */
 1497                         wm_set_spiaddrbits(sc);
 1498                 }
 1499                 sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
 1500                 break;
 1501         case WM_T_82575:
 1502         case WM_T_82576:
 1503         case WM_T_82580:
 1504         case WM_T_82580ER:
 1505         case WM_T_80003:
 1506                 /* SPI */
 1507                 wm_set_spiaddrbits(sc);
 1508                 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
 1509                 break;
 1510         case WM_T_ICH8:
 1511         case WM_T_ICH9:
 1512         case WM_T_ICH10:
 1513         case WM_T_PCH:
 1514                 /* FLASH */
 1515                 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
 1516                 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
 1517                 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
 1518                     &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
 1519                         aprint_error_dev(sc->sc_dev,
 1520                             "can't map FLASH registers\n");
 1521                         return;
 1522                 }
 1523                 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
 1524                 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
 1525                                                 ICH_FLASH_SECTOR_SIZE;
 1526                 sc->sc_ich8_flash_bank_size =
 1527                     ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
 1528                 sc->sc_ich8_flash_bank_size -=
 1529                     (reg & ICH_GFPREG_BASE_MASK);
 1530                 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
 1531                 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
 1532                 break;
 1533         default:
 1534                 break;
 1535         }
 1536 
 1537         /*
 1538          * Defer printing the EEPROM type until after verifying the checksum.
 1539          * This allows the EEPROM type to be printed correctly in the case
 1540          * that no EEPROM is attached.
 1541          */
 1542         /*
 1543          * Validate the EEPROM checksum. If the checksum fails, flag
 1544          * this for later, so we can fail future reads from the EEPROM.
 1545          */
 1546         if (wm_validate_eeprom_checksum(sc)) {
 1547                 /*
 1548                  * Read twice again because some PCI-e parts fail the
 1549                  * first check due to the link being in sleep state.
 1550                  */
 1551                 if (wm_validate_eeprom_checksum(sc))
 1552                         sc->sc_flags |= WM_F_EEPROM_INVALID;
 1553         }
 1554 
 1555         /* Set device properties (macflags) */
 1556         prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
 1557 
 1558         if (sc->sc_flags & WM_F_EEPROM_INVALID)
 1559                 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
 1560         else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
 1561                 aprint_verbose_dev(sc->sc_dev, "FLASH\n");
 1562         } else {
 1563                 if (sc->sc_flags & WM_F_EEPROM_SPI)
 1564                         eetype = "SPI";
 1565                 else
 1566                         eetype = "MicroWire";
 1567                 aprint_verbose_dev(sc->sc_dev,
 1568                     "%u word (%d address bits) %s EEPROM\n",
 1569                     1U << sc->sc_ee_addrbits,
 1570                     sc->sc_ee_addrbits, eetype);
 1571         }
 1572 
 1573         /*
 1574          * Read the Ethernet address from the EEPROM, if not first found
 1575          * in device properties.
 1576          */
 1577         ea = prop_dictionary_get(dict, "mac-addr");
 1578         if (ea != NULL) {
 1579                 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
 1580                 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
 1581                 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
 1582         } else {
 1583                 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
 1584                     sizeof(myea) / sizeof(myea[0]), myea)) {
 1585                         aprint_error_dev(sc->sc_dev,
 1586                             "unable to read Ethernet address\n");
 1587                         return;
 1588                 }
 1589                 enaddr[0] = myea[0] & 0xff;
 1590                 enaddr[1] = myea[0] >> 8;
 1591                 enaddr[2] = myea[1] & 0xff;
 1592                 enaddr[3] = myea[1] >> 8;
 1593                 enaddr[4] = myea[2] & 0xff;
 1594                 enaddr[5] = myea[2] >> 8;
 1595         }
 1596 
 1597         /*
 1598          * Toggle the LSB of the MAC address on the second port
 1599          * of the dual port controller.
 1600          */
 1601         if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
 1602             || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
 1603             || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
 1604                 if (sc->sc_funcid == 1)
 1605                         enaddr[5] ^= 1;
 1606         }
 1607 
 1608         aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
 1609             ether_sprintf(enaddr));
 1610 
 1611         /*
 1612          * Read the config info from the EEPROM, and set up various
 1613          * bits in the control registers based on their contents.
 1614          */
 1615         pn = prop_dictionary_get(dict, "i82543-cfg1");
 1616         if (pn != NULL) {
 1617                 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
 1618                 cfg1 = (uint16_t) prop_number_integer_value(pn);
 1619         } else {
 1620                 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
 1621                         aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
 1622                         return;
 1623                 }
 1624         }
 1625 
 1626         pn = prop_dictionary_get(dict, "i82543-cfg2");
 1627         if (pn != NULL) {
 1628                 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
 1629                 cfg2 = (uint16_t) prop_number_integer_value(pn);
 1630         } else {
 1631                 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
 1632                         aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
 1633                         return;
 1634                 }
 1635         }
 1636 
 1637         if (sc->sc_type >= WM_T_82544) {
 1638                 pn = prop_dictionary_get(dict, "i82543-swdpin");
 1639                 if (pn != NULL) {
 1640                         KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
 1641                         swdpin = (uint16_t) prop_number_integer_value(pn);
 1642                 } else {
 1643                         if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
 1644                                 aprint_error_dev(sc->sc_dev,
 1645                                     "unable to read SWDPIN\n");
 1646                                 return;
 1647                         }
 1648                 }
 1649         }
 1650 
 1651         if (cfg1 & EEPROM_CFG1_ILOS)
 1652                 sc->sc_ctrl |= CTRL_ILOS;
 1653         if (sc->sc_type >= WM_T_82544) {
 1654                 sc->sc_ctrl |=
 1655                     ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
 1656                     CTRL_SWDPIO_SHIFT;
 1657                 sc->sc_ctrl |=
 1658                     ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
 1659                     CTRL_SWDPINS_SHIFT;
 1660         } else {
 1661                 sc->sc_ctrl |=
 1662                     ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
 1663                     CTRL_SWDPIO_SHIFT;
 1664         }
 1665 
 1666 #if 0
 1667         if (sc->sc_type >= WM_T_82544) {
 1668                 if (cfg1 & EEPROM_CFG1_IPS0)
 1669                         sc->sc_ctrl_ext |= CTRL_EXT_IPS;
 1670                 if (cfg1 & EEPROM_CFG1_IPS1)
 1671                         sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
 1672                 sc->sc_ctrl_ext |=
 1673                     ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
 1674                     CTRL_EXT_SWDPIO_SHIFT;
 1675                 sc->sc_ctrl_ext |=
 1676                     ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
 1677                     CTRL_EXT_SWDPINS_SHIFT;
 1678         } else {
 1679                 sc->sc_ctrl_ext |=
 1680                     ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
 1681                     CTRL_EXT_SWDPIO_SHIFT;
 1682         }
 1683 #endif
 1684 
 1685         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 1686 #if 0
 1687         CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
 1688 #endif
 1689 
 1690         /*
 1691          * Set up some register offsets that are different between
 1692          * the i82542 and the i82543 and later chips.
 1693          */
 1694         if (sc->sc_type < WM_T_82543) {
 1695                 sc->sc_rdt_reg = WMREG_OLD_RDT0;
 1696                 sc->sc_tdt_reg = WMREG_OLD_TDT;
 1697         } else {
 1698                 sc->sc_rdt_reg = WMREG_RDT;
 1699                 sc->sc_tdt_reg = WMREG_TDT;
 1700         }
 1701 
 1702         if (sc->sc_type == WM_T_PCH) {
 1703                 uint16_t val;
 1704 
 1705                 /* Save the NVM K1 bit setting */
 1706                 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
 1707 
 1708                 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
 1709                         sc->sc_nvm_k1_enabled = 1;
 1710                 else
 1711                         sc->sc_nvm_k1_enabled = 0;
 1712         }
 1713 
 1714         /*
 1715          * Determine if we're TBI,GMII or SGMII mode, and initialize the
 1716          * media structures accordingly.
 1717          */
 1718         if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
 1719             || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
 1720             || sc->sc_type == WM_T_82573
 1721             || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
 1722                 /* STATUS_TBIMODE reserved/reused, can't rely on it */
 1723                 wm_gmii_mediainit(sc, wmp->wmp_product);
 1724         } else if (sc->sc_type < WM_T_82543 ||
 1725             (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
 1726                 if (wmp->wmp_flags & WMP_F_1000T)
 1727                         aprint_error_dev(sc->sc_dev,
 1728                             "WARNING: TBIMODE set on 1000BASE-T product!\n");
 1729                 wm_tbi_mediainit(sc);
 1730         } else {
 1731                 switch (sc->sc_type) {
 1732                 case WM_T_82575:
 1733                 case WM_T_82576:
 1734                 case WM_T_82580:
 1735                 case WM_T_82580ER:
 1736                         reg = CSR_READ(sc, WMREG_CTRL_EXT);
 1737                         switch (reg & CTRL_EXT_LINK_MODE_MASK) {
 1738                         case CTRL_EXT_LINK_MODE_SGMII:
 1739                                 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
 1740                                 sc->sc_flags |= WM_F_SGMII;
 1741                                 CSR_WRITE(sc, WMREG_CTRL_EXT,
 1742                                     reg | CTRL_EXT_I2C_ENA);
 1743                                 wm_gmii_mediainit(sc, wmp->wmp_product);
 1744                                 break;
 1745                         case CTRL_EXT_LINK_MODE_1000KX:
 1746                         case CTRL_EXT_LINK_MODE_PCIE_SERDES:
 1747                                 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
 1748                                 CSR_WRITE(sc, WMREG_CTRL_EXT,
 1749                                     reg | CTRL_EXT_I2C_ENA);
 1750                                 panic("not supported yet\n");
 1751                                 break;
 1752                         case CTRL_EXT_LINK_MODE_GMII:
 1753                         default:
 1754                                 CSR_WRITE(sc, WMREG_CTRL_EXT,
 1755                                     reg & ~CTRL_EXT_I2C_ENA);
 1756                                 wm_gmii_mediainit(sc, wmp->wmp_product);
 1757                                 break;
 1758                         }
 1759                         break;
 1760                 default:
 1761                         if (wmp->wmp_flags & WMP_F_1000X)
 1762                                 aprint_error_dev(sc->sc_dev,
 1763                                     "WARNING: TBIMODE clear on 1000BASE-X product!\n");
 1764                         wm_gmii_mediainit(sc, wmp->wmp_product);
 1765                 }
 1766         }
 1767 
 1768         ifp = &sc->sc_ethercom.ec_if;
 1769         xname = device_xname(sc->sc_dev);
 1770         strlcpy(ifp->if_xname, xname, IFNAMSIZ);
 1771         ifp->if_softc = sc;
 1772         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1773         ifp->if_ioctl = wm_ioctl;
 1774         ifp->if_start = wm_start;
 1775         ifp->if_watchdog = wm_watchdog;
 1776         ifp->if_init = wm_init;
 1777         ifp->if_stop = wm_stop;
 1778         IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
 1779         IFQ_SET_READY(&ifp->if_snd);
 1780 
 1781         /* Check for jumbo frame */
 1782         switch (sc->sc_type) {
 1783         case WM_T_82573:
 1784                 /* XXX limited to 9234 if ASPM is disabled */
 1785                 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
 1786                 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
 1787                         sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
 1788                 break;
 1789         case WM_T_82571:
 1790         case WM_T_82572:
 1791         case WM_T_82574:
 1792         case WM_T_82575:
 1793         case WM_T_82576:
 1794         case WM_T_82580:
 1795         case WM_T_82580ER:
 1796         case WM_T_80003:
 1797         case WM_T_ICH9:
 1798         case WM_T_ICH10:
 1799                 /* XXX limited to 9234 */
 1800                 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
 1801                 break;
 1802         case WM_T_PCH:
 1803                 /* XXX limited to 4096 */
 1804                 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
 1805                 break;
 1806         case WM_T_82542_2_0:
 1807         case WM_T_82542_2_1:
 1808         case WM_T_82583:
 1809         case WM_T_ICH8:
 1810                 /* No support for jumbo frame */
 1811                 break;
 1812         default:
 1813                 /* ETHER_MAX_LEN_JUMBO */
 1814                 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
 1815                 break;
 1816         }
 1817 
 1818         /*
 1819          * If we're a i82543 or greater, we can support VLANs.
 1820          */
 1821         if (sc->sc_type >= WM_T_82543)
 1822                 sc->sc_ethercom.ec_capabilities |=
 1823                     ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
 1824 
 1825         /*
 1826          * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
 1827          * on i82543 and later.
 1828          */
 1829         if (sc->sc_type >= WM_T_82543) {
 1830                 ifp->if_capabilities |=
 1831                     IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
 1832                     IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
 1833                     IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
 1834                     IFCAP_CSUM_TCPv6_Tx |
 1835                     IFCAP_CSUM_UDPv6_Tx;
 1836         }
 1837 
 1838         /*
 1839          * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
 1840          *
 1841          *      82541GI (8086:1076) ... no
 1842          *      82572EI (8086:10b9) ... yes
 1843          */
 1844         if (sc->sc_type >= WM_T_82571) {
 1845                 ifp->if_capabilities |=
 1846                     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
 1847         }
 1848 
 1849         /*
 1850          * If we're a i82544 or greater (except i82547), we can do
 1851          * TCP segmentation offload.
 1852          */
 1853         if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
 1854                 ifp->if_capabilities |= IFCAP_TSOv4;
 1855         }
 1856 
 1857         if (sc->sc_type >= WM_T_82571) {
 1858                 ifp->if_capabilities |= IFCAP_TSOv6;
 1859         }
 1860 
 1861         /*
 1862          * Attach the interface.
 1863          */
 1864         if_attach(ifp);
 1865         ether_ifattach(ifp, enaddr);
 1866 #if NRND > 0
 1867         rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
 1868 #endif
 1869 
 1870 #ifdef WM_EVENT_COUNTERS
 1871         /* Attach event counters. */
 1872         evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
 1873             NULL, xname, "txsstall");
 1874         evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
 1875             NULL, xname, "txdstall");
 1876         evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
 1877             NULL, xname, "txfifo_stall");
 1878         evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
 1879             NULL, xname, "txdw");
 1880         evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
 1881             NULL, xname, "txqe");
 1882         evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
 1883             NULL, xname, "rxintr");
 1884         evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
 1885             NULL, xname, "linkintr");
 1886 
 1887         evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
 1888             NULL, xname, "rxipsum");
 1889         evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
 1890             NULL, xname, "rxtusum");
 1891         evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
 1892             NULL, xname, "txipsum");
 1893         evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
 1894             NULL, xname, "txtusum");
 1895         evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
 1896             NULL, xname, "txtusum6");
 1897 
 1898         evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
 1899             NULL, xname, "txtso");
 1900         evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
 1901             NULL, xname, "txtso6");
 1902         evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
 1903             NULL, xname, "txtsopain");
 1904 
 1905         for (i = 0; i < WM_NTXSEGS; i++) {
 1906                 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
 1907                 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
 1908                     NULL, xname, wm_txseg_evcnt_names[i]);
 1909         }
 1910 
 1911         evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
 1912             NULL, xname, "txdrop");
 1913 
 1914         evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
 1915             NULL, xname, "tu");
 1916 
 1917         evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
 1918             NULL, xname, "tx_xoff");
 1919         evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
 1920             NULL, xname, "tx_xon");
 1921         evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
 1922             NULL, xname, "rx_xoff");
 1923         evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
 1924             NULL, xname, "rx_xon");
 1925         evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
 1926             NULL, xname, "rx_macctl");
 1927 #endif /* WM_EVENT_COUNTERS */
 1928 
 1929         if (!pmf_device_register(self, NULL, NULL))
 1930                 aprint_error_dev(self, "couldn't establish power handler\n");
 1931         else
 1932                 pmf_class_network_register(self, ifp);
 1933 
 1934         return;
 1935 
 1936         /*
 1937          * Free any resources we've allocated during the failed attach
 1938          * attempt.  Do this in reverse order and fall through.
 1939          */
 1940  fail_5:
 1941         for (i = 0; i < WM_NRXDESC; i++) {
 1942                 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
 1943                         bus_dmamap_destroy(sc->sc_dmat,
 1944                             sc->sc_rxsoft[i].rxs_dmamap);
 1945         }
 1946  fail_4:
 1947         for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
 1948                 if (sc->sc_txsoft[i].txs_dmamap != NULL)
 1949                         bus_dmamap_destroy(sc->sc_dmat,
 1950                             sc->sc_txsoft[i].txs_dmamap);
 1951         }
 1952         bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 1953  fail_3:
 1954         bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 1955  fail_2:
 1956         bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
 1957             cdata_size);
 1958  fail_1:
 1959         bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 1960  fail_0:
 1961         return;
 1962 }
 1963 
/*
 * wm_tx_offload:
 *
 *      Set up TCP/IP checksumming parameters for the
 *      specified packet.
 *
 *      Writes one Livengood TCP/IP context descriptor into the Tx ring
 *      (advancing sc_txnext and charging the slot to txs->txs_ndesc),
 *      and returns through *cmdp / *fieldsp the command bits and option
 *      fields the caller must place in the packet's data descriptors.
 *      Always returns 0.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
        struct mbuf *m0 = txs->txs_mbuf;
        struct livengood_tcpip_ctxdesc *t;
        uint32_t ipcs, tucs, cmd, cmdlen, seg;
        uint32_t ipcse;
        struct ether_header *eh;
        int offset, iphl;
        uint8_t fields;

        /*
         * XXX It would be nice if the mbuf pkthdr had offset
         * fields for the protocol headers.
         */

        /*
         * Find the start of the IP header from the Ethernet type.
         * (htons() here acts as a byte swap of the wire-order type;
         * ntohs() would read more clearly but is equivalent.)
         */
        eh = mtod(m0, struct ether_header *);
        switch (htons(eh->ether_type)) {
        case ETHERTYPE_IP:
        case ETHERTYPE_IPV6:
                offset = ETHER_HDR_LEN;
                break;

        case ETHERTYPE_VLAN:
                offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                break;

        default:
                /*
                 * Don't support this protocol or encapsulation.
                 */
                *fieldsp = 0;
                *cmdp = 0;
                return 0;
        }

        /* IP header length is encoded in the checksum metadata. */
        if ((m0->m_pkthdr.csum_flags &
            (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
                iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
        } else {
                iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
        }
        /* Offset of the last byte of the IP header (inclusive). */
        ipcse = offset + iphl - 1;

        cmd = WTX_CMD_DEXT | WTX_DTYP_D;
        cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
        seg = 0;
        fields = 0;

        if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
                /*
                 * TCP segmentation offload: before the chip can segment,
                 * the IP length field must be zeroed and the TCP checksum
                 * seeded with the pseudo-header sum.
                 */
                int hlen = offset + iphl;
                bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

                if (__predict_false(m0->m_len <
                                    (hlen + sizeof(struct tcphdr)))) {
                        /*
                         * TCP/IP headers are not in the first mbuf; we need
                         * to do this the slow and painful way.  Let's just
                         * hope this doesn't happen very often.
                         */
                        struct tcphdr th;

                        WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

                        m_copydata(m0, hlen, sizeof(th), &th);
                        if (v4) {
                                struct ip ip;

                                m_copydata(m0, offset, sizeof(ip), &ip);
                                ip.ip_len = 0;
                                m_copyback(m0,
                                    offset + offsetof(struct ip, ip_len),
                                    sizeof(ip.ip_len), &ip.ip_len);
                                th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
                                    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
                        } else {
                                struct ip6_hdr ip6;

                                m_copydata(m0, offset, sizeof(ip6), &ip6);
                                ip6.ip6_plen = 0;
                                m_copyback(m0,
                                    offset + offsetof(struct ip6_hdr, ip6_plen),
                                    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
                                th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
                                    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
                        }
                        m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
                            sizeof(th.th_sum), &th.th_sum);

                        /* hlen now covers IP + TCP headers. */
                        hlen += th.th_off << 2;
                } else {
                        /*
                         * TCP/IP headers are in the first mbuf; we can do
                         * this the easy way.
                         */
                        struct tcphdr *th;

                        if (v4) {
                                struct ip *ip =
                                    (void *)(mtod(m0, char *) + offset);
                                th = (void *)(mtod(m0, char *) + hlen);

                                ip->ip_len = 0;
                                th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
                                    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
                        } else {
                                struct ip6_hdr *ip6 =
                                    (void *)(mtod(m0, char *) + offset);
                                th = (void *)(mtod(m0, char *) + hlen);

                                ip6->ip6_plen = 0;
                                th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
                                    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
                        }
                        /* hlen now covers IP + TCP headers. */
                        hlen += th->th_off << 2;
                }

                if (v4) {
                        WM_EVCNT_INCR(&sc->sc_ev_txtso);
                        cmdlen |= WTX_TCPIP_CMD_IP;
                } else {
                        WM_EVCNT_INCR(&sc->sc_ev_txtso6);
                        ipcse = 0;      /* no IPv4 header checksum range */
                }
                cmd |= WTX_TCPIP_CMD_TSE;
                cmdlen |= WTX_TCPIP_CMD_TSE |
                    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
                seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
                    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
        }

        /*
         * NOTE: Even if we're not using the IP or TCP/UDP checksum
         * offload feature, if we load the context descriptor, we
         * MUST provide valid values for IPCSS and TUCSS fields.
         */

        ipcs = WTX_TCPIP_IPCSS(offset) |
            WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
            WTX_TCPIP_IPCSE(ipcse);
        if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
                WM_EVCNT_INCR(&sc->sc_ev_txipsum);
                fields |= WTX_IXSM;
        }

        offset += iphl;         /* offset now points at the L4 header */

        if (m0->m_pkthdr.csum_flags &
            (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
                WM_EVCNT_INCR(&sc->sc_ev_txtusum);
                fields |= WTX_TXSM;
                tucs = WTX_TCPIP_TUCSS(offset) |
                    WTX_TCPIP_TUCSO(offset +
                    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
                    WTX_TCPIP_TUCSE(0) /* rest of packet */;
        } else if ((m0->m_pkthdr.csum_flags &
            (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
                WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
                fields |= WTX_TXSM;
                tucs = WTX_TCPIP_TUCSS(offset) |
                    WTX_TCPIP_TUCSO(offset +
                    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
                    WTX_TCPIP_TUCSE(0) /* rest of packet */;
        } else {
                /* Just initialize it to a valid TCP context. */
                tucs = WTX_TCPIP_TUCSS(offset) |
                    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
                    WTX_TCPIP_TUCSE(0) /* rest of packet */;
        }

        /* Fill in the context descriptor. */
        t = (struct livengood_tcpip_ctxdesc *)
            &sc->sc_txdescs[sc->sc_txnext];
        t->tcpip_ipcs = htole32(ipcs);
        t->tcpip_tucs = htole32(tucs);
        t->tcpip_cmdlen = htole32(cmdlen);
        t->tcpip_seg = htole32(seg);
        WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

        /* The context descriptor itself consumes one ring slot. */
        sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
        txs->txs_ndesc++;

        *cmdp = cmd;
        *fieldsp = fields;

        return 0;
}
 2158 
 2159 static void
 2160 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
 2161 {
 2162         struct mbuf *m;
 2163         int i;
 2164 
 2165         log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
 2166         for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
 2167                 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
 2168                     "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
 2169                     m->m_data, m->m_len, m->m_flags);
 2170         log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
 2171             i, i == 1 ? "" : "s");
 2172 }
 2173 
 2174 /*
 2175  * wm_82547_txfifo_stall:
 2176  *
 2177  *      Callout used to wait for the 82547 Tx FIFO to drain,
 2178  *      reset the FIFO pointers, and restart packet transmission.
 2179  */
 2180 static void
 2181 wm_82547_txfifo_stall(void *arg)
 2182 {
 2183         struct wm_softc *sc = arg;
 2184         int s;
 2185 
 2186         s = splnet();
 2187 
 2188         if (sc->sc_txfifo_stall) {
 2189                 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
 2190                     CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
 2191                     CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
 2192                         /*
 2193                          * Packets have drained.  Stop transmitter, reset
 2194                          * FIFO pointers, restart transmitter, and kick
 2195                          * the packet queue.
 2196                          */
 2197                         uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
 2198                         CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
 2199                         CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
 2200                         CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
 2201                         CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
 2202                         CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
 2203                         CSR_WRITE(sc, WMREG_TCTL, tctl);
 2204                         CSR_WRITE_FLUSH(sc);
 2205 
 2206                         sc->sc_txfifo_head = 0;
 2207                         sc->sc_txfifo_stall = 0;
 2208                         wm_start(&sc->sc_ethercom.ec_if);
 2209                 } else {
 2210                         /*
 2211                          * Still waiting for packets to drain; try again in
 2212                          * another tick.
 2213                          */
 2214                         callout_schedule(&sc->sc_txfifo_ch, 1);
 2215                 }
 2216         }
 2217 
 2218         splx(s);
 2219 }
 2220 
 2221 /*
 2222  * wm_82547_txfifo_bugchk:
 2223  *
 2224  *      Check for bug condition in the 82547 Tx FIFO.  We need to
 2225  *      prevent enqueueing a packet that would wrap around the end
 *      of the Tx FIFO ring buffer, otherwise the chip will croak.
 2227  *
 2228  *      We do this by checking the amount of space before the end
 2229  *      of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 2230  *      the Tx FIFO, wait for all remaining packets to drain, reset
 2231  *      the internal FIFO pointers to the beginning, and restart
 2232  *      transmission on the interface.
 2233  */
 2234 #define WM_FIFO_HDR             0x10
 2235 #define WM_82547_PAD_LEN        0x3e0
 2236 static int
 2237 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
 2238 {
 2239         int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
 2240         int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
 2241 
 2242         /* Just return if already stalled. */
 2243         if (sc->sc_txfifo_stall)
 2244                 return 1;
 2245 
 2246         if (sc->sc_mii.mii_media_active & IFM_FDX) {
 2247                 /* Stall only occurs in half-duplex mode. */
 2248                 goto send_packet;
 2249         }
 2250 
 2251         if (len >= WM_82547_PAD_LEN + space) {
 2252                 sc->sc_txfifo_stall = 1;
 2253                 callout_schedule(&sc->sc_txfifo_ch, 1);
 2254                 return 1;
 2255         }
 2256 
 2257  send_packet:
 2258         sc->sc_txfifo_head += len;
 2259         if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
 2260                 sc->sc_txfifo_head -= sc->sc_txfifo_size;
 2261 
 2262         return 0;
 2263 }
 2264 
/*
 * wm_start:            [ifnet interface function]
 *
 *      Start packet transmission on the interface.
 *
 *      Drains ifp->if_snd into the Tx descriptor ring until the queue
 *      is empty or descriptors/job slots run out, setting IFF_OACTIVE
 *      when the ring fills.  NOTE(review): appears to assume the usual
 *      splnet()/ifnet serialization for if_start -- confirm with callers.
 */
static void
wm_start(struct ifnet *ifp)
{
        struct wm_softc *sc = ifp->if_softc;
        struct mbuf *m0;
        struct m_tag *mtag;
        struct wm_txsoft *txs;
        bus_dmamap_t dmamap;
        int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
        bus_addr_t curaddr;
        bus_size_t seglen, curlen;
        uint32_t cksumcmd;
        uint8_t cksumfields;

        /* Do nothing unless running and not already marked active. */
        if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
                return;

        /*
         * Remember the previous number of free descriptors.
         */
        ofree = sc->sc_txfree;

        /*
         * Loop through the send queue, setting up transmit descriptors
         * until we drain the queue, or use up all available transmit
         * descriptors.
         */
        for (;;) {
                /* Grab a packet off the queue. */
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: have packet to transmit: %p\n",
                    device_xname(sc->sc_dev), m0));

                /* Get a work queue entry; reclaim finished jobs if low. */
                if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
                        wm_txintr(sc);
                        if (sc->sc_txsfree == 0) {
                                DPRINTF(WM_DEBUG_TX,
                                    ("%s: TX: no free job descriptors\n",
                                        device_xname(sc->sc_dev)));
                                WM_EVCNT_INCR(&sc->sc_ev_txsstall);
                                break;
                        }
                }

                txs = &sc->sc_txsoft[sc->sc_txsnext];
                dmamap = txs->txs_dmamap;

                use_tso = (m0->m_pkthdr.csum_flags &
                    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

                /*
                 * So says the Linux driver:
                 * The controller does a simple calculation to make sure
                 * there is enough room in the FIFO before initiating the
                 * DMA for each buffer.  The calc is:
                 *      4 = ceil(buffer len / MSS)
                 * To make sure we don't overrun the FIFO, adjust the max
                 * buffer len if the MSS drops.
                 */
                dmamap->dm_maxsegsz =
                    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
                    ? m0->m_pkthdr.segsz << 2
                    : WTX_MAX_LEN;

                /*
                 * Load the DMA map.  If this fails, the packet either
                 * didn't fit in the allotted number of segments, or we
                 * were short on resources.  For the too-many-segments
                 * case, we simply report an error and drop the packet,
                 * since we can't sanely copy a jumbo packet to a single
                 * buffer.
                 */
                error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
                    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
                if (error) {
                        if (error == EFBIG) {
                                WM_EVCNT_INCR(&sc->sc_ev_txdrop);
                                log(LOG_ERR, "%s: Tx packet consumes too many "
                                    "DMA segments, dropping...\n",
                                    device_xname(sc->sc_dev));
                                IFQ_DEQUEUE(&ifp->if_snd, m0);
                                wm_dump_mbuf_chain(sc, m0);
                                m_freem(m0);
                                continue;
                        }
                        /*
                         * Short on resources, just stop for now.
                         */
                        DPRINTF(WM_DEBUG_TX,
                            ("%s: TX: dmamap load failed: %d\n",
                            device_xname(sc->sc_dev), error));
                        break;
                }

                segs_needed = dmamap->dm_nsegs;
                if (use_tso) {
                        /* For sentinel descriptor; see below. */
                        segs_needed++;
                }

                /*
                 * Ensure we have enough descriptors free to describe
                 * the packet.  Note, we always reserve one descriptor
                 * at the end of the ring due to the semantics of the
                 * TDT register, plus one more in the event we need
                 * to load offload context.
                 */
                if (segs_needed > sc->sc_txfree - 2) {
                        /*
                         * Not enough free descriptors to transmit this
                         * packet.  We haven't committed anything yet,
                         * so just unload the DMA map, put the packet
                         * pack on the queue, and punt.  Notify the upper
                         * layer that there are no more slots left.
                         */
                        DPRINTF(WM_DEBUG_TX,
                            ("%s: TX: need %d (%d) descriptors, have %d\n",
                            device_xname(sc->sc_dev), dmamap->dm_nsegs,
                            segs_needed, sc->sc_txfree - 1));
                        ifp->if_flags |= IFF_OACTIVE;
                        bus_dmamap_unload(sc->sc_dmat, dmamap);
                        WM_EVCNT_INCR(&sc->sc_ev_txdstall);
                        break;
                }

                /*
                 * Check for 82547 Tx FIFO bug.  We need to do this
                 * once we know we can transmit the packet, since we
                 * do some internal FIFO space accounting here.
                 */
                if (sc->sc_type == WM_T_82547 &&
                    wm_82547_txfifo_bugchk(sc, m0)) {
                        DPRINTF(WM_DEBUG_TX,
                            ("%s: TX: 82547 Tx FIFO bug detected\n",
                            device_xname(sc->sc_dev)));
                        ifp->if_flags |= IFF_OACTIVE;
                        bus_dmamap_unload(sc->sc_dmat, dmamap);
                        WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
                        break;
                }

                IFQ_DEQUEUE(&ifp->if_snd, m0);

                /*
                 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
                 */

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: packet has %d (%d) DMA segments\n",
                    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

                WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

                /*
                 * Store a pointer to the packet so that we can free it
                 * later.
                 *
                 * Initially, we consider the number of descriptors the
                 * packet uses the number of DMA segments.  This may be
                 * incremented by 1 if we do checksum offload (a descriptor
                 * is used to set the checksum context).
                 */
                txs->txs_mbuf = m0;
                txs->txs_firstdesc = sc->sc_txnext;
                txs->txs_ndesc = segs_needed;

                /* Set up offload parameters for this packet. */
                if (m0->m_pkthdr.csum_flags &
                    (M_CSUM_TSOv4|M_CSUM_TSOv6|
                    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
                    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
                        if (wm_tx_offload(sc, txs, &cksumcmd,
                                          &cksumfields) != 0) {
                                /* Error message already displayed. */
                                bus_dmamap_unload(sc->sc_dmat, dmamap);
                                continue;
                        }
                } else {
                        cksumcmd = 0;
                        cksumfields = 0;
                }

                cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

                /* Sync the DMA map. */
                bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
                    BUS_DMASYNC_PREWRITE);

                /*
                 * Initialize the transmit descriptor.
                 * One hardware descriptor per chunk of each DMA segment.
                 */
                for (nexttx = sc->sc_txnext, seg = 0;
                     seg < dmamap->dm_nsegs; seg++) {
                        for (seglen = dmamap->dm_segs[seg].ds_len,
                             curaddr = dmamap->dm_segs[seg].ds_addr;
                             seglen != 0;
                             curaddr += curlen, seglen -= curlen,
                             nexttx = WM_NEXTTX(sc, nexttx)) {
                                curlen = seglen;

                                /*
                                 * So says the Linux driver:
                                 * Work around for premature descriptor
                                 * write-backs in TSO mode.  Append a
                                 * 4-byte sentinel descriptor.
                                 */
                                if (use_tso &&
                                    seg == dmamap->dm_nsegs - 1 &&
                                    curlen > 8)
                                        curlen -= 4;

                                wm_set_dma_addr(
                                    &sc->sc_txdescs[nexttx].wtx_addr,
                                    curaddr);
                                sc->sc_txdescs[nexttx].wtx_cmdlen =
                                    htole32(cksumcmd | curlen);
                                sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
                                    0;
                                sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
                                    cksumfields;
                                sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
                                lasttx = nexttx;

                                DPRINTF(WM_DEBUG_TX,
                                    ("%s: TX: desc %d: low 0x%08lx, "
                                     "len 0x%04x\n",
                                    device_xname(sc->sc_dev), nexttx,
                                    curaddr & 0xffffffffUL, (unsigned)curlen));
                        }
                }

                KASSERT(lasttx != -1);

                /*
                 * Set up the command byte on the last descriptor of
                 * the packet.  If we're in the interrupt delay window,
                 * delay the interrupt.
                 */
                sc->sc_txdescs[lasttx].wtx_cmdlen |=
                    htole32(WTX_CMD_EOP | WTX_CMD_RS);

                /*
                 * If VLANs are enabled and the packet has a VLAN tag, set
                 * up the descriptor to encapsulate the packet for us.
                 *
                 * This is only valid on the last descriptor of the packet.
                 */
                if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
                        sc->sc_txdescs[lasttx].wtx_cmdlen |=
                            htole32(WTX_CMD_VLE);
                        sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
                            = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
                }

                txs->txs_lastdesc = lasttx;

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: desc %d: cmdlen 0x%08x\n",
                    device_xname(sc->sc_dev),
                    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

                /* Sync the descriptors we're using. */
                WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

                /* Give the packet to the chip (advance the tail pointer). */
                CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: finished transmitting packet, job %d\n",
                    device_xname(sc->sc_dev), sc->sc_txsnext));

                /* Advance the tx pointer. */
                sc->sc_txfree -= txs->txs_ndesc;
                sc->sc_txnext = nexttx;

                sc->sc_txsfree--;
                sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
                /* Pass the packet to any BPF listeners. */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
        }

        if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
                /* No more slots; notify upper layer. */
                ifp->if_flags |= IFF_OACTIVE;
        }

        if (sc->sc_txfree != ofree) {
                /* Set a watchdog timer in case the chip flakes out. */
                ifp->if_timer = 5;
        }
}
 2574 
 2575 /*
 2576  * wm_watchdog:         [ifnet interface function]
 2577  *
 2578  *      Watchdog timer handler.
 2579  */
 2580 static void
 2581 wm_watchdog(struct ifnet *ifp)
 2582 {
 2583         struct wm_softc *sc = ifp->if_softc;
 2584 
 2585         /*
 2586          * Since we're using delayed interrupts, sweep up
 2587          * before we report an error.
 2588          */
 2589         wm_txintr(sc);
 2590 
 2591         if (sc->sc_txfree != WM_NTXDESC(sc)) {
 2592                 log(LOG_ERR,
 2593                     "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
 2594                     device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
 2595                     sc->sc_txnext);
 2596                 ifp->if_oerrors++;
 2597 
 2598                 /* Reset the interface. */
 2599                 (void) wm_init(ifp);
 2600         }
 2601 
 2602         /* Try to get more packets going. */
 2603         wm_start(ifp);
 2604 }
 2605 
/*
 * wm_ioctl:            [ifnet interface function]
 *
 *      Handle control requests from the operator.
 *
 *      Runs at splnet().  After handling the request, always kicks
 *      wm_start() to pick up any packets queued while we were busy.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct wm_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *) data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        struct sockaddr_dl *sdl;
        int diff, s, error;

        s = splnet();

        switch (cmd) {
        case SIOCSIFFLAGS:
                if ((error = ifioctl_common(ifp, cmd, data)) != 0)
                        break;
                if (ifp->if_flags & IFF_UP) {
                        diff = (ifp->if_flags ^ sc->sc_if_flags)
                            & (IFF_PROMISC | IFF_ALLMULTI);
                        if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                                /*
                                 * If the difference between last flag and
                                 * new flag is only IFF_PROMISC or
                                 * IFF_ALLMULTI, set multicast filter only
                                 * (don't reset to prevent link down).
                                 */
                                wm_set_filter(sc);
                        } else {
                                /*
                                 * Reset the interface to pick up changes in
                                 * any other flags that affect the hardware
                                 * state.
                                 */
                                wm_init(ifp);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                wm_stop(ifp, 1);
                }
                /* Remember the flags so the next call can diff them. */
                sc->sc_if_flags = ifp->if_flags;
                error = 0;
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                /* Flow control requires full-duplex mode. */
                if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
                    (ifr->ifr_media & IFM_FDX) == 0)
                        ifr->ifr_media &= ~IFM_ETH_FMASK;
                if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
                        if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
                                /* We can do both TXPAUSE and RXPAUSE. */
                                ifr->ifr_media |=
                                    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
                        }
                        sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
                }
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
                break;
        case SIOCSIFADDR:
                if (ifa->ifa_addr->sa_family == AF_LINK) {
                        /* Copy the new link-level address into if_dl. */
                        sdl = satosdl(ifp->if_dl->ifa_addr);
                        (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
                            LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
                        /* unicast address is first multicast entry */
                        wm_set_filter(sc);
                        error = 0;
                        break;
                }
                /* Fall through for rest */
        default:
                /* ENETRESET means the hardware filter/config must change. */
                if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
                        break;

                error = 0;

                if (cmd == SIOCSIFCAP)
                        error = (*ifp->if_init)(ifp);
                else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
                        ;
                else if (ifp->if_flags & IFF_RUNNING) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        wm_set_filter(sc);
                }
                break;
        }

        /* Try to get more packets going. */
        wm_start(ifp);

        splx(s);
        return error;
}
 2705 
 2706 /*
 2707  * wm_intr:
 2708  *
 2709  *      Interrupt service routine.
 2710  */
static int
wm_intr(void *arg)
{
        struct wm_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        uint32_t icr;
        int handled = 0;        /* becomes 1 once any of our causes fires */

        /*
         * Drain the interrupt cause register until none of the causes
         * we enabled (sc->sc_icr) remain asserted.  NOTE(review):
         * reading WMREG_ICR presumably acknowledges/clears the reported
         * causes -- confirm against the chip documentation.
         */
        while (1 /* CONSTCOND */) {
                icr = CSR_READ(sc, WMREG_ICR);
                if ((icr & sc->sc_icr) == 0)
                        break;
#if 0 /*NRND > 0*/
                if (RND_ENABLED(&sc->rnd_source))
                        rnd_add_uint32(&sc->rnd_source, icr);
#endif

                handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
                if (icr & (ICR_RXDMT0|ICR_RXT0)) {
                        DPRINTF(WM_DEBUG_RX,
                            ("%s: RX: got Rx intr 0x%08x\n",
                            device_xname(sc->sc_dev),
                            icr & (ICR_RXDMT0|ICR_RXT0)));
                        WM_EVCNT_INCR(&sc->sc_ev_rxintr);
                }
#endif
                /*
                 * Rx and Tx rings are serviced unconditionally on every
                 * pass, regardless of which cause bits were set.
                 */
                wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
                if (icr & ICR_TXDW) {
                        DPRINTF(WM_DEBUG_TX,
                            ("%s: TX: got TXDW interrupt\n",
                            device_xname(sc->sc_dev)));
                        WM_EVCNT_INCR(&sc->sc_ev_txdw);
                }
#endif
                wm_txintr(sc);

                /* Link-state and sequence/config causes. */
                if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
                        WM_EVCNT_INCR(&sc->sc_ev_linkintr);
                        wm_linkintr(sc, icr);
                }

                /* Receive overrun: logged (in debug kernels) but not fatal. */
                if (icr & ICR_RXO) {
#if defined(WM_DEBUG)
                        log(LOG_WARNING, "%s: Receive overrun\n",
                            device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
                }
        }

        if (handled) {
                /* Try to get more packets going. */
                wm_start(ifp);
        }

        return handled;
}
 2771 
 2772 /*
 2773  * wm_txintr:
 2774  *
 2775  *      Helper; handle transmit interrupts.
 2776  */
static void
wm_txintr(struct wm_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct wm_txsoft *txs;
        uint8_t status;
        int i;

        /* We may free descriptors below, so allow transmission again. */
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Go through the Tx list and free mbufs for those
         * frames which have been transmitted.
         */
        for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
             i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
                txs = &sc->sc_txsoft[i];

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

                /* Pull the descriptor status back from the hardware. */
                WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
                    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

                status =
                    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
                if ((status & WTX_ST_DD) == 0) {
                        /*
                         * Not done yet; hand the last descriptor back to
                         * the chip and stop reclaiming.
                         */
                        WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
                            BUS_DMASYNC_PREREAD);
                        break;
                }

                DPRINTF(WM_DEBUG_TX,
                    ("%s: TX: job %d done: descs %d..%d\n",
                    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
                    txs->txs_lastdesc));

                /*
                 * XXX We should probably be using the statistics
                 * XXX registers, but I don't know if they exist
                 * XXX on chips before the i82544.
                 */

#ifdef WM_EVENT_COUNTERS
                if (status & WTX_ST_TU)
                        WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

                /* Account errors vs. successful transmissions. */
                if (status & (WTX_ST_EC|WTX_ST_LC)) {
                        ifp->if_oerrors++;
                        if (status & WTX_ST_LC)
                                log(LOG_WARNING, "%s: late collision\n",
                                    device_xname(sc->sc_dev));
                        else if (status & WTX_ST_EC) {
                                /* Excessive collisions: 16 attempts failed. */
                                ifp->if_collisions += 16;
                                log(LOG_WARNING, "%s: excessive collisions\n",
                                    device_xname(sc->sc_dev));
                        }
                } else
                        ifp->if_opackets++;

                /* Release descriptors, DMA map, and the mbuf chain. */
                sc->sc_txfree += txs->txs_ndesc;
                bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
                    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
                m_freem(txs->txs_mbuf);
                txs->txs_mbuf = NULL;
        }

        /* Update the dirty transmit buffer pointer. */
        sc->sc_txsdirty = i;
        DPRINTF(WM_DEBUG_TX,
            ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

        /*
         * If there are no more pending transmissions, cancel the watchdog
         * timer.
         */
        if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
                ifp->if_timer = 0;
}
 2858 
 2859 /*
 2860  * wm_rxintr:
 2861  *
 2862  *      Helper; handle receive interrupts.
 2863  */
static void
wm_rxintr(struct wm_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct wm_rxsoft *rxs;
        struct mbuf *m;
        int i, len;
        uint8_t status, errors;
        uint16_t vlantag;

        /*
         * Walk the receive ring starting at sc_rxptr.  Multi-descriptor
         * packets are accumulated on the softc rx chain (WM_RXCHAIN_*)
         * until a descriptor with WRX_ST_EOP completes the frame.
         */
        for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
                rxs = &sc->sc_rxsoft[i];

                DPRINTF(WM_DEBUG_RX,
                    ("%s: RX: checking descriptor %d\n",
                    device_xname(sc->sc_dev), i));

                WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

                /* Snapshot the descriptor fields before reuse. */
                status = sc->sc_rxdescs[i].wrx_status;
                errors = sc->sc_rxdescs[i].wrx_errors;
                len = le16toh(sc->sc_rxdescs[i].wrx_len);
                vlantag = sc->sc_rxdescs[i].wrx_special;

                if ((status & WRX_ST_DD) == 0) {
                        /*
                         * We have processed all of the receive descriptors.
                         */
                        WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
                        break;
                }

                /*
                 * In discard mode (entered after a buffer-allocation
                 * failure mid-packet), drop descriptors until EOP.
                 */
                if (__predict_false(sc->sc_rxdiscard)) {
                        DPRINTF(WM_DEBUG_RX,
                            ("%s: RX: discarding contents of descriptor %d\n",
                            device_xname(sc->sc_dev), i));
                        WM_INIT_RXDESC(sc, i);
                        if (status & WRX_ST_EOP) {
                                /* Reset our state. */
                                DPRINTF(WM_DEBUG_RX,
                                    ("%s: RX: resetting rxdiscard -> 0\n",
                                    device_xname(sc->sc_dev)));
                                sc->sc_rxdiscard = 0;
                        }
                        continue;
                }

                bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
                    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

                m = rxs->rxs_mbuf;

                /*
                 * Add a new receive buffer to the ring, unless of
                 * course the length is zero. Treat the latter as a
                 * failed mapping.
                 */
                if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
                        /*
                         * Failed, throw away what we've done so
                         * far, and discard the rest of the packet.
                         */
                        ifp->if_ierrors++;
                        bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
                            rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
                        WM_INIT_RXDESC(sc, i);
                        if ((status & WRX_ST_EOP) == 0)
                                sc->sc_rxdiscard = 1;
                        if (sc->sc_rxhead != NULL)
                                m_freem(sc->sc_rxhead);
                        WM_RXCHAIN_RESET(sc);
                        DPRINTF(WM_DEBUG_RX,
                            ("%s: RX: Rx buffer allocation failed, "
                            "dropping packet%s\n", device_xname(sc->sc_dev),
                            sc->sc_rxdiscard ? " (discard)" : ""));
                        continue;
                }

                m->m_len = len;
                sc->sc_rxlen += len;    /* running length of the chain */
                DPRINTF(WM_DEBUG_RX,
                    ("%s: RX: buffer at %p len %d\n",
                    device_xname(sc->sc_dev), m->m_data, len));

                /*
                 * If this is not the end of the packet, keep
                 * looking.
                 */
                if ((status & WRX_ST_EOP) == 0) {
                        WM_RXCHAIN_LINK(sc, m);
                        DPRINTF(WM_DEBUG_RX,
                            ("%s: RX: not yet EOP, rxlen -> %d\n",
                            device_xname(sc->sc_dev), sc->sc_rxlen));
                        continue;
                }

                /*
                 * Okay, we have the entire packet now.  The chip is
                 * configured to include the FCS (not all chips can
                 * be configured to strip it), so we need to trim it.
                 * May need to adjust length of previous mbuf in the
                 * chain if the current mbuf is too short.
                 */
                if (m->m_len < ETHER_CRC_LEN) {
                        /* FCS straddles two mbufs; shorten the tail too. */
                        sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
                        m->m_len = 0;
                } else {
                        m->m_len -= ETHER_CRC_LEN;
                }
                len = sc->sc_rxlen - ETHER_CRC_LEN;

                WM_RXCHAIN_LINK(sc, m);

                /* Terminate the chain and take ownership of the head. */
                *sc->sc_rxtailp = NULL;
                m = sc->sc_rxhead;

                WM_RXCHAIN_RESET(sc);

                DPRINTF(WM_DEBUG_RX,
                    ("%s: RX: have entire packet, len -> %d\n",
                    device_xname(sc->sc_dev), len));

                /*
                 * If an error occurred, update stats and drop the packet.
                 */
                if (errors &
                     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
                        if (errors & WRX_ER_SE)
                                log(LOG_WARNING, "%s: symbol error\n",
                                    device_xname(sc->sc_dev));
                        else if (errors & WRX_ER_SEQ)
                                log(LOG_WARNING, "%s: receive sequence error\n",
                                    device_xname(sc->sc_dev));
                        else if (errors & WRX_ER_CE)
                                log(LOG_WARNING, "%s: CRC error\n",
                                    device_xname(sc->sc_dev));
                        m_freem(m);
                        continue;
                }

                /*
                 * No errors.  Receive the packet.
                 */
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = len;

                /*
                 * If VLANs are enabled, VLAN packets have been unwrapped
                 * for us.  Associate the tag with the packet.
                 */
                if ((status & WRX_ST_VP) != 0) {
                        /* Fourth arg is the action taken on failure. */
                        VLAN_INPUT_TAG(ifp, m,
                            le16toh(vlantag),
                            continue);
                }

                /*
                 * Set up checksum info for this packet.
                 */
                if ((status & WRX_ST_IXSM) == 0) {
                        if (status & WRX_ST_IPCS) {
                                WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
                                m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
                                if (errors & WRX_ER_IPE)
                                        m->m_pkthdr.csum_flags |=
                                            M_CSUM_IPv4_BAD;
                        }
                        if (status & WRX_ST_TCPCS) {
                                /*
                                 * Note: we don't know if this was TCP or UDP,
                                 * so we just set both bits, and expect the
                                 * upper layers to deal.
                                 */
                                WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
                                m->m_pkthdr.csum_flags |=
                                    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
                                    M_CSUM_TCPv6 | M_CSUM_UDPv6;
                                if (errors & WRX_ER_TCPE)
                                        m->m_pkthdr.csum_flags |=
                                            M_CSUM_TCP_UDP_BAD;
                        }
                }

                ifp->if_ipackets++;

#if NBPFILTER > 0
                /* Pass this up to any BPF listeners. */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

                /* Pass it on. */
                (*ifp->if_input)(ifp, m);
        }

        /* Update the receive pointer. */
        sc->sc_rxptr = i;

        DPRINTF(WM_DEBUG_RX,
            ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
 3065 
 3066 /*
 3067  * wm_linkintr_gmii:
 3068  *
 3069  *      Helper; handle link interrupts for GMII.
 3070  */
static void
wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
{

        DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
                __func__));

        if (icr & ICR_LSC) {
                /* Link status change: poll the PHY via the MII layer. */
                DPRINTF(WM_DEBUG_LINK,
                    ("%s: LINK: LSC -> mii_tick\n",
                        device_xname(sc->sc_dev)));
                mii_tick(&sc->sc_mii);
                if (sc->sc_type == WM_T_82543) {
                        int miistatus, active;

                        /*
                         * With 82543, we need to force speed and
                         * duplex on the MAC equal to what the PHY
                         * speed and duplex configuration is.
                         */
                        miistatus = sc->sc_mii.mii_media_status;

                        if (miistatus & IFM_ACTIVE) {
                                active = sc->sc_mii.mii_media_active;
                                sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
                                switch (IFM_SUBTYPE(active)) {
                                case IFM_10_T:
                                        sc->sc_ctrl |= CTRL_SPEED_10;
                                        break;
                                case IFM_100_TX:
                                        sc->sc_ctrl |= CTRL_SPEED_100;
                                        break;
                                case IFM_1000_T:
                                        sc->sc_ctrl |= CTRL_SPEED_1000;
                                        break;
                                default:
                                        /*
                                         * fiber?
                                         * Should not enter here.
                                         */
                                        printf("unknown media (%x)\n",
                                            active);
                                        break;
                                }
                                if (active & IFM_FDX)
                                        sc->sc_ctrl |= CTRL_FD;
                                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
                        }
                } else if ((sc->sc_type == WM_T_ICH8)
                    && (sc->sc_phytype == WMPHY_IGP_3)) {
                        /* ICH8 + IGP3 PHY: apply the KMRN lock-loss fix. */
                        wm_kmrn_lock_loss_workaround_ich8lan(sc);
                } else if (sc->sc_type == WM_T_PCH) {
                        /* PCH: K1 workaround, keyed on current link state. */
                        wm_k1_gig_workaround_hv(sc,
                            ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
                }

                /*
                 * 82578 PHY at gigabit: link-stall workaround after the
                 * link comes up (force speed, then release).
                 */
                if ((sc->sc_phytype == WMPHY_82578)
                    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
                        == IFM_1000_T)) {

                        if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
                                delay(200*1000); /* XXX too big */

                                /* Link stall fix for link up */
                                wm_gmii_hv_writereg(sc->sc_dev, 1,
                                    HV_MUX_DATA_CTRL,
                                    HV_MUX_DATA_CTRL_GEN_TO_MAC
                                    | HV_MUX_DATA_CTRL_FORCE_SPEED);
                                wm_gmii_hv_writereg(sc->sc_dev, 1,
                                    HV_MUX_DATA_CTRL,
                                    HV_MUX_DATA_CTRL_GEN_TO_MAC);
                        }
                }
        } else if (icr & ICR_RXSEQ) {
                /* Sequence errors are only logged in debug kernels. */
                DPRINTF(WM_DEBUG_LINK,
                    ("%s: LINK Receive sequence error\n",
                        device_xname(sc->sc_dev)));
        }
}
 3150 
 3151 /*
 3152  * wm_linkintr_tbi:
 3153  *
 3154  *      Helper; handle link interrupts for TBI mode.
 3155  */
static void
wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
{
        uint32_t status;

        DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
                __func__));

        status = CSR_READ(sc, WMREG_STATUS);
        if (icr & ICR_LSC) {
                if (status & STATUS_LU) {
                        /* Link came up: reprogram collision distance and
                         * flow-control for the negotiated duplex. */
                        DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
                            device_xname(sc->sc_dev),
                            (status & STATUS_FD) ? "FDX" : "HDX"));
                        /*
                         * NOTE: CTRL will update TFCE and RFCE automatically,
                         * so we should update sc->sc_ctrl
                         */

                        sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
                        sc->sc_tctl &= ~TCTL_COLD(0x3ff);
                        sc->sc_fcrtl &= ~FCRTL_XONE;
                        if (status & STATUS_FD)
                                sc->sc_tctl |=
                                    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
                        else
                                sc->sc_tctl |=
                                    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
                        if (sc->sc_ctrl & CTRL_TFCE)
                                sc->sc_fcrtl |= FCRTL_XONE;
                        CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
                        /* Pre-82543 chips use the old FCRTL register offset. */
                        CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
                                      WMREG_OLD_FCRTL : WMREG_FCRTL,
                                      sc->sc_fcrtl);
                        sc->sc_tbi_linkup = 1;
                } else {
                        DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
                            device_xname(sc->sc_dev)));
                        sc->sc_tbi_linkup = 0;
                }
                /* Reflect the new link state on the LED. */
                wm_tbi_set_linkled(sc);
        } else if (icr & ICR_RXCFG) {
                /* Receiving /C/ ordered sets: count and re-check the link. */
                DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
                    device_xname(sc->sc_dev)));
                sc->sc_tbi_nrxcfg++;
                wm_check_for_link(sc);
        } else if (icr & ICR_RXSEQ) {
                DPRINTF(WM_DEBUG_LINK,
                    ("%s: LINK: Receive sequence error\n",
                    device_xname(sc->sc_dev)));
        }
}
 3208 
 3209 /*
 3210  * wm_linkintr:
 3211  *
 3212  *      Helper; handle link interrupts.
 3213  */
 3214 static void
 3215 wm_linkintr(struct wm_softc *sc, uint32_t icr)
 3216 {
 3217 
 3218         if (sc->sc_flags & WM_F_HAS_MII)
 3219                 wm_linkintr_gmii(sc, icr);
 3220         else
 3221                 wm_linkintr_tbi(sc, icr);
 3222 }
 3223 
 3224 /*
 3225  * wm_tick:
 3226  *
 3227  *      One second timer, used to check link status, sweep up
 3228  *      completed transmit jobs, etc.
 3229  */
static void
wm_tick(void *arg)
{
        struct wm_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int s;

        s = splnet();

        /*
         * Flow-control statistics registers first appeared with the
         * 82542 rev 2.1; skip them on older parts.
         */
        if (sc->sc_type >= WM_T_82542_2_1) {
                WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
                WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
                WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
                WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
                WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
        }

        ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
        /*
         * The leading 0ULL widens the sum to quad_t before accumulation;
         * the doubled '+' is a (harmless) unary plus.
         */
        ifp->if_ierrors += 0ULL + /* ensure quad_t */
            + CSR_READ(sc, WMREG_CRCERRS)
            + CSR_READ(sc, WMREG_ALGNERRC)
            + CSR_READ(sc, WMREG_SYMERRC)
            + CSR_READ(sc, WMREG_RXERRC)
            + CSR_READ(sc, WMREG_SEC)
            + CSR_READ(sc, WMREG_CEXTERR)
            + CSR_READ(sc, WMREG_RLEC);

        /* Poll link state: via the PHY if we have one, else TBI. */
        if (sc->sc_flags & WM_F_HAS_MII)
                mii_tick(&sc->sc_mii);
        else
                wm_tbi_check_link(sc);

        splx(s);

        /* Rearm for one second from now. */
        callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
 3266 
 3267 /*
 3268  * wm_reset:
 3269  *
 3270  *      Reset the i82542 chip.
 3271  */
 3272 static void
 3273 wm_reset(struct wm_softc *sc)
 3274 {
 3275         int phy_reset = 0;
 3276         uint32_t reg, mask;
 3277         int i;
 3278 
 3279         /*
 3280          * Allocate on-chip memory according to the MTU size.
 3281          * The Packet Buffer Allocation register must be written
 3282          * before the chip is reset.
 3283          */
 3284         switch (sc->sc_type) {
 3285         case WM_T_82547:
 3286         case WM_T_82547_2:
 3287                 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
 3288                     PBA_22K : PBA_30K;
 3289                 sc->sc_txfifo_head = 0;
 3290                 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
 3291                 sc->sc_txfifo_size =
 3292                     (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
 3293                 sc->sc_txfifo_stall = 0;
 3294                 break;
 3295         case WM_T_82571:
 3296         case WM_T_82572:
 3297         case WM_T_82575:        /* XXX need special handing for jumbo frames */
 3298         case WM_T_80003:
 3299                 sc->sc_pba = PBA_32K;
 3300                 break;
 3301         case WM_T_82580:
 3302         case WM_T_82580ER:
 3303                 sc->sc_pba = PBA_35K;
 3304                 break;
 3305         case WM_T_82576:
 3306                 sc->sc_pba = PBA_64K;
 3307                 break;
 3308         case WM_T_82573:
 3309                 sc->sc_pba = PBA_12K;
 3310                 break;
 3311         case WM_T_82574:
 3312         case WM_T_82583:
 3313                 sc->sc_pba = PBA_20K;
 3314                 break;
 3315         case WM_T_ICH8:
 3316                 sc->sc_pba = PBA_8K;
 3317                 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
 3318                 break;
 3319         case WM_T_ICH9:
 3320         case WM_T_ICH10:
 3321         case WM_T_PCH:
 3322                 sc->sc_pba = PBA_10K;
 3323                 break;
 3324         default:
 3325                 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
 3326                     PBA_40K : PBA_48K;
 3327                 break;
 3328         }
 3329         CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
 3330 
 3331         /* Prevent the PCI-E bus from sticking */
 3332         if (sc->sc_flags & WM_F_PCIE) {
 3333                 int timeout = 800;
 3334 
 3335                 sc->sc_ctrl |= CTRL_GIO_M_DIS;
 3336                 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 3337 
 3338                 while (timeout--) {
 3339                         if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
 3340                                 break;
 3341                         delay(100);
 3342                 }
 3343         }
 3344 
 3345         /* Set the completion timeout for interface */
 3346         if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
 3347                 wm_set_pcie_completion_timeout(sc);
 3348 
 3349         /* Clear interrupt */
 3350         CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 3351 
 3352         /* Stop the transmit and receive processes. */
 3353         CSR_WRITE(sc, WMREG_RCTL, 0);
 3354         CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
 3355         sc->sc_rctl &= ~RCTL_EN;
 3356 
 3357         /* XXX set_tbi_sbp_82543() */
 3358 
 3359         delay(10*1000);
 3360 
 3361         /* Must acquire the MDIO ownership before MAC reset */
 3362         switch (sc->sc_type) {
 3363         case WM_T_82573:
 3364         case WM_T_82574:
 3365         case WM_T_82583:
 3366                 i = 0;
 3367                 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
 3368                     | EXTCNFCTR_MDIO_SW_OWNERSHIP;
 3369                 do {
 3370                         CSR_WRITE(sc, WMREG_EXTCNFCTR,
 3371                             reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
 3372                         reg = CSR_READ(sc, WMREG_EXTCNFCTR);
 3373                         if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
 3374                                 break;
 3375                         reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
 3376                         delay(2*1000);
 3377                         i++;
 3378                 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
 3379                 break;
 3380         default:
 3381                 break;
 3382         }
 3383 
 3384         /*
 3385          * 82541 Errata 29? & 82547 Errata 28?
 3386          * See also the description about PHY_RST bit in CTRL register
 3387          * in 8254x_GBe_SDM.pdf.
 3388          */
 3389         if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
 3390                 CSR_WRITE(sc, WMREG_CTRL,
 3391                     CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
 3392                 delay(5000);
 3393         }
 3394 
 3395         switch (sc->sc_type) {
 3396         case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
 3397         case WM_T_82541:
 3398         case WM_T_82541_2:
 3399         case WM_T_82547:
 3400         case WM_T_82547_2:
 3401                 /*
 3402                  * On some chipsets, a reset through a memory-mapped write
 3403                  * cycle can cause the chip to reset before completing the
 3404                  * write cycle.  This causes major headache that can be
 3405                  * avoided by issuing the reset via indirect register writes
 3406                  * through I/O space.
 3407                  *
 3408                  * So, if we successfully mapped the I/O BAR at attach time,
 3409                  * use that.  Otherwise, try our luck with a memory-mapped
 3410                  * reset.
 3411                  */
 3412                 if (sc->sc_flags & WM_F_IOH_VALID)
 3413                         wm_io_write(sc, WMREG_CTRL, CTRL_RST);
 3414                 else
 3415                         CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
 3416                 break;
 3417         case WM_T_82545_3:
 3418         case WM_T_82546_3:
 3419                 /* Use the shadow control register on these chips. */
 3420                 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
 3421                 break;
 3422         case WM_T_80003:
 3423                 mask = swfwphysem[sc->sc_funcid];
 3424                 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
 3425                 wm_get_swfw_semaphore(sc, mask);
 3426                 CSR_WRITE(sc, WMREG_CTRL, reg);
 3427                 wm_put_swfw_semaphore(sc, mask);
 3428                 break;
 3429         case WM_T_ICH8:
 3430         case WM_T_ICH9:
 3431         case WM_T_ICH10:
 3432         case WM_T_PCH:
 3433                 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
 3434                 if (wm_check_reset_block(sc) == 0) {
 3435                         if (sc->sc_type >= WM_T_PCH) {
 3436                                 uint32_t status;
 3437 
 3438                                 status = CSR_READ(sc, WMREG_STATUS);
 3439                                 CSR_WRITE(sc, WMREG_STATUS,
 3440                                     status & ~STATUS_PHYRA);
 3441                         }
 3442 
 3443                         reg |= CTRL_PHY_RESET;
 3444                         phy_reset = 1;
 3445                 }
 3446                 wm_get_swfwhw_semaphore(sc);
 3447                 CSR_WRITE(sc, WMREG_CTRL, reg);
 3448                 delay(20*1000);
 3449                 wm_put_swfwhw_semaphore(sc);
 3450                 break;
 3451         case WM_T_82542_2_0:
 3452         case WM_T_82542_2_1:
 3453         case WM_T_82543:
 3454         case WM_T_82540:
 3455         case WM_T_82545:
 3456         case WM_T_82546:
 3457         case WM_T_82571:
 3458         case WM_T_82572:
 3459         case WM_T_82573:
 3460         case WM_T_82574:
 3461         case WM_T_82575:
 3462         case WM_T_82576:
 3463         case WM_T_82583:
 3464         default:
 3465                 /* Everything else can safely use the documented method. */
 3466                 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
 3467                 break;
 3468         }
 3469 
 3470         if (phy_reset != 0)
 3471                 wm_get_cfg_done(sc);
 3472 
 3473         /* reload EEPROM */
 3474         switch (sc->sc_type) {
 3475         case WM_T_82542_2_0:
 3476         case WM_T_82542_2_1:
 3477         case WM_T_82543:
 3478         case WM_T_82544:
 3479                 delay(10);
 3480                 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
 3481                 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
 3482                 delay(2000);
 3483                 break;
 3484         case WM_T_82540:
 3485         case WM_T_82545:
 3486         case WM_T_82545_3:
 3487         case WM_T_82546:
 3488         case WM_T_82546_3:
 3489                 delay(5*1000);
 3490                 /* XXX Disable HW ARPs on ASF enabled adapters */
 3491                 break;
 3492         case WM_T_82541:
 3493         case WM_T_82541_2:
 3494         case WM_T_82547:
 3495         case WM_T_82547_2:
 3496                 delay(20000);
 3497                 /* XXX Disable HW ARPs on ASF enabled adapters */
 3498                 break;
 3499         case WM_T_82571:
 3500         case WM_T_82572:
 3501         case WM_T_82573:
 3502         case WM_T_82574:
 3503         case WM_T_82583:
 3504                 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
 3505                         delay(10);
 3506                         reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
 3507                         CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
 3508                 }
 3509                 /* check EECD_EE_AUTORD */
 3510                 wm_get_auto_rd_done(sc);
 3511                 /*
 3512                  * Phy configuration from NVM just starts after EECD_AUTO_RD
 3513                  * is set.
 3514                  */
 3515                 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
 3516                     || (sc->sc_type == WM_T_82583))
 3517                         delay(25*1000);
 3518                 break;
 3519         case WM_T_82575:
 3520         case WM_T_82576:
 3521         case WM_T_80003:
 3522         case WM_T_ICH8:
 3523         case WM_T_ICH9:
 3524                 /* check EECD_EE_AUTORD */
 3525                 wm_get_auto_rd_done(sc);
 3526                 break;
 3527         case WM_T_ICH10:
 3528         case WM_T_PCH:
 3529                 wm_lan_init_done(sc);
 3530                 break;
 3531         default:
 3532                 panic("%s: unknown type\n", __func__);
 3533         }
 3534 
 3535         /* Check whether EEPROM is present or not */
 3536         switch (sc->sc_type) {
 3537         case WM_T_82575:
 3538         case WM_T_82576:
 3539         case WM_T_82580:
 3540         case WM_T_ICH8:
 3541         case WM_T_ICH9:
 3542                 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
 3543                         /* Not found */
 3544                         sc->sc_flags |= WM_F_EEPROM_INVALID;
 3545                         if (sc->sc_type == WM_T_82575) /* 82575 only */
 3546                                 wm_reset_init_script_82575(sc);
 3547                 }
 3548                 break;
 3549         default:
 3550                 break;
 3551         }
 3552 
 3553         /* Clear any pending interrupt events. */
 3554         CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 3555         reg = CSR_READ(sc, WMREG_ICR);
 3556 
 3557         /* reload sc_ctrl */
 3558         sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
 3559 
 3560         /* dummy read from WUC */
 3561         if (sc->sc_type == WM_T_PCH)
 3562                 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
 3563         /*
 3564          * For PCH, this write will make sure that any noise will be detected
 3565          * as a CRC error and be dropped rather than show up as a bad packet
 3566          * to the DMA engine
 3567          */
 3568         if (sc->sc_type == WM_T_PCH)
 3569                 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
 3570 
 3571         if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
 3572                 CSR_WRITE(sc, WMREG_WUC, 0);
 3573 
 3574         /* XXX need special handling for 82580 */
 3575 }
 3576 
 3577 /*
 3578  * wm_init:             [ifnet interface function]
 3579  *
 3580  *      Initialize the interface.  Must be called at splnet().
 3581  */
static int
wm_init(struct ifnet *ifp)
{
        struct wm_softc *sc = ifp->if_softc;
        struct wm_rxsoft *rxs;
        int i, error = 0;
        uint32_t reg;

        /*
         * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
         * There is a small but measurable benefit to avoiding the adjusment
         * of the descriptor so that the headers are aligned, for normal mtu,
         * on such platforms.  One possibility is that the DMA itself is
         * slightly more efficient if the front of the entire packet (instead
         * of the front of the headers) is aligned.
         *
         * Note we must always set align_tweak to 0 if we are using
         * jumbo frames.
         */
#ifdef __NO_STRICT_ALIGNMENT
        sc->sc_align_tweak = 0;
#else
        if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
                sc->sc_align_tweak = 0;
        else
                sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

        /* Cancel any pending I/O. */
        wm_stop(ifp, 0);

        /* update statistics before reset */
        ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
        ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

        /* Reset the chip to a known state. */
        wm_reset(sc);

        /*
         * On chips that may carry a manageability engine, take ownership
         * of the hardware away from the firmware if management mode is on.
         */
        switch (sc->sc_type) {
        case WM_T_82571:
        case WM_T_82572:
        case WM_T_82573:
        case WM_T_82574:
        case WM_T_82583:
        case WM_T_80003:
        case WM_T_ICH8:
        case WM_T_ICH9:
        case WM_T_ICH10:
        case WM_T_PCH:
                if (wm_check_mng_mode(sc) != 0)
                        wm_get_hw_control(sc);
                break;
        default:
                break;
        }

        /* Reset the PHY. */
        if (sc->sc_flags & WM_F_HAS_MII)
                wm_gmii_reset(sc);

        /* NOTE(review): this CTRL_EXT read is consumed only in the PCH
         * branch below; on other chips the value is simply discarded. */
        reg = CSR_READ(sc, WMREG_CTRL_EXT);
        /* Enable PHY low-power state when MAC is at D3 w/o WoL */
        if (sc->sc_type == WM_T_PCH)
                CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);

        /* Initialize the transmit descriptor ring. */
        memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
        WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        sc->sc_txfree = WM_NTXDESC(sc);
        sc->sc_txnext = 0;

        /* Program Tx ring base/length/head/tail; pre-82543 parts use the
         * old register layout. */
        if (sc->sc_type < WM_T_82543) {
                CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
                CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
                CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
                CSR_WRITE(sc, WMREG_OLD_TDH, 0);
                CSR_WRITE(sc, WMREG_OLD_TDT, 0);
                CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
        } else {
                CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
                CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
                CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
                CSR_WRITE(sc, WMREG_TDH, 0);
                CSR_WRITE(sc, WMREG_TDT, 0);
                CSR_WRITE(sc, WMREG_TIDV, 375);         /* ITR / 4 */
                CSR_WRITE(sc, WMREG_TADV, 375);         /* should be same */

                if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
                        CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
                            | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
                            | TXDCTL_WTHRESH(0));
                else {
                        CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
                            TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
                        CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
                            RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
                }
        }
        CSR_WRITE(sc, WMREG_TQSA_LO, 0);
        CSR_WRITE(sc, WMREG_TQSA_HI, 0);

        /* Initialize the transmit job descriptors. */
        for (i = 0; i < WM_TXQUEUELEN(sc); i++)
                sc->sc_txsoft[i].txs_mbuf = NULL;
        sc->sc_txsfree = WM_TXQUEUELEN(sc);
        sc->sc_txsnext = 0;
        sc->sc_txsdirty = 0;

        /*
         * Initialize the receive descriptor and receive job
         * descriptor rings.
         */
        if (sc->sc_type < WM_T_82543) {
                CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
                CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
                CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
                CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
                CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
                CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

                /* The second (unused) Rx queue is zeroed out. */
                CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
                CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
                CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
                CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
                CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
                CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
        } else {
                CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
                CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
                CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
                if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
                        CSR_WRITE(sc, WMREG_EITR(0), 450);
                        /* SRRCTL expresses the buffer size in 1KB units;
                         * MCLBYTES must be a multiple of that granularity. */
                        if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
                                panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
                        CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
                            | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
                        CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
                            | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
                            | RXDCTL_WTHRESH(1));
                } else {
                        CSR_WRITE(sc, WMREG_RDH, 0);
                        CSR_WRITE(sc, WMREG_RDT, 0);
                        CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);      /* ITR/4 */
                        CSR_WRITE(sc, WMREG_RADV, 375);         /* MUST be same */
                }
        }
        /* Attach an mbuf to every Rx slot that lacks one; existing mbufs
         * only need their descriptor rewritten (deferred on NEWQUEUE --
         * those descriptors are written after wm_set_filter() below). */
        for (i = 0; i < WM_NRXDESC; i++) {
                rxs = &sc->sc_rxsoft[i];
                if (rxs->rxs_mbuf == NULL) {
                        if ((error = wm_add_rxbuf(sc, i)) != 0) {
                                log(LOG_ERR, "%s: unable to allocate or map rx "
                                    "buffer %d, error = %d\n",
                                    device_xname(sc->sc_dev), i, error);
                                /*
                                 * XXX Should attempt to run with fewer receive
                                 * XXX buffers instead of just failing.
                                 */
                                wm_rxdrain(sc);
                                goto out;
                        }
                } else {
                        if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
                                WM_INIT_RXDESC(sc, i);
                }
        }
        sc->sc_rxptr = 0;
        sc->sc_rxdiscard = 0;
        WM_RXCHAIN_RESET(sc);

        /*
         * Clear out the VLAN table -- we don't use it (yet).
         */
        CSR_WRITE(sc, WMREG_VET, 0);
        for (i = 0; i < WM_VLAN_TABSIZE; i++)
                CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

        /*
         * Set up flow-control parameters.
         *
         * XXX Values could probably stand some tuning.
         */
        if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
            && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
                CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
                CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
                CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
        }

        sc->sc_fcrtl = FCRTL_DFLT;
        if (sc->sc_type < WM_T_82543) {
                CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
                CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
        } else {
                CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
                CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
        }

        if (sc->sc_type == WM_T_80003)
                CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
        else
                CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

        /* Deal with VLAN enables. */
        if (VLAN_ATTACHED(&sc->sc_ethercom))
                sc->sc_ctrl |= CTRL_VME;
        else
                sc->sc_ctrl &= ~CTRL_VME;

        /* Write the control registers. */
        CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

        if (sc->sc_flags & WM_F_HAS_MII) {
                int val;

                switch (sc->sc_type) {
                case WM_T_80003:
                case WM_T_ICH8:
                case WM_T_ICH9:
                case WM_T_ICH10:
                case WM_T_PCH:
                        /*
                         * Set the mac to wait the maximum time between each
                         * iteration and increase the max iterations when
                         * polling the phy; this fixes erroneous timeouts at
                         * 10Mbps.
                         */
                        wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
                            0xFFFF);
                        val = wm_kmrn_readreg(sc,
                            KUMCTRLSTA_OFFSET_INB_PARAM);
                        val |= 0x3F;
                        wm_kmrn_writereg(sc,
                            KUMCTRLSTA_OFFSET_INB_PARAM, val);
                        break;
                default:
                        break;
                }

                if (sc->sc_type == WM_T_80003) {
                        val = CSR_READ(sc, WMREG_CTRL_EXT);
                        val &= ~CTRL_EXT_LINK_MODE_MASK;
                        CSR_WRITE(sc, WMREG_CTRL_EXT, val);

                        /* Bypass RX and TX FIFO's */
                        wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
                            KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
                            | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
                        wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
                            KUMCTRLSTA_INB_CTRL_DIS_PADDING |
                            KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
                }
        }
#if 0
        CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

        /*
         * Set up checksum offload parameters.
         */
        reg = CSR_READ(sc, WMREG_RXCSUM);
        reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
        if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
                reg |= RXCSUM_IPOFL;
        if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
                reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
        if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
                reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
        CSR_WRITE(sc, WMREG_RXCSUM, reg);

        /* Reset TBI's RXCFG count */
        sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

        /*
         * Set up the interrupt registers.
         */
        CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
        sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
            ICR_RXO | ICR_RXT0;
        if ((sc->sc_flags & WM_F_HAS_MII) == 0)
                sc->sc_icr |= ICR_RXCFG;
        CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

        if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
            || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
                reg = CSR_READ(sc, WMREG_KABGTXD);
                reg |= KABGTXD_BGSQLBIAS;
                CSR_WRITE(sc, WMREG_KABGTXD, reg);
        }

        /* Set up the inter-packet gap. */
        CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

        if (sc->sc_type >= WM_T_82543) {
                /*
                 * Set up the interrupt throttling register (units of 256ns)
                 * Note that a footnote in Intel's documentation says this
                 * ticker runs at 1/4 the rate when the chip is in 100Mbit
                 * or 10Mbit mode.  Empirically, it appears to be the case
                 * that that is also true for the 1024ns units of the other
                 * interrupt-related timer registers -- so, really, we ought
                 * to divide this value by 4 when the link speed is low.
                 *
                 * XXX implement this division at link speed change!
                 */

                 /*
                  * For N interrupts/sec, set this value to:
                  * 1000000000 / (N * 256).  Note that we set the
                  * absolute and packet timer values to this value
                  * divided by 4 to get "simple timer" behavior.
                  */

                sc->sc_itr = 1500;              /* 2604 ints/sec */
                CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
        }

        /* Set the VLAN ethernetype. */
        CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

        /*
         * Set up the transmit control register; we start out with
         * a collision distance suitable for FDX, but update it when
         * we resolve the media type.
         */
        sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
            | TCTL_CT(TX_COLLISION_THRESHOLD)
            | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
        if (sc->sc_type >= WM_T_82571)
                sc->sc_tctl |= TCTL_MULR;
        CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

        if (sc->sc_type == WM_T_80003) {
                reg = CSR_READ(sc, WMREG_TCTL_EXT);
                reg &= ~TCTL_EXT_GCEX_MASK;
                reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
                CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
        }

        /* Set the media. */
        if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
                goto out;

        /*
         * Set up the receive control register; we actually program
         * the register when we set the receive filter.  Use multicast
         * address offset type 0.
         *
         * Only the i82544 has the ability to strip the incoming
         * CRC, so we don't enable that feature.
         */
        sc->sc_mchash_type = 0;
        sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
            | RCTL_MO(sc->sc_mchash_type);

        if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
            && (ifp->if_mtu > ETHERMTU)) {
                sc->sc_rctl |= RCTL_LPE;
                if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
                        CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
        }

        /* Encode the receive buffer size (MCLBYTES) into RCTL. */
        if (MCLBYTES == 2048) {
                sc->sc_rctl |= RCTL_2k;
        } else {
                if (sc->sc_type >= WM_T_82543) {
                        switch (MCLBYTES) {
                        case 4096:
                                sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
                                break;
                        case 8192:
                                sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
                                break;
                        case 16384:
                                sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
                                break;
                        default:
                                panic("wm_init: MCLBYTES %d unsupported",
                                    MCLBYTES);
                                break;
                        }
                } else panic("wm_init: i82542 requires MCLBYTES = 2048");
        }

        /* Set the receive filter. */
        wm_set_filter(sc);

        /* On 575 and later set RDT only if RX enabled... */
        if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
                for (i = 0; i < WM_NRXDESC; i++)
                        WM_INIT_RXDESC(sc, i);

        /* Start the one second link check clock. */
        callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

        /* ...all done! */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

 out:
        if (error)
                log(LOG_ERR, "%s: interface not running\n",
                    device_xname(sc->sc_dev));
        return error;
}
 3987 
 3988 /*
 3989  * wm_rxdrain:
 3990  *
 3991  *      Drain the receive queue.
 3992  */
 3993 static void
 3994 wm_rxdrain(struct wm_softc *sc)
 3995 {
 3996         struct wm_rxsoft *rxs;
 3997         int i;
 3998 
 3999         for (i = 0; i < WM_NRXDESC; i++) {
 4000                 rxs = &sc->sc_rxsoft[i];
 4001                 if (rxs->rxs_mbuf != NULL) {
 4002                         bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 4003                         m_freem(rxs->rxs_mbuf);
 4004                         rxs->rxs_mbuf = NULL;
 4005                 }
 4006         }
 4007 }
 4008 
 4009 /*
 4010  * wm_stop:             [ifnet interface function]
 4011  *
 4012  *      Stop transmission on the interface.
 4013  */
 4014 static void
 4015 wm_stop(struct ifnet *ifp, int disable)
 4016 {
 4017         struct wm_softc *sc = ifp->if_softc;
 4018         struct wm_txsoft *txs;
 4019         int i;
 4020 
 4021         /* Stop the one second clock. */
 4022         callout_stop(&sc->sc_tick_ch);
 4023 
 4024         /* Stop the 82547 Tx FIFO stall check timer. */
 4025         if (sc->sc_type == WM_T_82547)
 4026                 callout_stop(&sc->sc_txfifo_ch);
 4027 
 4028         if (sc->sc_flags & WM_F_HAS_MII) {
 4029                 /* Down the MII. */
 4030                 mii_down(&sc->sc_mii);
 4031         } else {
 4032 #if 0
 4033                 /* Should we clear PHY's status properly? */
 4034                 wm_reset(sc);
 4035 #endif
 4036         }
 4037 
 4038         /* Stop the transmit and receive processes. */
 4039         CSR_WRITE(sc, WMREG_TCTL, 0);
 4040         CSR_WRITE(sc, WMREG_RCTL, 0);
 4041         sc->sc_rctl &= ~RCTL_EN;
 4042 
 4043         /*
 4044          * Clear the interrupt mask to ensure the device cannot assert its
 4045          * interrupt line.
 4046          * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
 4047          * any currently pending or shared interrupt.
 4048          */
 4049         CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 4050         sc->sc_icr = 0;
 4051 
 4052         /* Release any queued transmit buffers. */
 4053         for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
 4054                 txs = &sc->sc_txsoft[i];
 4055                 if (txs->txs_mbuf != NULL) {
 4056                         bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
 4057                         m_freem(txs->txs_mbuf);
 4058                         txs->txs_mbuf = NULL;
 4059                 }
 4060         }
 4061 
 4062         /* Mark the interface as down and cancel the watchdog timer. */
 4063         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 4064         ifp->if_timer = 0;
 4065 
 4066         if (disable)
 4067                 wm_rxdrain(sc);
 4068 
 4069 #if 0 /* notyet */
 4070         if (sc->sc_type >= WM_T_82544)
 4071                 CSR_WRITE(sc, WMREG_WUC, 0);
 4072 #endif
 4073 }
 4074 
 4075 void
 4076 wm_get_auto_rd_done(struct wm_softc *sc)
 4077 {
 4078         int i;
 4079 
 4080         /* wait for eeprom to reload */
 4081         switch (sc->sc_type) {
 4082         case WM_T_82571:
 4083         case WM_T_82572:
 4084         case WM_T_82573:
 4085         case WM_T_82574:
 4086         case WM_T_82583:
 4087         case WM_T_82575:
 4088         case WM_T_82576:
 4089         case WM_T_80003:
 4090         case WM_T_ICH8:
 4091         case WM_T_ICH9:
 4092                 for (i = 0; i < 10; i++) {
 4093                         if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
 4094                                 break;
 4095                         delay(1000);
 4096                 }
 4097                 if (i == 10) {
 4098                         log(LOG_ERR, "%s: auto read from eeprom failed to "
 4099                             "complete\n", device_xname(sc->sc_dev));
 4100                 }
 4101                 break;
 4102         default:
 4103                 break;
 4104         }
 4105 }
 4106 
 4107 void
 4108 wm_lan_init_done(struct wm_softc *sc)
 4109 {
 4110         uint32_t reg = 0;
 4111         int i;
 4112 
 4113         /* wait for eeprom to reload */
 4114         switch (sc->sc_type) {
 4115         case WM_T_ICH10:
 4116         case WM_T_PCH:
 4117                 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
 4118                         reg = CSR_READ(sc, WMREG_STATUS);
 4119                         if ((reg & STATUS_LAN_INIT_DONE) != 0)
 4120                                 break;
 4121                         delay(100);
 4122                 }
 4123                 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
 4124                         log(LOG_ERR, "%s: %s: lan_init_done failed to "
 4125                             "complete\n", device_xname(sc->sc_dev), __func__);
 4126                 }
 4127                 break;
 4128         default:
 4129                 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
 4130                     __func__);
 4131                 break;
 4132         }
 4133 
 4134         reg &= ~STATUS_LAN_INIT_DONE;
 4135         CSR_WRITE(sc, WMREG_STATUS, reg);
 4136 }
 4137 
 4138 void
 4139 wm_get_cfg_done(struct wm_softc *sc)
 4140 {
 4141         int mask;
 4142         uint32_t reg;
 4143         int i;
 4144 
 4145         /* wait for eeprom to reload */
 4146         switch (sc->sc_type) {
 4147         case WM_T_82542_2_0:
 4148         case WM_T_82542_2_1:
 4149                 /* null */
 4150                 break;
 4151         case WM_T_82543:
 4152         case WM_T_82544:
 4153         case WM_T_82540:
 4154         case WM_T_82545:
 4155         case WM_T_82545_3:
 4156         case WM_T_82546:
 4157         case WM_T_82546_3:
 4158         case WM_T_82541:
 4159         case WM_T_82541_2:
 4160         case WM_T_82547:
 4161         case WM_T_82547_2:
 4162         case WM_T_82573:
 4163         case WM_T_82574:
 4164         case WM_T_82583:
 4165                 /* generic */
 4166                 delay(10*1000);
 4167                 break;
 4168         case WM_T_80003:
 4169         case WM_T_82571:
 4170         case WM_T_82572:
 4171         case WM_T_82575:
 4172         case WM_T_82576:
 4173         case WM_T_82580:
 4174                 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
 4175                 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
 4176                         if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
 4177                                 break;
 4178                         delay(1000);
 4179                 }
 4180                 if (i >= WM_PHY_CFG_TIMEOUT) {
 4181                         DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
 4182                                 device_xname(sc->sc_dev), __func__));
 4183                 }
 4184                 break;
 4185         case WM_T_ICH8:
 4186         case WM_T_ICH9:
 4187         case WM_T_ICH10:
 4188         case WM_T_PCH:
 4189                 if (sc->sc_type >= WM_T_PCH) {
 4190                         reg = CSR_READ(sc, WMREG_STATUS);
 4191                         if ((reg & STATUS_PHYRA) != 0)
 4192                                 CSR_WRITE(sc, WMREG_STATUS,
 4193                                     reg & ~STATUS_PHYRA);
 4194                 }
 4195                 delay(10*1000);
 4196                 break;
 4197         default:
 4198                 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
 4199                     __func__);
 4200                 break;
 4201         }
 4202 }
 4203 
 4204 /*
 4205  * wm_acquire_eeprom:
 4206  *
 4207  *      Perform the EEPROM handshake required on some chips.
 4208  */
 4209 static int
 4210 wm_acquire_eeprom(struct wm_softc *sc)
 4211 {
 4212         uint32_t reg;
 4213         int x;
 4214         int ret = 0;
 4215 
 4216         /* always success */
 4217         if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
 4218                 return 0;
 4219 
 4220         if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
 4221                 ret = wm_get_swfwhw_semaphore(sc);
 4222         } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
 4223                 /* this will also do wm_get_swsm_semaphore() if needed */
 4224                 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
 4225         } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
 4226                 ret = wm_get_swsm_semaphore(sc);
 4227         }
 4228 
 4229         if (ret) {
 4230                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 4231                         __func__);
 4232                 return 1;
 4233         }
 4234 
 4235         if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
 4236                 reg = CSR_READ(sc, WMREG_EECD);
 4237 
 4238                 /* Request EEPROM access. */
 4239                 reg |= EECD_EE_REQ;
 4240                 CSR_WRITE(sc, WMREG_EECD, reg);
 4241 
 4242                 /* ..and wait for it to be granted. */
 4243                 for (x = 0; x < 1000; x++) {
 4244                         reg = CSR_READ(sc, WMREG_EECD);
 4245                         if (reg & EECD_EE_GNT)
 4246                                 break;
 4247                         delay(5);
 4248                 }
 4249                 if ((reg & EECD_EE_GNT) == 0) {
 4250                         aprint_error_dev(sc->sc_dev,
 4251                             "could not acquire EEPROM GNT\n");
 4252                         reg &= ~EECD_EE_REQ;
 4253                         CSR_WRITE(sc, WMREG_EECD, reg);
 4254                         if (sc->sc_flags & WM_F_SWFWHW_SYNC)
 4255                                 wm_put_swfwhw_semaphore(sc);
 4256                         if (sc->sc_flags & WM_F_SWFW_SYNC)
 4257                                 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 4258                         else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
 4259                                 wm_put_swsm_semaphore(sc);
 4260                         return 1;
 4261                 }
 4262         }
 4263 
 4264         return 0;
 4265 }
 4266 
 4267 /*
 4268  * wm_release_eeprom:
 4269  *
 4270  *      Release the EEPROM mutex.
 4271  */
 4272 static void
 4273 wm_release_eeprom(struct wm_softc *sc)
 4274 {
 4275         uint32_t reg;
 4276 
 4277         /* always success */
 4278         if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
 4279                 return;
 4280 
 4281         if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
 4282                 reg = CSR_READ(sc, WMREG_EECD);
 4283                 reg &= ~EECD_EE_REQ;
 4284                 CSR_WRITE(sc, WMREG_EECD, reg);
 4285         }
 4286 
 4287         if (sc->sc_flags & WM_F_SWFWHW_SYNC)
 4288                 wm_put_swfwhw_semaphore(sc);
 4289         if (sc->sc_flags & WM_F_SWFW_SYNC)
 4290                 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 4291         else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
 4292                 wm_put_swsm_semaphore(sc);
 4293 }
 4294 
 4295 /*
 4296  * wm_eeprom_sendbits:
 4297  *
 4298  *      Send a series of bits to the EEPROM.
 4299  */
 4300 static void
 4301 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
 4302 {
 4303         uint32_t reg;
 4304         int x;
 4305 
 4306         reg = CSR_READ(sc, WMREG_EECD);
 4307 
 4308         for (x = nbits; x > 0; x--) {
 4309                 if (bits & (1U << (x - 1)))
 4310                         reg |= EECD_DI;
 4311                 else
 4312                         reg &= ~EECD_DI;
 4313                 CSR_WRITE(sc, WMREG_EECD, reg);
 4314                 delay(2);
 4315                 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
 4316                 delay(2);
 4317                 CSR_WRITE(sc, WMREG_EECD, reg);
 4318                 delay(2);
 4319         }
 4320 }
 4321 
 4322 /*
 4323  * wm_eeprom_recvbits:
 4324  *
 4325  *      Receive a series of bits from the EEPROM.
 4326  */
 4327 static void
 4328 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
 4329 {
 4330         uint32_t reg, val;
 4331         int x;
 4332 
 4333         reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
 4334 
 4335         val = 0;
 4336         for (x = nbits; x > 0; x--) {
 4337                 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
 4338                 delay(2);
 4339                 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
 4340                         val |= (1U << (x - 1));
 4341                 CSR_WRITE(sc, WMREG_EECD, reg);
 4342                 delay(2);
 4343         }
 4344         *valp = val;
 4345 }
 4346 
 4347 /*
 4348  * wm_read_eeprom_uwire:
 4349  *
 4350  *      Read a word from the EEPROM using the MicroWire protocol.
 4351  */
 4352 static int
 4353 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
 4354 {
 4355         uint32_t reg, val;
 4356         int i;
 4357 
 4358         for (i = 0; i < wordcnt; i++) {
 4359                 /* Clear SK and DI. */
 4360                 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
 4361                 CSR_WRITE(sc, WMREG_EECD, reg);
 4362 
 4363                 /* Set CHIP SELECT. */
 4364                 reg |= EECD_CS;
 4365                 CSR_WRITE(sc, WMREG_EECD, reg);
 4366                 delay(2);
 4367 
 4368                 /* Shift in the READ command. */
 4369                 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 4370 
 4371                 /* Shift in address. */
 4372                 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
 4373 
 4374                 /* Shift out the data. */
 4375                 wm_eeprom_recvbits(sc, &val, 16);
 4376                 data[i] = val & 0xffff;
 4377 
 4378                 /* Clear CHIP SELECT. */
 4379                 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
 4380                 CSR_WRITE(sc, WMREG_EECD, reg);
 4381                 delay(2);
 4382         }
 4383 
 4384         return 0;
 4385 }
 4386 
 4387 /*
 4388  * wm_spi_eeprom_ready:
 4389  *
 4390  *      Wait for a SPI EEPROM to be ready for commands.
 4391  */
 4392 static int
 4393 wm_spi_eeprom_ready(struct wm_softc *sc)
 4394 {
 4395         uint32_t val;
 4396         int usec;
 4397 
 4398         for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
 4399                 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
 4400                 wm_eeprom_recvbits(sc, &val, 8);
 4401                 if ((val & SPI_SR_RDY) == 0)
 4402                         break;
 4403         }
 4404         if (usec >= SPI_MAX_RETRIES) {
 4405                 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
 4406                 return 1;
 4407         }
 4408         return 0;
 4409 }
 4410 
 4411 /*
 4412  * wm_read_eeprom_spi:
 4413  *
 4414  *      Read a work from the EEPROM using the SPI protocol.
 4415  */
 4416 static int
 4417 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
 4418 {
 4419         uint32_t reg, val;
 4420         int i;
 4421         uint8_t opc;
 4422 
 4423         /* Clear SK and CS. */
 4424         reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
 4425         CSR_WRITE(sc, WMREG_EECD, reg);
 4426         delay(2);
 4427 
 4428         if (wm_spi_eeprom_ready(sc))
 4429                 return 1;
 4430 
 4431         /* Toggle CS to flush commands. */
 4432         CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
 4433         delay(2);
 4434         CSR_WRITE(sc, WMREG_EECD, reg);
 4435         delay(2);
 4436 
 4437         opc = SPI_OPC_READ;
 4438         if (sc->sc_ee_addrbits == 8 && word >= 128)
 4439                 opc |= SPI_OPC_A8;
 4440 
 4441         wm_eeprom_sendbits(sc, opc, 8);
 4442         wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
 4443 
 4444         for (i = 0; i < wordcnt; i++) {
 4445                 wm_eeprom_recvbits(sc, &val, 16);
 4446                 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
 4447         }
 4448 
 4449         /* Raise CS and clear SK. */
 4450         reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
 4451         CSR_WRITE(sc, WMREG_EECD, reg);
 4452         delay(2);
 4453 
 4454         return 0;
 4455 }
 4456 
 4457 #define EEPROM_CHECKSUM         0xBABA
 4458 #define EEPROM_SIZE             0x0040
 4459 
 4460 /*
 4461  * wm_validate_eeprom_checksum
 4462  *
 4463  * The checksum is defined as the sum of the first 64 (16 bit) words.
 4464  */
 4465 static int
 4466 wm_validate_eeprom_checksum(struct wm_softc *sc)
 4467 {
 4468         uint16_t checksum;
 4469         uint16_t eeprom_data;
 4470         int i;
 4471 
 4472         checksum = 0;
 4473 
 4474         for (i = 0; i < EEPROM_SIZE; i++) {
 4475                 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
 4476                         return 1;
 4477                 checksum += eeprom_data;
 4478         }
 4479 
 4480         if (checksum != (uint16_t) EEPROM_CHECKSUM)
 4481                 return 1;
 4482 
 4483         return 0;
 4484 }
 4485 
 4486 /*
 4487  * wm_read_eeprom:
 4488  *
 4489  *      Read data from the serial EEPROM.
 4490  */
 4491 static int
 4492 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
 4493 {
 4494         int rv;
 4495 
 4496         if (sc->sc_flags & WM_F_EEPROM_INVALID)
 4497                 return 1;
 4498 
 4499         if (wm_acquire_eeprom(sc))
 4500                 return 1;
 4501 
 4502         if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
 4503             || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
 4504                 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
 4505         else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
 4506                 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
 4507         else if (sc->sc_flags & WM_F_EEPROM_SPI)
 4508                 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
 4509         else
 4510                 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
 4511 
 4512         wm_release_eeprom(sc);
 4513         return rv;
 4514 }
 4515 
 4516 static int
 4517 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
 4518     uint16_t *data)
 4519 {
 4520         int i, eerd = 0;
 4521         int error = 0;
 4522 
 4523         for (i = 0; i < wordcnt; i++) {
 4524                 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
 4525 
 4526                 CSR_WRITE(sc, WMREG_EERD, eerd);
 4527                 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
 4528                 if (error != 0)
 4529                         break;
 4530 
 4531                 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
 4532         }
 4533 
 4534         return error;
 4535 }
 4536 
 4537 static int
 4538 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
 4539 {
 4540         uint32_t attempts = 100000;
 4541         uint32_t i, reg = 0;
 4542         int32_t done = -1;
 4543 
 4544         for (i = 0; i < attempts; i++) {
 4545                 reg = CSR_READ(sc, rw);
 4546 
 4547                 if (reg & EERD_DONE) {
 4548                         done = 0;
 4549                         break;
 4550                 }
 4551                 delay(5);
 4552         }
 4553 
 4554         return done;
 4555 }
 4556 
 4557 /*
 4558  * wm_add_rxbuf:
 4559  *
 4560  *      Add a receive buffer to the indiciated descriptor.
 4561  */
 4562 static int
 4563 wm_add_rxbuf(struct wm_softc *sc, int idx)
 4564 {
 4565         struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
 4566         struct mbuf *m;
 4567         int error;
 4568 
 4569         MGETHDR(m, M_DONTWAIT, MT_DATA);
 4570         if (m == NULL)
 4571                 return ENOBUFS;
 4572 
 4573         MCLGET(m, M_DONTWAIT);
 4574         if ((m->m_flags & M_EXT) == 0) {
 4575                 m_freem(m);
 4576                 return ENOBUFS;
 4577         }
 4578 
 4579         if (rxs->rxs_mbuf != NULL)
 4580                 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
 4581 
 4582         rxs->rxs_mbuf = m;
 4583 
 4584         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
 4585         error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
 4586             BUS_DMA_READ|BUS_DMA_NOWAIT);
 4587         if (error) {
 4588                 /* XXX XXX XXX */
 4589                 aprint_error_dev(sc->sc_dev,
 4590                     "unable to load rx DMA map %d, error = %d\n",
 4591                     idx, error);
 4592                 panic("wm_add_rxbuf");
 4593         }
 4594 
 4595         bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
 4596             rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
 4597 
 4598         if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
 4599                 if ((sc->sc_rctl & RCTL_EN) != 0)
 4600                         WM_INIT_RXDESC(sc, idx);
 4601         } else 
 4602                 WM_INIT_RXDESC(sc, idx);
 4603 
 4604         return 0;
 4605 }
 4606 
 4607 /*
 4608  * wm_set_ral:
 4609  *
 4610  *      Set an entery in the receive address list.
 4611  */
 4612 static void
 4613 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
 4614 {
 4615         uint32_t ral_lo, ral_hi;
 4616 
 4617         if (enaddr != NULL) {
 4618                 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
 4619                     (enaddr[3] << 24);
 4620                 ral_hi = enaddr[4] | (enaddr[5] << 8);
 4621                 ral_hi |= RAL_AV;
 4622         } else {
 4623                 ral_lo = 0;
 4624                 ral_hi = 0;
 4625         }
 4626 
 4627         if (sc->sc_type >= WM_T_82544) {
 4628                 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
 4629                     ral_lo);
 4630                 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
 4631                     ral_hi);
 4632         } else {
 4633                 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
 4634                 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
 4635         }
 4636 }
 4637 
 4638 /*
 4639  * wm_mchash:
 4640  *
 4641  *      Compute the hash of the multicast address for the 4096-bit
 4642  *      multicast filter.
 4643  */
 4644 static uint32_t
 4645 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
 4646 {
 4647         static const int lo_shift[4] = { 4, 3, 2, 0 };
 4648         static const int hi_shift[4] = { 4, 5, 6, 8 };
 4649         static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
 4650         static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
 4651         uint32_t hash;
 4652 
 4653         if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
 4654             || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
 4655                 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
 4656                     (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
 4657                 return (hash & 0x3ff);
 4658         }
 4659         hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
 4660             (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
 4661 
 4662         return (hash & 0xfff);
 4663 }
 4664 
 4665 /*
 4666  * wm_set_filter:
 4667  *
 4668  *      Set up the receive filter.
 4669  */
 4670 static void
 4671 wm_set_filter(struct wm_softc *sc)
 4672 {
 4673         struct ethercom *ec = &sc->sc_ethercom;
 4674         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 4675         struct ether_multi *enm;
 4676         struct ether_multistep step;
 4677         bus_addr_t mta_reg;
 4678         uint32_t hash, reg, bit;
 4679         int i, size;
 4680 
 4681         if (sc->sc_type >= WM_T_82544)
 4682                 mta_reg = WMREG_CORDOVA_MTA;
 4683         else
 4684                 mta_reg = WMREG_MTA;
 4685 
 4686         sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
 4687 
 4688         if (ifp->if_flags & IFF_BROADCAST)
 4689                 sc->sc_rctl |= RCTL_BAM;
 4690         if (ifp->if_flags & IFF_PROMISC) {
 4691                 sc->sc_rctl |= RCTL_UPE;
 4692                 goto allmulti;
 4693         }
 4694 
 4695         /*
 4696          * Set the station address in the first RAL slot, and
 4697          * clear the remaining slots.
 4698          */
 4699         if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
 4700             || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
 4701                 size = WM_ICH8_RAL_TABSIZE;
 4702         else
 4703                 size = WM_RAL_TABSIZE;
 4704         wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
 4705         for (i = 1; i < size; i++)
 4706                 wm_set_ral(sc, NULL, i);
 4707 
 4708         if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
 4709             || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
 4710                 size = WM_ICH8_MC_TABSIZE;
 4711         else
 4712                 size = WM_MC_TABSIZE;
 4713         /* Clear out the multicast table. */
 4714         for (i = 0; i < size; i++)
 4715                 CSR_WRITE(sc, mta_reg + (i << 2), 0);
 4716 
 4717         ETHER_FIRST_MULTI(step, ec, enm);
 4718         while (enm != NULL) {
 4719                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
 4720                         /*
 4721                          * We must listen to a range of multicast addresses.
 4722                          * For now, just accept all multicasts, rather than
 4723                          * trying to set only those filter bits needed to match
 4724                          * the range.  (At this time, the only use of address
 4725                          * ranges is for IP multicast routing, for which the
 4726                          * range is big enough to require all bits set.)
 4727                          */
 4728                         goto allmulti;
 4729                 }
 4730 
 4731                 hash = wm_mchash(sc, enm->enm_addrlo);
 4732 
 4733                 reg = (hash >> 5);
 4734                 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
 4735                     || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
 4736                         reg &= 0x1f;
 4737                 else
 4738                         reg &= 0x7f;
 4739                 bit = hash & 0x1f;
 4740 
 4741                 hash = CSR_READ(sc, mta_reg + (reg << 2));
 4742                 hash |= 1U << bit;
 4743 
 4744                 /* XXX Hardware bug?? */
 4745                 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
 4746                         bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
 4747                         CSR_WRITE(sc, mta_reg + (reg << 2), hash);
 4748                         CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
 4749                 } else
 4750                         CSR_WRITE(sc, mta_reg + (reg << 2), hash);
 4751 
 4752                 ETHER_NEXT_MULTI(step, enm);
 4753         }
 4754 
 4755         ifp->if_flags &= ~IFF_ALLMULTI;
 4756         goto setit;
 4757 
 4758  allmulti:
 4759         ifp->if_flags |= IFF_ALLMULTI;
 4760         sc->sc_rctl |= RCTL_MPE;
 4761 
 4762  setit:
 4763         CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
 4764 }
 4765 
 4766 /*
 4767  * wm_tbi_mediainit:
 4768  *
 4769  *      Initialize media for use on 1000BASE-X devices.
 4770  */
 4771 static void
 4772 wm_tbi_mediainit(struct wm_softc *sc)
 4773 {
 4774         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 4775         const char *sep = "";
 4776 
 4777         if (sc->sc_type < WM_T_82543)
 4778                 sc->sc_tipg = TIPG_WM_DFLT;
 4779         else
 4780                 sc->sc_tipg = TIPG_LG_DFLT;
 4781 
 4782         sc->sc_tbi_anegticks = 5;
 4783 
 4784         /* Initialize our media structures */
 4785         sc->sc_mii.mii_ifp = ifp;
 4786 
 4787         sc->sc_ethercom.ec_mii = &sc->sc_mii;
 4788         ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
 4789             wm_tbi_mediastatus);
 4790 
 4791         /*
 4792          * SWD Pins:
 4793          *
 4794          *      0 = Link LED (output)
 4795          *      1 = Loss Of Signal (input)
 4796          */
 4797         sc->sc_ctrl |= CTRL_SWDPIO(0);
 4798         sc->sc_ctrl &= ~CTRL_SWDPIO(1);
 4799 
 4800         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 4801 
 4802 #define ADD(ss, mm, dd)                                                 \
 4803 do {                                                                    \
 4804         aprint_normal("%s%s", sep, ss);                                 \
 4805         ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
 4806         sep = ", ";                                                     \
 4807 } while (/*CONSTCOND*/0)
 4808 
 4809         aprint_normal_dev(sc->sc_dev, "");
 4810         ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
 4811         ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
 4812         ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
 4813         aprint_normal("\n");
 4814 
 4815 #undef ADD
 4816 
 4817         ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
 4818 }
 4819 
 4820 /*
 4821  * wm_tbi_mediastatus:  [ifmedia interface function]
 4822  *
 4823  *      Get the current interface media status on a 1000BASE-X device.
 4824  */
 4825 static void
 4826 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 4827 {
 4828         struct wm_softc *sc = ifp->if_softc;
 4829         uint32_t ctrl, status;
 4830 
 4831         ifmr->ifm_status = IFM_AVALID;
 4832         ifmr->ifm_active = IFM_ETHER;
 4833 
 4834         status = CSR_READ(sc, WMREG_STATUS);
 4835         if ((status & STATUS_LU) == 0) {
 4836                 ifmr->ifm_active |= IFM_NONE;
 4837                 return;
 4838         }
 4839 
 4840         ifmr->ifm_status |= IFM_ACTIVE;
 4841         ifmr->ifm_active |= IFM_1000_SX;
 4842         if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
 4843                 ifmr->ifm_active |= IFM_FDX;
 4844         ctrl = CSR_READ(sc, WMREG_CTRL);
 4845         if (ctrl & CTRL_RFCE)
 4846                 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
 4847         if (ctrl & CTRL_TFCE)
 4848                 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
 4849 }
 4850 
 4851 /*
 4852  * wm_tbi_mediachange:  [ifmedia interface function]
 4853  *
 4854  *      Set hardware to newly-selected media on a 1000BASE-X device.
 4855  */
 4856 static int
 4857 wm_tbi_mediachange(struct ifnet *ifp)
 4858 {
 4859         struct wm_softc *sc = ifp->if_softc;
 4860         struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
 4861         uint32_t status;
 4862         int i;
 4863 
 4864         sc->sc_txcw = 0;
 4865         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
 4866             (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
 4867                 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
 4868         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
 4869                 sc->sc_txcw |= TXCW_ANE;
 4870         } else {
 4871                 /*
 4872                  * If autonegotiation is turned off, force link up and turn on
 4873                  * full duplex
 4874                  */
 4875                 sc->sc_txcw &= ~TXCW_ANE;
 4876                 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
 4877                 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
 4878                 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 4879                 delay(1000);
 4880         }
 4881 
 4882         DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
 4883                     device_xname(sc->sc_dev),sc->sc_txcw));
 4884         CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
 4885         delay(10000);
 4886 
 4887         i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
 4888         DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
 4889 
 4890         /*
 4891          * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
 4892          * optics detect a signal, 0 if they don't.
 4893          */
 4894         if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
 4895                 /* Have signal; wait for the link to come up. */
 4896 
 4897                 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
 4898                         /*
 4899                          * Reset the link, and let autonegotiation do its thing
 4900                          */
 4901                         sc->sc_ctrl |= CTRL_LRST;
 4902                         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 4903                         delay(1000);
 4904                         sc->sc_ctrl &= ~CTRL_LRST;
 4905                         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 4906                         delay(1000);
 4907                 }
 4908 
 4909                 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
 4910                         delay(10000);
 4911                         if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
 4912                                 break;
 4913                 }
 4914 
 4915                 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
 4916                             device_xname(sc->sc_dev),i));
 4917 
 4918                 status = CSR_READ(sc, WMREG_STATUS);
 4919                 DPRINTF(WM_DEBUG_LINK,
 4920                     ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
 4921                         device_xname(sc->sc_dev),status, STATUS_LU));
 4922                 if (status & STATUS_LU) {
 4923                         /* Link is up. */
 4924                         DPRINTF(WM_DEBUG_LINK,
 4925                             ("%s: LINK: set media -> link up %s\n",
 4926                             device_xname(sc->sc_dev),
 4927                             (status & STATUS_FD) ? "FDX" : "HDX"));
 4928 
 4929                         /*
 4930                          * NOTE: CTRL will update TFCE and RFCE automatically,
 4931                          * so we should update sc->sc_ctrl
 4932                          */
 4933                         sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
 4934                         sc->sc_tctl &= ~TCTL_COLD(0x3ff);
 4935                         sc->sc_fcrtl &= ~FCRTL_XONE;
 4936                         if (status & STATUS_FD)
 4937                                 sc->sc_tctl |=
 4938                                     TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
 4939                         else
 4940                                 sc->sc_tctl |=
 4941                                     TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
 4942                         if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
 4943                                 sc->sc_fcrtl |= FCRTL_XONE;
 4944                         CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
 4945                         CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
 4946                                       WMREG_OLD_FCRTL : WMREG_FCRTL,
 4947                                       sc->sc_fcrtl);
 4948                         sc->sc_tbi_linkup = 1;
 4949                 } else {
 4950                         if (i == WM_LINKUP_TIMEOUT)
 4951                                 wm_check_for_link(sc);
 4952                         /* Link is down. */
 4953                         DPRINTF(WM_DEBUG_LINK,
 4954                             ("%s: LINK: set media -> link down\n",
 4955                             device_xname(sc->sc_dev)));
 4956                         sc->sc_tbi_linkup = 0;
 4957                 }
 4958         } else {
 4959                 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
 4960                     device_xname(sc->sc_dev)));
 4961                 sc->sc_tbi_linkup = 0;
 4962         }
 4963 
 4964         wm_tbi_set_linkled(sc);
 4965 
 4966         return 0;
 4967 }
 4968 
 4969 /*
 4970  * wm_tbi_set_linkled:
 4971  *
 4972  *      Update the link LED on 1000BASE-X devices.
 4973  */
 4974 static void
 4975 wm_tbi_set_linkled(struct wm_softc *sc)
 4976 {
 4977 
 4978         if (sc->sc_tbi_linkup)
 4979                 sc->sc_ctrl |= CTRL_SWDPIN(0);
 4980         else
 4981                 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
 4982 
 4983         /* 82540 or newer devices are active low */
 4984         sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
 4985 
 4986         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 4987 }
 4988 
 4989 /*
 4990  * wm_tbi_check_link:
 4991  *
 4992  *      Check the link on 1000BASE-X devices.
 4993  */
 4994 static void
 4995 wm_tbi_check_link(struct wm_softc *sc)
 4996 {
 4997         struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 4998         struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
 4999         uint32_t rxcw, ctrl, status;
 5000 
 5001         status = CSR_READ(sc, WMREG_STATUS);
 5002 
 5003         rxcw = CSR_READ(sc, WMREG_RXCW);
 5004         ctrl = CSR_READ(sc, WMREG_CTRL);
 5005 
 5006         /* set link status */
 5007         if ((status & STATUS_LU) == 0) {
 5008                 DPRINTF(WM_DEBUG_LINK,
 5009                     ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
 5010                 sc->sc_tbi_linkup = 0;
 5011         } else if (sc->sc_tbi_linkup == 0) {
 5012                 DPRINTF(WM_DEBUG_LINK,
 5013                     ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
 5014                     (status & STATUS_FD) ? "FDX" : "HDX"));
 5015                 sc->sc_tbi_linkup = 1;
 5016         }
 5017 
 5018         if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
 5019             && ((status & STATUS_LU) == 0)) {
 5020                 sc->sc_tbi_linkup = 0;
 5021                 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
 5022                         /* RXCFG storm! */
 5023                         DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
 5024                                 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
 5025                         wm_init(ifp);
 5026                         wm_start(ifp);
 5027                 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
 5028                         /* If the timer expired, retry autonegotiation */
 5029                         if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
 5030                                 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
 5031                                 sc->sc_tbi_ticks = 0;
 5032                                 /*
 5033                                  * Reset the link, and let autonegotiation do
 5034                                  * its thing
 5035                                  */
 5036                                 sc->sc_ctrl |= CTRL_LRST;
 5037                                 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 5038                                 delay(1000);
 5039                                 sc->sc_ctrl &= ~CTRL_LRST;
 5040                                 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 5041                                 delay(1000);
 5042                                 CSR_WRITE(sc, WMREG_TXCW,
 5043                                     sc->sc_txcw & ~TXCW_ANE);
 5044                                 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
 5045                         }
 5046                 }
 5047         }
 5048 
 5049         wm_tbi_set_linkled(sc);
 5050 }
 5051 
 5052 /*
 5053  * wm_gmii_reset:
 5054  *
 5055  *      Reset the PHY.
 5056  */
static void
wm_gmii_reset(struct wm_softc *sc)
{
        uint32_t reg;
        int rv;

        /*
         * Acquire the PHY semaphore first; which arbitration mechanism
         * applies (SWSM, SW/FW, or SW/FW/HW) depends on the MAC type.
         */
        switch (sc->sc_type) {
        case WM_T_82571:
        case WM_T_82572:
        case WM_T_82573:
        case WM_T_82574:
        case WM_T_82583:
                /* XXX should get sw semaphore, too */
                rv = wm_get_swsm_semaphore(sc);
                break;
        case WM_T_82575:
        case WM_T_82576:
        case WM_T_82580:
        case WM_T_82580ER:
        case WM_T_80003:
                rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
                break;
        case WM_T_ICH8:
        case WM_T_ICH9:
        case WM_T_ICH10:
        case WM_T_PCH:
                rv = wm_get_swfwhw_semaphore(sc);
                break;
        default:
                /* No semaphore needed on the remaining (older) types. */
                rv = 0;
                break;
        }
        if (rv != 0) {
                aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
                    __func__);
                return;
        }

        /* Perform the chip-specific PHY reset. */
        switch (sc->sc_type) {
        case WM_T_82542_2_0:
        case WM_T_82542_2_1:
                /* null */
                break;
        case WM_T_82543:
                /*
                 * With 82543, we need to force speed and duplex on the MAC
                 * equal to what the PHY speed and duplex configuration is.
                 * In addition, we need to perform a hardware reset on the PHY
                 * to take it out of reset.
                 */
                sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

                /* The PHY reset pin is active-low. */
                reg = CSR_READ(sc, WMREG_CTRL_EXT);
                reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
                    CTRL_EXT_SWDPIN(4));
                /* Drive SWDPIN 4 as an output, currently low (in reset). */
                reg |= CTRL_EXT_SWDPIO(4);

                CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
                delay(10*1000);

                /* Raise the pin to take the PHY out of reset. */
                CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
                delay(150);
#if 0
                sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
                delay(20*1000); /* XXX extra delay to get PHY ID? */
                break;
        case WM_T_82544:        /* reset 10000us */
        case WM_T_82540:
        case WM_T_82545:
        case WM_T_82545_3:
        case WM_T_82546:
        case WM_T_82546_3:
        case WM_T_82541:
        case WM_T_82541_2:
        case WM_T_82547:
        case WM_T_82547_2:
        case WM_T_82571:        /* reset 100us */
        case WM_T_82572:
        case WM_T_82573:
        case WM_T_82574:
        case WM_T_82575:
        case WM_T_82576:
        case WM_T_82580:
        case WM_T_82580ER:
        case WM_T_82583:
        case WM_T_80003:
                /* Generic reset: pulse CTRL_PHY_RESET via the CTRL register. */
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
                delay(20000);
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
                delay(20000);

                if ((sc->sc_type == WM_T_82541)
                    || (sc->sc_type == WM_T_82541_2)
                    || (sc->sc_type == WM_T_82547)
                    || (sc->sc_type == WM_T_82547_2)) {
                        /* workaround for igp are done in igp_reset() */
                        /* XXX add code to set LED after phy reset */
                }
                break;
        case WM_T_ICH8:
        case WM_T_ICH9:
        case WM_T_ICH10:
        case WM_T_PCH:
                /* Generic reset, but with much shorter delays on ICH/PCH. */
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
                delay(100);
                CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
                delay(150);
                break;
        default:
                panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
                    __func__);
                break;
        }

        /* Release the PHY semaphore (mirror of the acquire switch above). */
        switch (sc->sc_type) {
        case WM_T_82571:
        case WM_T_82572:
        case WM_T_82573:
        case WM_T_82574:
        case WM_T_82583:
                /* XXX should put sw semaphore, too */
                wm_put_swsm_semaphore(sc);
                break;
        case WM_T_82575:
        case WM_T_82576:
        case WM_T_82580:
        case WM_T_82580ER:
        case WM_T_80003:
                wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
                break;
        case WM_T_ICH8:
        case WM_T_ICH9:
        case WM_T_ICH10:
        case WM_T_PCH:
                wm_put_swfwhw_semaphore(sc);
                break;
        default:
                /* Nothing to release; rv is a dead store here. */
                rv = 0;
                break;
        }

        /* Wait for the PHY configuration load to complete. */
        wm_get_cfg_done(sc);

        /* Chip-specific post-reset fixups. */
        switch (sc->sc_type) {
        case WM_T_82542_2_0:
        case WM_T_82542_2_1:
        case WM_T_82543:
        case WM_T_82544:
        case WM_T_82540:
        case WM_T_82545:
        case WM_T_82545_3:
        case WM_T_82546:
        case WM_T_82546_3:
        case WM_T_82541_2:
        case WM_T_82547_2:
        case WM_T_82571:
        case WM_T_82572:
        case WM_T_82573:
        case WM_T_82574:
        case WM_T_82575:
        case WM_T_82576:
        case WM_T_82580:
        case WM_T_82580ER:
        case WM_T_82583:
        case WM_T_80003:
                /* null */
                break;
        case WM_T_82541:
        case WM_T_82547:
                /* XXX Configure the activity LED after PHY reset */
                break;
        case WM_T_ICH8:
        case WM_T_ICH9:
        case WM_T_ICH10:
        case WM_T_PCH:
                /* Allow time for h/w to get to a quiescent state after reset */
                delay(10*1000);

                if (sc->sc_type == WM_T_PCH) {
                        wm_hv_phy_workaround_ich8lan(sc);

                        /*
                         * dummy read to clear the phy wakeup bit after lcd
                         * reset
                         */
                        reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
                }

                /*
                 * XXX Configure the LCD with the extended configuration region
                 * in NVM
                 */

                /* Configure the LCD with the OEM bits in NVM */
                if (sc->sc_type == WM_T_PCH) {
                        /*
                         * Disable LPLU.
                         * XXX It seems that 82567 has LPLU, too.
                         */
                        reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
                        reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
                        reg |= HV_OEM_BITS_ANEGNOW;
                        wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
                }
                break;
        default:
                panic("%s: unknown type\n", __func__);
                break;
        }
}
 5278 
 5279 /*
 5280  * wm_gmii_mediainit:
 5281  *
 5282  *      Initialize media for use on 1000BASE-T devices.
 5283  */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        /* We have MII. */
        sc->sc_flags |= WM_F_HAS_MII;

        /* 80003 uses a different inter-packet gap default. */
        if (sc->sc_type == WM_T_80003)
                sc->sc_tipg =  TIPG_1000T_80003_DFLT;
        else
                sc->sc_tipg = TIPG_1000T_DFLT;

        /*
         * Let the chip set speed/duplex on its own based on
         * signals from the PHY.
         * XXXbouyer - I'm not sure this is right for the 80003,
         * the em driver only sets CTRL_SLU here - but it seems to work.
         */
        sc->sc_ctrl |= CTRL_SLU;
        CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

        /* Initialize our media structures and probe the GMII. */
        sc->sc_mii.mii_ifp = ifp;

        /*
         * Pick the PHY register-access routines based on the PCI product
         * ID; the PCH and ICH parts need their own page-aware accessors.
         */
        switch (prodid) {
        case PCI_PRODUCT_INTEL_PCH_M_LM:
        case PCI_PRODUCT_INTEL_PCH_M_LC:
                /* 82577 */
                sc->sc_phytype = WMPHY_82577;
                sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
                sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
                break;
        case PCI_PRODUCT_INTEL_PCH_D_DM:
        case PCI_PRODUCT_INTEL_PCH_D_DC:
                /* 82578 */
                sc->sc_phytype = WMPHY_82578;
                sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
                sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
                break;
        case PCI_PRODUCT_INTEL_82801I_BM:
        case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
        case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
        case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
        case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
        case PCI_PRODUCT_INTEL_82801J_R_BM_V:
                /* 82567 */
                sc->sc_phytype = WMPHY_BM;
                sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
                sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
                break;
        default:
                /* Otherwise choose the accessors by MAC generation. */
                if ((sc->sc_flags & WM_F_SGMII) != 0) {
                        sc->sc_mii.mii_readreg = wm_sgmii_readreg;
                        sc->sc_mii.mii_writereg = wm_sgmii_writereg;
                } else if (sc->sc_type >= WM_T_80003) {
                        sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
                        sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
                } else if (sc->sc_type >= WM_T_82544) {
                        sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
                        sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
                } else {
                        /* i82543: bit-banged MDIO via SWDPIN. */
                        sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
                        sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
                }
                break;
        }
        sc->sc_mii.mii_statchg = wm_gmii_statchg;

        wm_gmii_reset(sc);

        sc->sc_ethercom.ec_mii = &sc->sc_mii;
        ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
            wm_gmii_mediastatus);

        mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, MIIF_DOPAUSE);

        if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
                /* if failed, retry with *_bm_* */
                sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
                sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

                mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
                    MII_OFFSET_ANY, MIIF_DOPAUSE);
        }
        if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
                /* Still no PHY found: fall back to a "none" media entry. */
                ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
                sc->sc_phytype = WMPHY_NONE;
        } else {
                /* Check PHY type */
                uint32_t model;
                struct mii_softc *child;

                /* Refine sc_phytype from the PHY the MII layer attached. */
                child = LIST_FIRST(&sc->sc_mii.mii_phys);
                if (device_is_a(child->mii_dev, "igphy")) {
                        struct igphy_softc *isc = (struct igphy_softc *)child;

                        model = isc->sc_mii.mii_mpd_model;
                        if (model == MII_MODEL_yyINTEL_I82566)
                                sc->sc_phytype = WMPHY_IGP_3;
                }

                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
        }
}
 5391 
 5392 /*
 5393  * wm_gmii_mediastatus: [ifmedia interface function]
 5394  *
 5395  *      Get the current interface media status on a 1000BASE-T device.
 5396  */
 5397 static void
 5398 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 5399 {
 5400         struct wm_softc *sc = ifp->if_softc;
 5401 
 5402         ether_mediastatus(ifp, ifmr);
 5403         ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
 5404             | sc->sc_flowflags;
 5405 }
 5406 
 5407 /*
 5408  * wm_gmii_mediachange: [ifmedia interface function]
 5409  *
 5410  *      Set hardware to newly-selected media on a 1000BASE-T device.
 5411  */
 5412 static int
 5413 wm_gmii_mediachange(struct ifnet *ifp)
 5414 {
 5415         struct wm_softc *sc = ifp->if_softc;
 5416         struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
 5417         int rc;
 5418 
 5419         if ((ifp->if_flags & IFF_UP) == 0)
 5420                 return 0;
 5421 
 5422         sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
 5423         sc->sc_ctrl |= CTRL_SLU;
 5424         if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
 5425             || (sc->sc_type > WM_T_82543)) {
 5426                 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
 5427         } else {
 5428                 sc->sc_ctrl &= ~CTRL_ASDE;
 5429                 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
 5430                 if (ife->ifm_media & IFM_FDX)
 5431                         sc->sc_ctrl |= CTRL_FD;
 5432                 switch (IFM_SUBTYPE(ife->ifm_media)) {
 5433                 case IFM_10_T:
 5434                         sc->sc_ctrl |= CTRL_SPEED_10;
 5435                         break;
 5436                 case IFM_100_TX:
 5437                         sc->sc_ctrl |= CTRL_SPEED_100;
 5438                         break;
 5439                 case IFM_1000_T:
 5440                         sc->sc_ctrl |= CTRL_SPEED_1000;
 5441                         break;
 5442                 default:
 5443                         panic("wm_gmii_mediachange: bad media 0x%x",
 5444                             ife->ifm_media);
 5445                 }
 5446         }
 5447         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 5448         if (sc->sc_type <= WM_T_82543)
 5449                 wm_gmii_reset(sc);
 5450 
 5451         if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
 5452                 return 0;
 5453         return rc;
 5454 }
 5455 
 5456 #define MDI_IO          CTRL_SWDPIN(2)
 5457 #define MDI_DIR         CTRL_SWDPIO(2)  /* host -> PHY */
 5458 #define MDI_CLK         CTRL_SWDPIN(3)
 5459 
 5460 static void
 5461 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
 5462 {
 5463         uint32_t i, v;
 5464 
 5465         v = CSR_READ(sc, WMREG_CTRL);
 5466         v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
 5467         v |= MDI_DIR | CTRL_SWDPIO(3);
 5468 
 5469         for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
 5470                 if (data & i)
 5471                         v |= MDI_IO;
 5472                 else
 5473                         v &= ~MDI_IO;
 5474                 CSR_WRITE(sc, WMREG_CTRL, v);
 5475                 delay(10);
 5476                 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 5477                 delay(10);
 5478                 CSR_WRITE(sc, WMREG_CTRL, v);
 5479                 delay(10);
 5480         }
 5481 }
 5482 
 5483 static uint32_t
 5484 i82543_mii_recvbits(struct wm_softc *sc)
 5485 {
 5486         uint32_t v, i, data = 0;
 5487 
 5488         v = CSR_READ(sc, WMREG_CTRL);
 5489         v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
 5490         v |= CTRL_SWDPIO(3);
 5491 
 5492         CSR_WRITE(sc, WMREG_CTRL, v);
 5493         delay(10);
 5494         CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 5495         delay(10);
 5496         CSR_WRITE(sc, WMREG_CTRL, v);
 5497         delay(10);
 5498 
 5499         for (i = 0; i < 16; i++) {
 5500                 data <<= 1;
 5501                 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 5502                 delay(10);
 5503                 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
 5504                         data |= 1;
 5505                 CSR_WRITE(sc, WMREG_CTRL, v);
 5506                 delay(10);
 5507         }
 5508 
 5509         CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
 5510         delay(10);
 5511         CSR_WRITE(sc, WMREG_CTRL, v);
 5512         delay(10);
 5513 
 5514         return data;
 5515 }
 5516 
 5517 #undef MDI_IO
 5518 #undef MDI_DIR
 5519 #undef MDI_CLK
 5520 
 5521 /*
 5522  * wm_gmii_i82543_readreg:      [mii interface function]
 5523  *
 5524  *      Read a PHY register on the GMII (i82543 version).
 5525  */
 5526 static int
 5527 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
 5528 {
 5529         struct wm_softc *sc = device_private(self);
 5530         int rv;
 5531 
 5532         i82543_mii_sendbits(sc, 0xffffffffU, 32);
 5533         i82543_mii_sendbits(sc, reg | (phy << 5) |
 5534             (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
 5535         rv = i82543_mii_recvbits(sc) & 0xffff;
 5536 
 5537         DPRINTF(WM_DEBUG_GMII,
 5538             ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
 5539             device_xname(sc->sc_dev), phy, reg, rv));
 5540 
 5541         return rv;
 5542 }
 5543 
 5544 /*
 5545  * wm_gmii_i82543_writereg:     [mii interface function]
 5546  *
 5547  *      Write a PHY register on the GMII (i82543 version).
 5548  */
 5549 static void
 5550 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
 5551 {
 5552         struct wm_softc *sc = device_private(self);
 5553 
 5554         i82543_mii_sendbits(sc, 0xffffffffU, 32);
 5555         i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
 5556             (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
 5557             (MII_COMMAND_START << 30), 32);
 5558 }
 5559 
 5560 /*
 5561  * wm_gmii_i82544_readreg:      [mii interface function]
 5562  *
 5563  *      Read a PHY register on the GMII.
 5564  */
 5565 static int
 5566 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
 5567 {
 5568         struct wm_softc *sc = device_private(self);
 5569         uint32_t mdic = 0;
 5570         int i, rv;
 5571 
 5572         CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
 5573             MDIC_REGADD(reg));
 5574 
 5575         for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
 5576                 mdic = CSR_READ(sc, WMREG_MDIC);
 5577                 if (mdic & MDIC_READY)
 5578                         break;
 5579                 delay(50);
 5580         }
 5581 
 5582         if ((mdic & MDIC_READY) == 0) {
 5583                 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
 5584                     device_xname(sc->sc_dev), phy, reg);
 5585                 rv = 0;
 5586         } else if (mdic & MDIC_E) {
 5587 #if 0 /* This is normal if no PHY is present. */
 5588                 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
 5589                     device_xname(sc->sc_dev), phy, reg);
 5590 #endif
 5591                 rv = 0;
 5592         } else {
 5593                 rv = MDIC_DATA(mdic);
 5594                 if (rv == 0xffff)
 5595                         rv = 0;
 5596         }
 5597 
 5598         return rv;
 5599 }
 5600 
 5601 /*
 5602  * wm_gmii_i82544_writereg:     [mii interface function]
 5603  *
 5604  *      Write a PHY register on the GMII.
 5605  */
 5606 static void
 5607 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
 5608 {
 5609         struct wm_softc *sc = device_private(self);
 5610         uint32_t mdic = 0;
 5611         int i;
 5612 
 5613         CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
 5614             MDIC_REGADD(reg) | MDIC_DATA(val));
 5615 
 5616         for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
 5617                 mdic = CSR_READ(sc, WMREG_MDIC);
 5618                 if (mdic & MDIC_READY)
 5619                         break;
 5620                 delay(50);
 5621         }
 5622 
 5623         if ((mdic & MDIC_READY) == 0)
 5624                 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
 5625                     device_xname(sc->sc_dev), phy, reg);
 5626         else if (mdic & MDIC_E)
 5627                 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
 5628                     device_xname(sc->sc_dev), phy, reg);
 5629 }
 5630 
 5631 /*
 5632  * wm_gmii_i80003_readreg:      [mii interface function]
 5633  *
 5634  *      Read a PHY register on the kumeran
 5635  * This could be handled by the PHY layer if we didn't have to lock the
 5636  * ressource ...
 5637  */
 5638 static int
 5639 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
 5640 {
 5641         struct wm_softc *sc = device_private(self);
 5642         int sem;
 5643         int rv;
 5644 
 5645         if (phy != 1) /* only one PHY on kumeran bus */
 5646                 return 0;
 5647 
 5648         sem = swfwphysem[sc->sc_funcid];
 5649         if (wm_get_swfw_semaphore(sc, sem)) {
 5650                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5651                     __func__);
 5652                 return 0;
 5653         }
 5654 
 5655         if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
 5656                 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
 5657                     reg >> GG82563_PAGE_SHIFT);
 5658         } else {
 5659                 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
 5660                     reg >> GG82563_PAGE_SHIFT);
 5661         }
 5662         /* Wait more 200us for a bug of the ready bit in the MDIC register */
 5663         delay(200);
 5664         rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
 5665         delay(200);
 5666 
 5667         wm_put_swfw_semaphore(sc, sem);
 5668         return rv;
 5669 }
 5670 
 5671 /*
 5672  * wm_gmii_i80003_writereg:     [mii interface function]
 5673  *
 5674  *      Write a PHY register on the kumeran.
 5675  * This could be handled by the PHY layer if we didn't have to lock the
 5676  * ressource ...
 5677  */
 5678 static void
 5679 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
 5680 {
 5681         struct wm_softc *sc = device_private(self);
 5682         int sem;
 5683 
 5684         if (phy != 1) /* only one PHY on kumeran bus */
 5685                 return;
 5686 
 5687         sem = swfwphysem[sc->sc_funcid];
 5688         if (wm_get_swfw_semaphore(sc, sem)) {
 5689                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5690                     __func__);
 5691                 return;
 5692         }
 5693 
 5694         if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
 5695                 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
 5696                     reg >> GG82563_PAGE_SHIFT);
 5697         } else {
 5698                 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
 5699                     reg >> GG82563_PAGE_SHIFT);
 5700         }
 5701         /* Wait more 200us for a bug of the ready bit in the MDIC register */
 5702         delay(200);
 5703         wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
 5704         delay(200);
 5705 
 5706         wm_put_swfw_semaphore(sc, sem);
 5707 }
 5708 
 5709 /*
 5710  * wm_gmii_bm_readreg:  [mii interface function]
 5711  *
 5712  *      Read a PHY register on the kumeran
 5713  * This could be handled by the PHY layer if we didn't have to lock the
 5714  * ressource ...
 5715  */
 5716 static int
 5717 wm_gmii_bm_readreg(device_t self, int phy, int reg)
 5718 {
 5719         struct wm_softc *sc = device_private(self);
 5720         int sem;
 5721         int rv;
 5722 
 5723         sem = swfwphysem[sc->sc_funcid];
 5724         if (wm_get_swfw_semaphore(sc, sem)) {
 5725                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5726                     __func__);
 5727                 return 0;
 5728         }
 5729 
 5730         if (reg > BME1000_MAX_MULTI_PAGE_REG) {
 5731                 if (phy == 1)
 5732                         wm_gmii_i82544_writereg(self, phy, 0x1f,
 5733                             reg);
 5734                 else
 5735                         wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
 5736                             reg >> GG82563_PAGE_SHIFT);
 5737 
 5738         }
 5739 
 5740         rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
 5741         wm_put_swfw_semaphore(sc, sem);
 5742         return rv;
 5743 }
 5744 
 5745 /*
 5746  * wm_gmii_bm_writereg: [mii interface function]
 5747  *
 5748  *      Write a PHY register on the kumeran.
 5749  * This could be handled by the PHY layer if we didn't have to lock the
 5750  * ressource ...
 5751  */
 5752 static void
 5753 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
 5754 {
 5755         struct wm_softc *sc = device_private(self);
 5756         int sem;
 5757 
 5758         sem = swfwphysem[sc->sc_funcid];
 5759         if (wm_get_swfw_semaphore(sc, sem)) {
 5760                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5761                     __func__);
 5762                 return;
 5763         }
 5764 
 5765         if (reg > BME1000_MAX_MULTI_PAGE_REG) {
 5766                 if (phy == 1)
 5767                         wm_gmii_i82544_writereg(self, phy, 0x1f,
 5768                             reg);
 5769                 else
 5770                         wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
 5771                             reg >> GG82563_PAGE_SHIFT);
 5772 
 5773         }
 5774 
 5775         wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
 5776         wm_put_swfw_semaphore(sc, sem);
 5777 }
 5778 
/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *      Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
 *      Wakeup registers live on page 800 (BM_WUC_PAGE), which is only
 *      reachable indirectly: enable wakeup-register access via the
 *      enable register on page 769, access the target register through
 *      the address/data opcode registers, then restore the saved enable
 *      bits.  The caller must hold the PHY semaphore.
 */
static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
{
        struct wm_softc *sc = device_private(self);
        uint16_t regnum = BM_PHY_REG_NUM(offset);
        uint16_t wuce;

        /* XXX Gig must be disabled for MDIO accesses to page 800 */
        if (sc->sc_type == WM_T_PCH) {
                /* XXX e1000 driver do nothing... why? */
        }

        /* Set page 769 */
        wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
            BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

        /* Save the current enable bits so they can be restored below. */
        wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);

        /* Enable wakeup-register access with the host wakeup bit clear. */
        wuce &= ~BM_WUC_HOST_WU_BIT;
        wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
            wuce | BM_WUC_ENABLE_BIT);

        /* Select page 800 */
        wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
            BM_WUC_PAGE << BME1000_PAGE_SHIFT);

        /* Write page 800 */
        wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);

        /* The data opcode register then reads/writes the target register. */
        if (rd)
                *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
        else
                wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);

        /* Set page 769 */
        wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
            BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);

        /* Restore the saved enable bits (host wakeup bit left cleared). */
        wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
}
 5819 
 5820 /*
 5821  * wm_gmii_hv_readreg:  [mii interface function]
 5822  *
 5823  *      Read a PHY register on the kumeran
 5824  * This could be handled by the PHY layer if we didn't have to lock the
 5825  * ressource ...
 5826  */
 5827 static int
 5828 wm_gmii_hv_readreg(device_t self, int phy, int reg)
 5829 {
 5830         struct wm_softc *sc = device_private(self);
 5831         uint16_t page = BM_PHY_REG_PAGE(reg);
 5832         uint16_t regnum = BM_PHY_REG_NUM(reg);
 5833         uint16_t val;
 5834         int rv;
 5835 
 5836         if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
 5837                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5838                     __func__);
 5839                 return 0;
 5840         }
 5841 
 5842         /* XXX Workaround failure in MDIO access while cable is disconnected */
 5843         if (sc->sc_phytype == WMPHY_82577) {
 5844                 /* XXX must write */
 5845         }
 5846 
 5847         /* Page 800 works differently than the rest so it has its own func */
 5848         if (page == BM_WUC_PAGE) {
 5849                 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
 5850                 return val;
 5851         }
 5852 
 5853         /*
 5854          * Lower than page 768 works differently than the rest so it has its
 5855          * own func
 5856          */
 5857         if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
 5858                 printf("gmii_hv_readreg!!!\n");
 5859                 return 0;
 5860         }
 5861 
 5862         if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
 5863                 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
 5864                     page << BME1000_PAGE_SHIFT);
 5865         }
 5866 
 5867         rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
 5868         wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 5869         return rv;
 5870 }
 5871 
 5872 /*
 5873  * wm_gmii_hv_writereg: [mii interface function]
 5874  *
 5875  *      Write a PHY register on the kumeran.
 5876  * This could be handled by the PHY layer if we didn't have to lock the
 5877  * ressource ...
 5878  */
 5879 static void
 5880 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
 5881 {
 5882         struct wm_softc *sc = device_private(self);
 5883         uint16_t page = BM_PHY_REG_PAGE(reg);
 5884         uint16_t regnum = BM_PHY_REG_NUM(reg);
 5885 
 5886         if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
 5887                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5888                     __func__);
 5889                 return;
 5890         }
 5891 
 5892         /* XXX Workaround failure in MDIO access while cable is disconnected */
 5893 
 5894         /* Page 800 works differently than the rest so it has its own func */
 5895         if (page == BM_WUC_PAGE) {
 5896                 uint16_t tmp;
 5897 
 5898                 tmp = val;
 5899                 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
 5900                 return;
 5901         }
 5902 
 5903         /*
 5904          * Lower than page 768 works differently than the rest so it has its
 5905          * own func
 5906          */
 5907         if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
 5908                 printf("gmii_hv_writereg!!!\n");
 5909                 return;
 5910         }
 5911 
 5912         /*
 5913          * XXX Workaround MDIO accesses being disabled after entering IEEE
 5914          * Power Down (whenever bit 11 of the PHY control register is set)
 5915          */
 5916 
 5917         if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
 5918                 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
 5919                     page << BME1000_PAGE_SHIFT);
 5920         }
 5921 
 5922         wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
 5923         wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 5924 }
 5925 
 5926 /*
 5927  * wm_gmii_hv_readreg:  [mii interface function]
 5928  *
 5929  *      Read a PHY register on the kumeran
 5930  * This could be handled by the PHY layer if we didn't have to lock the
 5931  * ressource ...
 5932  */
 5933 static int
 5934 wm_sgmii_readreg(device_t self, int phy, int reg)
 5935 {
 5936         struct wm_softc *sc = device_private(self);
 5937         uint32_t i2ccmd;
 5938         int i, rv;
 5939 
 5940         if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
 5941                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5942                     __func__);
 5943                 return 0;
 5944         }
 5945 
 5946         i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
 5947             | (phy << I2CCMD_PHY_ADDR_SHIFT)
 5948             | I2CCMD_OPCODE_READ;
 5949         CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
 5950 
 5951         /* Poll the ready bit */
 5952         for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
 5953                 delay(50);
 5954                 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
 5955                 if (i2ccmd & I2CCMD_READY)
 5956                         break;
 5957         }
 5958         if ((i2ccmd & I2CCMD_READY) == 0)
 5959                 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
 5960         if ((i2ccmd & I2CCMD_ERROR) != 0)
 5961                 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
 5962 
 5963         rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
 5964 
 5965         wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
 5966         return rv;
 5967 }
 5968 
 5969 /*
 5970  * wm_gmii_hv_writereg: [mii interface function]
 5971  *
 5972  *      Write a PHY register on the kumeran.
 5973  * This could be handled by the PHY layer if we didn't have to lock the
 5974  * ressource ...
 5975  */
 5976 static void
 5977 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
 5978 {
 5979         struct wm_softc *sc = device_private(self);
 5980         uint32_t i2ccmd;
 5981         int i;
 5982 
 5983         if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
 5984                 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
 5985                     __func__);
 5986                 return;
 5987         }
 5988 
 5989         i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
 5990             | (phy << I2CCMD_PHY_ADDR_SHIFT)
 5991             | I2CCMD_OPCODE_WRITE;
 5992         CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
 5993 
 5994         /* Poll the ready bit */
 5995         for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
 5996                 delay(50);
 5997                 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
 5998                 if (i2ccmd & I2CCMD_READY)
 5999                         break;
 6000         }
 6001         if ((i2ccmd & I2CCMD_READY) == 0)
 6002                 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
 6003         if ((i2ccmd & I2CCMD_ERROR) != 0)
 6004                 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
 6005 
 6006         wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 6007 }
 6008 
 6009 /*
 6010  * wm_gmii_statchg:     [mii interface function]
 6011  *
 6012  *      Callback from MII layer when media changes.
 6013  */
 6014 static void
 6015 wm_gmii_statchg(device_t self)
 6016 {
 6017         struct wm_softc *sc = device_private(self);
 6018         struct mii_data *mii = &sc->sc_mii;
 6019 
 6020         sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
 6021         sc->sc_tctl &= ~TCTL_COLD(0x3ff);
 6022         sc->sc_fcrtl &= ~FCRTL_XONE;
 6023 
 6024         /*
 6025          * Get flow control negotiation result.
 6026          */
 6027         if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
 6028             (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
 6029                 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
 6030                 mii->mii_media_active &= ~IFM_ETH_FMASK;
 6031         }
 6032 
 6033         if (sc->sc_flowflags & IFM_FLOW) {
 6034                 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
 6035                         sc->sc_ctrl |= CTRL_TFCE;
 6036                         sc->sc_fcrtl |= FCRTL_XONE;
 6037                 }
 6038                 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
 6039                         sc->sc_ctrl |= CTRL_RFCE;
 6040         }
 6041 
 6042         if (sc->sc_mii.mii_media_active & IFM_FDX) {
 6043                 DPRINTF(WM_DEBUG_LINK,
 6044                     ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
 6045                 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
 6046         } else {
 6047                 DPRINTF(WM_DEBUG_LINK,
 6048                     ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
 6049                 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
 6050         }
 6051 
 6052         CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 6053         CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
 6054         CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
 6055                                                  : WMREG_FCRTL, sc->sc_fcrtl);
 6056         if (sc->sc_type == WM_T_80003) {
 6057                 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
 6058                 case IFM_1000_T:
 6059                         wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 6060                             KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 6061                         sc->sc_tipg =  TIPG_1000T_80003_DFLT;
 6062                         break;
 6063                 default:
 6064                         wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 6065                             KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
 6066                         sc->sc_tipg =  TIPG_10_100_80003_DFLT;
 6067                         break;
 6068                 }
 6069                 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
 6070         }
 6071 }
 6072 
 6073 /*
 6074  * wm_kmrn_readreg:
 6075  *
 6076  *      Read a kumeran register
 6077  */
 6078 static int
 6079 wm_kmrn_readreg(struct wm_softc *