FreeBSD/Linux Kernel Cross Reference
sys/dev/vr/if_vr.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-4-Clause
    3  *
    4  * Copyright (c) 1997, 1998
    5  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 /*
   39  * VIA Rhine fast ethernet PCI NIC driver
   40  *
   41  * Supports various network adapters based on the VIA Rhine
   42  * and Rhine II PCI controllers, including the D-Link DFE530TX.
   43  * Datasheets are available at http://www.via.com.tw.
   44  *
   45  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   46  * Electrical Engineering Department
   47  * Columbia University, New York City
   48  */
   49 
   50 /*
    51  * The VIA Rhine controllers are similar in some respects to
    52  * the DEC tulip chips, except less complicated. The controller
   53  * uses an MII bus and an external physical layer interface. The
   54  * receiver has a one entry perfect filter and a 64-bit hash table
   55  * multicast filter. Transmit and receive descriptors are similar
   56  * to the tulip.
   57  *
    58  * Some Rhine chips have a serious flaw in their transmit DMA
    59  * mechanism: transmit buffers must be longword aligned. Unfortunately,
   60  * FreeBSD doesn't guarantee that mbufs will be filled in starting
   61  * at longword boundaries, so we have to do a buffer copy before
   62  * transmission.
   63  */
   64 
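/*
 * Illustrative, compiled-out sketch (not part of the original driver):
 * one way to detect a transmit mbuf that violates the longword-alignment
 * rule described above and therefore needs the buffer copy.  The
 * driver's real handling lives in vr_encap() (not shown in this
 * excerpt).
 */
#ifdef notdef
static __inline int
vr_tx_needs_copy(struct mbuf *m)
{

	/* Rhine I-class chips require longword-aligned Tx buffers. */
	return ((mtod(m, uintptr_t) & (sizeof(uint32_t) - 1)) != 0);
}
#endif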
   65 #ifdef HAVE_KERNEL_OPTION_HEADERS
   66 #include "opt_device_polling.h"
   67 #endif
   68 
   69 #include <sys/param.h>
   70 #include <sys/systm.h>
   71 #include <sys/bus.h>
   72 #include <sys/endian.h>
   73 #include <sys/kernel.h>
   74 #include <sys/malloc.h>
   75 #include <sys/mbuf.h>
   76 #include <sys/module.h>
   77 #include <sys/rman.h>
   78 #include <sys/socket.h>
   79 #include <sys/sockio.h>
   80 #include <sys/sysctl.h>
   81 #include <sys/taskqueue.h>
   82 
   83 #include <net/bpf.h>
   84 #include <net/if.h>
   85 #include <net/if_var.h>
   86 #include <net/ethernet.h>
   87 #include <net/if_dl.h>
   88 #include <net/if_media.h>
   89 #include <net/if_types.h>
   90 #include <net/if_vlan_var.h>
   91 
   92 #include <dev/mii/mii.h>
   93 #include <dev/mii/miivar.h>
   94 
   95 #include <dev/pci/pcireg.h>
   96 #include <dev/pci/pcivar.h>
   97 
   98 #include <machine/bus.h>
   99 
  100 #include <dev/vr/if_vrreg.h>
  101 
  102 /* "device miibus" required.  See GENERIC if you get errors here. */
  103 #include "miibus_if.h"
  104 
  105 MODULE_DEPEND(vr, pci, 1, 1, 1);
  106 MODULE_DEPEND(vr, ether, 1, 1, 1);
  107 MODULE_DEPEND(vr, miibus, 1, 1, 1);
  108 
  109 /* Define to show Rx/Tx error status. */
  110 #undef  VR_SHOW_ERRORS
  111 #define VR_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP)
  112 
  113 /*
  114  * Various supported device vendors/types, their names & quirks.
  115  */
  116 #define VR_Q_NEEDALIGN          (1<<0)
  117 #define VR_Q_CSUM               (1<<1)
  118 #define VR_Q_CAM                (1<<2)
  119 
  120 static const struct vr_type {
  121         u_int16_t               vr_vid;
  122         u_int16_t               vr_did;
  123         int                     vr_quirks;
  124         const char              *vr_name;
  125 } vr_devs[] = {
  126         { VIA_VENDORID, VIA_DEVICEID_RHINE,
  127             VR_Q_NEEDALIGN,
  128             "VIA VT3043 Rhine I 10/100BaseTX" },
  129         { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
  130             VR_Q_NEEDALIGN,
  131             "VIA VT86C100A Rhine II 10/100BaseTX" },
  132         { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
  133             0,
  134             "VIA VT6102 Rhine II 10/100BaseTX" },
  135         { VIA_VENDORID, VIA_DEVICEID_RHINE_III,
  136             0,
  137             "VIA VT6105 Rhine III 10/100BaseTX" },
  138         { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
  139             VR_Q_CSUM,
  140             "VIA VT6105M Rhine III 10/100BaseTX" },
  141         { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
  142             VR_Q_NEEDALIGN,
  143             "Delta Electronics Rhine II 10/100BaseTX" },
  144         { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
  145             VR_Q_NEEDALIGN,
  146             "Addtron Technology Rhine II 10/100BaseTX" },
  147         { 0, 0, 0, NULL }
  148 };
  149 
  150 static int vr_probe(device_t);
  151 static int vr_attach(device_t);
  152 static int vr_detach(device_t);
  153 static int vr_shutdown(device_t);
  154 static int vr_suspend(device_t);
  155 static int vr_resume(device_t);
  156 
  157 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  158 static int vr_dma_alloc(struct vr_softc *);
  159 static void vr_dma_free(struct vr_softc *);
  160 static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
  161 static int vr_newbuf(struct vr_softc *, int);
  162 
  163 #ifndef __NO_STRICT_ALIGNMENT
  164 static __inline void vr_fixup_rx(struct mbuf *);
  165 #endif
  166 static int vr_rxeof(struct vr_softc *);
  167 static void vr_txeof(struct vr_softc *);
  168 static void vr_tick(void *);
  169 static int vr_error(struct vr_softc *, uint16_t);
  170 static void vr_tx_underrun(struct vr_softc *);
  171 static int vr_intr(void *);
  172 static void vr_int_task(void *, int);
  173 static void vr_start(struct ifnet *);
  174 static void vr_start_locked(struct ifnet *);
  175 static int vr_encap(struct vr_softc *, struct mbuf **);
  176 static int vr_ioctl(struct ifnet *, u_long, caddr_t);
  177 static void vr_init(void *);
  178 static void vr_init_locked(struct vr_softc *);
  179 static void vr_tx_start(struct vr_softc *);
  180 static void vr_rx_start(struct vr_softc *);
  181 static int vr_tx_stop(struct vr_softc *);
  182 static int vr_rx_stop(struct vr_softc *);
  183 static void vr_stop(struct vr_softc *);
  184 static void vr_watchdog(struct vr_softc *);
  185 static int vr_ifmedia_upd(struct ifnet *);
  186 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  187 
  188 static int vr_miibus_readreg(device_t, int, int);
  189 static int vr_miibus_writereg(device_t, int, int, int);
  190 static void vr_miibus_statchg(device_t);
  191 
  192 static void vr_cam_mask(struct vr_softc *, uint32_t, int);
  193 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
  194 static void vr_set_filter(struct vr_softc *);
  195 static void vr_reset(const struct vr_softc *);
  196 static int vr_tx_ring_init(struct vr_softc *);
  197 static int vr_rx_ring_init(struct vr_softc *);
  198 static void vr_setwol(struct vr_softc *);
  199 static void vr_clrwol(struct vr_softc *);
  200 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);
  201 
  202 static const struct vr_tx_threshold_table {
  203         int tx_cfg;
  204         int bcr_cfg;
  205         int value;
  206 } vr_tx_threshold_tables[] = {
  207         { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
  208         { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
  209         { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
  210         { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
  211         { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
  212         { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
  213 };
  214 
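/*
 * Compiled-out sketch (not from the original source): how a Tx underrun
 * handler might walk the threshold table above, escalating one step at
 * a time until store-and-forward mode is reached.  This assumes the
 * VR_TXTHRESH_* codes are consecutive integers ordered from smallest to
 * largest; the driver's actual logic is in vr_tx_underrun().
 */
#ifdef notdef
	if (sc->vr_txthresh < VR_TXTHRESH_MAX)
		sc->vr_txthresh++;	/* step to the next larger threshold */
#endif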
  215 static device_method_t vr_methods[] = {
  216         /* Device interface */
  217         DEVMETHOD(device_probe,         vr_probe),
  218         DEVMETHOD(device_attach,        vr_attach),
  219         DEVMETHOD(device_detach,        vr_detach),
  220         DEVMETHOD(device_shutdown,      vr_shutdown),
  221         DEVMETHOD(device_suspend,       vr_suspend),
  222         DEVMETHOD(device_resume,        vr_resume),
  223 
  224         /* MII interface */
  225         DEVMETHOD(miibus_readreg,       vr_miibus_readreg),
  226         DEVMETHOD(miibus_writereg,      vr_miibus_writereg),
  227         DEVMETHOD(miibus_statchg,       vr_miibus_statchg),
  228 
  229         DEVMETHOD_END
  230 };
  231 
  232 static driver_t vr_driver = {
  233         "vr",
  234         vr_methods,
  235         sizeof(struct vr_softc)
  236 };
  237 
  238 DRIVER_MODULE(vr, pci, vr_driver, 0, 0);
  239 DRIVER_MODULE(miibus, vr, miibus_driver, 0, 0);
  240 
  241 static int
  242 vr_miibus_readreg(device_t dev, int phy, int reg)
  243 {
  244         struct vr_softc         *sc;
  245         int                     i;
  246 
  247         sc = device_get_softc(dev);
  248 
  249         /* Set the register address. */
  250         CSR_WRITE_1(sc, VR_MIIADDR, reg);
  251         VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
  252 
  253         for (i = 0; i < VR_MII_TIMEOUT; i++) {
  254                 DELAY(1);
  255                 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
  256                         break;
  257         }
  258         if (i == VR_MII_TIMEOUT)
  259                 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);
  260 
  261         return (CSR_READ_2(sc, VR_MIIDATA));
  262 }
  263 
  264 static int
  265 vr_miibus_writereg(device_t dev, int phy, int reg, int data)
  266 {
  267         struct vr_softc         *sc;
  268         int                     i;
  269 
  270         sc = device_get_softc(dev);
  271 
  272         /* Set the register address and data to write. */
  273         CSR_WRITE_1(sc, VR_MIIADDR, reg);
  274         CSR_WRITE_2(sc, VR_MIIDATA, data);
  275         VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
  276 
  277         for (i = 0; i < VR_MII_TIMEOUT; i++) {
  278                 DELAY(1);
  279                 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
  280                         break;
  281         }
  282         if (i == VR_MII_TIMEOUT)
  283                 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
  284                     reg);
  285 
  286         return (0);
  287 }
  288 
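/*
 * The two MII accessors above share one busy-wait idiom: set a command
 * bit in VR_MIICMD, then spin until the chip clears it.  A compiled-out
 * sketch of that pattern as a hypothetical common helper (not part of
 * the original driver):
 */
#ifdef notdef
static int
vr_mii_wait(struct vr_softc *sc, uint8_t cmdbit)
{
	int	i;

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & cmdbit) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}
#endif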
   289 /*
   290  * In order to fiddle with the 'full-duplex' and '100Mbps' bits
   291  * in the netconfig register, we first have to put the transmit
   292  * and/or receive logic in the idle state.
   293  */
  294 static void
  295 vr_miibus_statchg(device_t dev)
  296 {
  297         struct vr_softc         *sc;
  298         struct mii_data         *mii;
  299         struct ifnet            *ifp;
  300         int                     lfdx, mfdx;
  301         uint8_t                 cr0, cr1, fc;
  302 
  303         sc = device_get_softc(dev);
  304         mii = device_get_softc(sc->vr_miibus);
  305         ifp = sc->vr_ifp;
  306         if (mii == NULL || ifp == NULL ||
  307             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  308                 return;
  309 
  310         sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
  311         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
  312             (IFM_ACTIVE | IFM_AVALID)) {
  313                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  314                 case IFM_10_T:
  315                 case IFM_100_TX:
  316                         sc->vr_flags |= VR_F_LINK;
  317                         break;
  318                 default:
  319                         break;
  320                 }
  321         }
  322 
  323         if ((sc->vr_flags & VR_F_LINK) != 0) {
  324                 cr0 = CSR_READ_1(sc, VR_CR0);
  325                 cr1 = CSR_READ_1(sc, VR_CR1);
  326                 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
  327                 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
  328                 if (mfdx != lfdx) {
  329                         if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
  330                                 if (vr_tx_stop(sc) != 0 ||
  331                                     vr_rx_stop(sc) != 0) {
  332                                         device_printf(sc->vr_dev,
  333                                             "%s: Tx/Rx shutdown error -- "
  334                                             "resetting\n", __func__);
  335                                         sc->vr_flags |= VR_F_RESTART;
  336                                         VR_UNLOCK(sc);
  337                                         return;
  338                                 }
  339                         }
  340                         if (lfdx)
  341                                 cr1 |= VR_CR1_FULLDUPLEX;
  342                         else
  343                                 cr1 &= ~VR_CR1_FULLDUPLEX;
  344                         CSR_WRITE_1(sc, VR_CR1, cr1);
  345                 }
  346                 fc = 0;
  347                 /* Configure flow-control. */
  348                 if (sc->vr_revid >= REV_ID_VT6105_A0) {
  349                         fc = CSR_READ_1(sc, VR_FLOWCR1);
  350                         fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
  351                         if ((IFM_OPTIONS(mii->mii_media_active) &
  352                             IFM_ETH_RXPAUSE) != 0)
  353                                 fc |= VR_FLOWCR1_RXPAUSE;
  354                         if ((IFM_OPTIONS(mii->mii_media_active) &
  355                             IFM_ETH_TXPAUSE) != 0) {
  356                                 fc |= VR_FLOWCR1_TXPAUSE;
  357                                 sc->vr_flags |= VR_F_TXPAUSE;
  358                         }
  359                         CSR_WRITE_1(sc, VR_FLOWCR1, fc);
  360                 } else if (sc->vr_revid >= REV_ID_VT6102_A) {
   361                         /* No Tx pause capability available for Rhine II. */
  362                         fc = CSR_READ_1(sc, VR_MISC_CR0);
  363                         fc &= ~VR_MISCCR0_RXPAUSE;
  364                         if ((IFM_OPTIONS(mii->mii_media_active) &
  365                             IFM_ETH_RXPAUSE) != 0)
  366                                 fc |= VR_MISCCR0_RXPAUSE;
  367                         CSR_WRITE_1(sc, VR_MISC_CR0, fc);
  368                 }
  369                 vr_rx_start(sc);
  370                 vr_tx_start(sc);
  371         } else {
  372                 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
  373                         device_printf(sc->vr_dev,
  374                             "%s: Tx/Rx shutdown error -- resetting\n",
  375                             __func__);
  376                         sc->vr_flags |= VR_F_RESTART;
  377                 }
  378         }
  379 }
  380 
  381 static void
  382 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
  383 {
  384 
  385         if (type == VR_MCAST_CAM)
  386                 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
  387         else
  388                 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
  389         CSR_WRITE_4(sc, VR_CAMMASK, mask);
  390         CSR_WRITE_1(sc, VR_CAMCTL, 0);
  391 }
  392 
  393 static int
  394 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
  395 {
  396         int     i;
  397 
  398         if (type == VR_MCAST_CAM) {
  399                 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
  400                         return (EINVAL);
  401                 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
  402         } else
  403                 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
  404 
  405         /* Set CAM entry address. */
  406         CSR_WRITE_1(sc, VR_CAMADDR, idx);
  407         /* Set CAM entry data. */
  408         if (type == VR_MCAST_CAM) {
  409                 for (i = 0; i < ETHER_ADDR_LEN; i++)
  410                         CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
  411         } else {
  412                 CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
  413                 CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
  414         }
  415         DELAY(10);
  416         /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
  417         CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
  418         for (i = 0; i < VR_TIMEOUT; i++) {
  419                 DELAY(1);
  420                 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
  421                         break;
  422         }
  423 
  424         if (i == VR_TIMEOUT)
  425                 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
  426                     __func__);
  427         CSR_WRITE_1(sc, VR_CAMCTL, 0);
  428 
  429         return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
  430 }
  431 
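/*
 * Compiled-out usage sketch (hypothetical, not in the original source):
 * program perfect-filter entry 0 with an address, then enable just that
 * entry via the CAM mask.  vr_set_filter() below drives the real
 * sequence for every multicast address on the interface.
 */
#ifdef notdef
	uint8_t addr[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	if (vr_cam_data(sc, VR_MCAST_CAM, 0, addr) == 0)
		vr_cam_mask(sc, 0x00000001, VR_MCAST_CAM); /* bit 0 = entry 0 */
#endif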
  432 struct vr_hash_maddr_cam_ctx {
  433         struct vr_softc *sc;
  434         uint32_t mask;
  435         int error;
  436 };
  437 
  438 static u_int
  439 vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
  440 {
  441         struct vr_hash_maddr_cam_ctx *ctx = arg;
  442 
  443         if (ctx->error != 0)
  444                 return (0);
  445         ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
  446         if (ctx->error != 0) {
  447                 ctx->mask = 0;
  448                 return (0);
  449         }
  450         ctx->mask |= 1 << mcnt;
  451 
  452         return (1);
  453 }
  454 
  455 static u_int
  456 vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  457 {
  458         uint32_t *hashes = arg;
  459         int h;
  460 
  461         h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
  462         if (h < 32)
  463                 hashes[0] |= (1 << h);
  464         else
  465                 hashes[1] |= (1 << (h - 32));
  466 
  467         return (1);
  468 }
  469 
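/*
 * Worked example (compiled out): hashing the all-hosts group address
 * 01:00:5e:00:00:01.  ether_crc32_be() yields a 32-bit CRC; the top six
 * bits (>> 26) select one of the 64 filter bits, which are split across
 * the two 32-bit MAR registers.
 */
#ifdef notdef
	static const uint8_t allhosts[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hashes[2] = { 0, 0 };
	int h;

	h = ether_crc32_be(allhosts, ETHER_ADDR_LEN) >> 26;	/* 0..63 */
	if (h < 32)
		hashes[0] |= 1 << h;		/* programmed into VR_MAR0 */
	else
		hashes[1] |= 1 << (h - 32);	/* programmed into VR_MAR1 */
#endif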
  470 /*
  471  * Program the 64-bit multicast hash filter.
  472  */
  473 static void
  474 vr_set_filter(struct vr_softc *sc)
  475 {
  476         struct ifnet            *ifp;
  477         uint32_t                hashes[2] = { 0, 0 };
  478         uint8_t                 rxfilt;
  479         int                     error, mcnt;
  480 
  481         VR_LOCK_ASSERT(sc);
  482 
  483         ifp = sc->vr_ifp;
  484         rxfilt = CSR_READ_1(sc, VR_RXCFG);
  485         rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
  486             VR_RXCFG_RX_MULTI);
  487         if (ifp->if_flags & IFF_BROADCAST)
  488                 rxfilt |= VR_RXCFG_RX_BROAD;
  489         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
  490                 rxfilt |= VR_RXCFG_RX_MULTI;
  491                 if (ifp->if_flags & IFF_PROMISC)
  492                         rxfilt |= VR_RXCFG_RX_PROMISC;
  493                 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
  494                 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
  495                 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
  496                 return;
  497         }
  498 
  499         /* Now program new ones. */
  500         error = 0;
  501         if ((sc->vr_quirks & VR_Q_CAM) != 0) {
  502                 struct vr_hash_maddr_cam_ctx ctx;
  503 
   504                 /*
   505                  * For hardware that has CAM capability, use the
   506                  * 32-entry multicast perfect filter.
   507                  */
  508                 ctx.sc = sc;
  509                 ctx.mask = 0;
  510                 ctx.error = 0;
  511                 mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
   512                 vr_cam_mask(sc, ctx.mask, VR_MCAST_CAM);
                       error = ctx.error;
   513         }
  514 
  515         if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
  516                 /*
  517                  * If there are too many multicast addresses or
  518                  * setting multicast CAM filter failed, use hash
  519                  * table based filtering.
  520                  */
  521                 mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
  522         }
  523 
  524         if (mcnt > 0)
  525                 rxfilt |= VR_RXCFG_RX_MULTI;
  526 
  527         CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
  528         CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
  529         CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
  530 }
  531 
  532 static void
  533 vr_reset(const struct vr_softc *sc)
  534 {
  535         int             i;
  536 
  537         /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */
  538 
  539         CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
  540         if (sc->vr_revid < REV_ID_VT6102_A) {
  541                 /* VT86C100A needs more delay after reset. */
  542                 DELAY(100);
  543         }
  544         for (i = 0; i < VR_TIMEOUT; i++) {
  545                 DELAY(10);
  546                 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
  547                         break;
  548         }
  549         if (i == VR_TIMEOUT) {
  550                 if (sc->vr_revid < REV_ID_VT6102_A)
  551                         device_printf(sc->vr_dev, "reset never completed!\n");
  552                 else {
  553                         /* Use newer force reset command. */
  554                         device_printf(sc->vr_dev,
  555                             "Using force reset command.\n");
  556                         VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
  557                         /*
  558                          * Wait a little while for the chip to get its brains
  559                          * in order.
  560                          */
  561                         DELAY(2000);
  562                 }
  563         }
  564 
  565 }
  566 
  567 /*
  568  * Probe for a VIA Rhine chip. Check the PCI vendor and device
   569  * IDs against our list and return a match or NULL.
  570  */
  571 static const struct vr_type *
  572 vr_match(device_t dev)
  573 {
  574         const struct vr_type    *t = vr_devs;
  575 
  576         for (t = vr_devs; t->vr_name != NULL; t++)
  577                 if ((pci_get_vendor(dev) == t->vr_vid) &&
  578                     (pci_get_device(dev) == t->vr_did))
  579                         return (t);
  580         return (NULL);
  581 }
  582 
  583 /*
  584  * Probe for a VIA Rhine chip. Check the PCI vendor and device
  585  * IDs against our list and return a device name if we find a match.
  586  */
  587 static int
  588 vr_probe(device_t dev)
  589 {
  590         const struct vr_type    *t;
  591 
  592         t = vr_match(dev);
  593         if (t != NULL) {
  594                 device_set_desc(dev, t->vr_name);
  595                 return (BUS_PROBE_DEFAULT);
  596         }
  597         return (ENXIO);
  598 }
  599 
  600 /*
  601  * Attach the interface. Allocate softc structures, do ifmedia
  602  * setup and ethernet/BPF attach.
  603  */
  604 static int
  605 vr_attach(device_t dev)
  606 {
  607         struct vr_softc         *sc;
  608         struct ifnet            *ifp;
  609         const struct vr_type    *t;
  610         uint8_t                 eaddr[ETHER_ADDR_LEN];
  611         int                     error, rid;
  612         int                     i, phy, pmc;
  613 
  614         sc = device_get_softc(dev);
  615         sc->vr_dev = dev;
  616         t = vr_match(dev);
  617         KASSERT(t != NULL, ("Lost if_vr device match"));
  618         sc->vr_quirks = t->vr_quirks;
  619         device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);
  620 
  621         mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  622             MTX_DEF);
  623         callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
  624         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  625             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
  626             OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  627             sc, 0, vr_sysctl_stats, "I", "Statistics");
  628 
  629         error = 0;
  630 
  631         /*
  632          * Map control/status registers.
  633          */
  634         pci_enable_busmaster(dev);
  635         sc->vr_revid = pci_get_revid(dev);
  636         device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);
  637 
  638         sc->vr_res_id = PCIR_BAR(0);
  639         sc->vr_res_type = SYS_RES_IOPORT;
  640         sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
  641             &sc->vr_res_id, RF_ACTIVE);
  642         if (sc->vr_res == NULL) {
  643                 device_printf(dev, "couldn't map ports\n");
  644                 error = ENXIO;
  645                 goto fail;
  646         }
  647 
  648         /* Allocate interrupt. */
  649         rid = 0;
  650         sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  651             RF_SHAREABLE | RF_ACTIVE);
  652 
  653         if (sc->vr_irq == NULL) {
  654                 device_printf(dev, "couldn't map interrupt\n");
  655                 error = ENXIO;
  656                 goto fail;
  657         }
  658 
  659         /* Allocate ifnet structure. */
  660         ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
  661         if (ifp == NULL) {
  662                 device_printf(dev, "couldn't allocate ifnet structure\n");
  663                 error = ENOSPC;
  664                 goto fail;
  665         }
  666         ifp->if_softc = sc;
  667         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  668         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  669         ifp->if_ioctl = vr_ioctl;
  670         ifp->if_start = vr_start;
  671         ifp->if_init = vr_init;
  672         IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
  673         ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
  674         IFQ_SET_READY(&ifp->if_snd);
  675 
  676         NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
  677 
  678         /* Configure Tx FIFO threshold. */
  679         sc->vr_txthresh = VR_TXTHRESH_MIN;
  680         if (sc->vr_revid < REV_ID_VT6105_A0) {
  681                 /*
  682                  * Use store and forward mode for Rhine I/II.
  683                  * Otherwise they produce a lot of Tx underruns and
   684                  * it would take a while to get a working FIFO threshold
  685                  * value.
  686                  */
  687                 sc->vr_txthresh = VR_TXTHRESH_MAX;
  688         }
  689         if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
  690                 ifp->if_hwassist = VR_CSUM_FEATURES;
  691                 ifp->if_capabilities |= IFCAP_HWCSUM;
  692                 /*
   693                  * To update the checksum field, the hardware may need to
   694                  * store entire frames in the FIFO before transmitting.
  695                  */
  696                 sc->vr_txthresh = VR_TXTHRESH_MAX;
  697         }
  698 
  699         if (sc->vr_revid >= REV_ID_VT6102_A &&
  700             pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
  701                 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;
  702 
   703         /* Rhine supports oversized VLAN frames. */
  704         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  705         ifp->if_capenable = ifp->if_capabilities;
  706 #ifdef DEVICE_POLLING
  707         ifp->if_capabilities |= IFCAP_POLLING;
  708 #endif
  709 
  710         /*
  711          * Windows may put the chip in suspend mode when it
  712          * shuts down. Be sure to kick it in the head to wake it
  713          * up again.
  714          */
  715         if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
  716                 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
  717 
  718         /*
  719          * Get station address. The way the Rhine chips work,
  720          * you're not allowed to directly access the EEPROM once
  721          * they've been programmed a special way. Consequently,
  722          * we need to read the node address from the PAR0 and PAR1
  723          * registers.
   724  * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
   725  * VR_CFGC and VR_CFGD, such that memory-mapped I/O configured
   726  * by the driver is reset to its default state.
  727          */
  728         VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
  729         for (i = VR_TIMEOUT; i > 0; i--) {
  730                 DELAY(1);
  731                 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
  732                         break;
  733         }
  734         if (i == 0)
  735                 device_printf(dev, "Reloading EEPROM timeout!\n");
  736         for (i = 0; i < ETHER_ADDR_LEN; i++)
  737                 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
  738 
  739         /* Reset the adapter. */
  740         vr_reset(sc);
  741         /* Ack intr & disable further interrupts. */
  742         CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
  743         CSR_WRITE_2(sc, VR_IMR, 0);
  744         if (sc->vr_revid >= REV_ID_VT6102_A)
  745                 CSR_WRITE_2(sc, VR_MII_IMR, 0);
  746 
  747         if (sc->vr_revid < REV_ID_VT6102_A) {
  748                 pci_write_config(dev, VR_PCI_MODE2,
  749                     pci_read_config(dev, VR_PCI_MODE2, 1) |
  750                     VR_MODE2_MODE10T, 1);
  751         } else {
  752                 /* Report error instead of retrying forever. */
  753                 pci_write_config(dev, VR_PCI_MODE2,
  754                     pci_read_config(dev, VR_PCI_MODE2, 1) |
  755                     VR_MODE2_PCEROPT, 1);
  756                 /* Detect MII coding error. */
  757                 pci_write_config(dev, VR_PCI_MODE3,
  758                     pci_read_config(dev, VR_PCI_MODE3, 1) |
  759                     VR_MODE3_MIION, 1);
  760                 if (sc->vr_revid >= REV_ID_VT6105_LOM &&
  761                     sc->vr_revid < REV_ID_VT6105M_A0)
  762                         pci_write_config(dev, VR_PCI_MODE2,
  763                             pci_read_config(dev, VR_PCI_MODE2, 1) |
  764                             VR_MODE2_MODE10T, 1);
  765                 /* Enable Memory-Read-Multiple. */
  766                 if (sc->vr_revid >= REV_ID_VT6107_A1 &&
  767                     sc->vr_revid < REV_ID_VT6105M_A0)
  768                         pci_write_config(dev, VR_PCI_MODE2,
  769                             pci_read_config(dev, VR_PCI_MODE2, 1) |
  770                             VR_MODE2_MRDPL, 1);
  771         }
  772         /* Disable MII AUTOPOLL. */
  773         VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
  774 
  775         if (vr_dma_alloc(sc) != 0) {
  776                 error = ENXIO;
  777                 goto fail;
  778         }
  779 
  780         /* Do MII setup. */
  781         if (sc->vr_revid >= REV_ID_VT6105_A0)
  782                 phy = 1;
  783         else
  784                 phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
  785         error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
  786             vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
  787             sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
  788         if (error != 0) {
  789                 device_printf(dev, "attaching PHYs failed\n");
  790                 goto fail;
  791         }
  792 
  793         /* Call MI attach routine. */
  794         ether_ifattach(ifp, eaddr);
  795         /*
  796          * Tell the upper layer(s) we support long frames.
  797          * Must appear after the call to ether_ifattach() because
  798          * ether_ifattach() sets ifi_hdrlen to the default value.
  799          */
  800         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
  801 
  802         /* Hook interrupt last to avoid having to lock softc. */
  803         error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
  804             vr_intr, NULL, sc, &sc->vr_intrhand);
  805 
  806         if (error) {
  807                 device_printf(dev, "couldn't set up irq\n");
  808                 ether_ifdetach(ifp);
  809                 goto fail;
  810         }
  811 
  812 fail:
  813         if (error)
  814                 vr_detach(dev);
  815 
  816         return (error);
  817 }
  818 
  819 /*
  820  * Shutdown hardware and free up resources. This can be called any
  821  * time after the mutex has been initialized. It is called in both
  822  * the error case in attach and the normal detach case so it needs
  823  * to be careful about only freeing resources that have actually been
  824  * allocated.
  825  */
  826 static int
  827 vr_detach(device_t dev)
  828 {
  829         struct vr_softc         *sc = device_get_softc(dev);
  830         struct ifnet            *ifp = sc->vr_ifp;
  831 
  832         KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
  833 
  834 #ifdef DEVICE_POLLING
  835         if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
  836                 ether_poll_deregister(ifp);
  837 #endif
  838 
  839         /* These should only be active if attach succeeded. */
  840         if (device_is_attached(dev)) {
  841                 VR_LOCK(sc);
  842                 sc->vr_flags |= VR_F_DETACHED;
  843                 vr_stop(sc);
  844                 VR_UNLOCK(sc);
  845                 callout_drain(&sc->vr_stat_callout);
  846                 taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
  847                 ether_ifdetach(ifp);
  848         }
  849         if (sc->vr_miibus)
  850                 device_delete_child(dev, sc->vr_miibus);
  851         bus_generic_detach(dev);
  852 
  853         if (sc->vr_intrhand)
  854                 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
  855         if (sc->vr_irq)
  856                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
  857         if (sc->vr_res)
  858                 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
  859                     sc->vr_res);
  860 
  861         if (ifp)
  862                 if_free(ifp);
  863 
  864         vr_dma_free(sc);
  865 
  866         mtx_destroy(&sc->vr_mtx);
  867 
  868         return (0);
  869 }
  870 
  871 struct vr_dmamap_arg {
  872         bus_addr_t      vr_busaddr;
  873 };
  874 
  875 static void
  876 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  877 {
  878         struct vr_dmamap_arg    *ctx;
  879 
  880         if (error != 0)
  881                 return;
  882         ctx = arg;
  883         ctx->vr_busaddr = segs[0].ds_addr;
  884 }
  885 
  886 static int
  887 vr_dma_alloc(struct vr_softc *sc)
  888 {
  889         struct vr_dmamap_arg    ctx;
  890         struct vr_txdesc        *txd;
  891         struct vr_rxdesc        *rxd;
  892         bus_size_t              tx_alignment;
  893         int                     error, i;
  894 
  895         /* Create parent DMA tag. */
  896         error = bus_dma_tag_create(
  897             bus_get_dma_tag(sc->vr_dev),        /* parent */
  898             1, 0,                       /* alignment, boundary */
  899             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  900             BUS_SPACE_MAXADDR,          /* highaddr */
  901             NULL, NULL,                 /* filter, filterarg */
  902             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
  903             0,                          /* nsegments */
  904             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
  905             0,                          /* flags */
  906             NULL, NULL,                 /* lockfunc, lockarg */
  907             &sc->vr_cdata.vr_parent_tag);
  908         if (error != 0) {
  909                 device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
  910                 goto fail;
  911         }
  912         /* Create tag for Tx ring. */
  913         error = bus_dma_tag_create(
  914             sc->vr_cdata.vr_parent_tag, /* parent */
  915             VR_RING_ALIGN, 0,           /* alignment, boundary */
  916             BUS_SPACE_MAXADDR,          /* lowaddr */
  917             BUS_SPACE_MAXADDR,          /* highaddr */
  918             NULL, NULL,                 /* filter, filterarg */
  919             VR_TX_RING_SIZE,            /* maxsize */
  920             1,                          /* nsegments */
  921             VR_TX_RING_SIZE,            /* maxsegsize */
  922             0,                          /* flags */
  923             NULL, NULL,                 /* lockfunc, lockarg */
  924             &sc->vr_cdata.vr_tx_ring_tag);
  925         if (error != 0) {
  926                 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
  927                 goto fail;
  928         }
  929 
  930         /* Create tag for Rx ring. */
  931         error = bus_dma_tag_create(
  932             sc->vr_cdata.vr_parent_tag, /* parent */
  933             VR_RING_ALIGN, 0,           /* alignment, boundary */
  934             BUS_SPACE_MAXADDR,          /* lowaddr */
  935             BUS_SPACE_MAXADDR,          /* highaddr */
  936             NULL, NULL,                 /* filter, filterarg */
  937             VR_RX_RING_SIZE,            /* maxsize */
  938             1,                          /* nsegments */
  939             VR_RX_RING_SIZE,            /* maxsegsize */
  940             0,                          /* flags */
  941             NULL, NULL,                 /* lockfunc, lockarg */
  942             &sc->vr_cdata.vr_rx_ring_tag);
  943         if (error != 0) {
  944                 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
  945                 goto fail;
  946         }
  947 
  948         if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
  949                 tx_alignment = sizeof(uint32_t);
  950         else
  951                 tx_alignment = 1;
  952         /* Create tag for Tx buffers. */
  953         error = bus_dma_tag_create(
  954             sc->vr_cdata.vr_parent_tag, /* parent */
  955             tx_alignment, 0,            /* alignment, boundary */
  956             BUS_SPACE_MAXADDR,          /* lowaddr */
  957             BUS_SPACE_MAXADDR,          /* highaddr */
  958             NULL, NULL,                 /* filter, filterarg */
  959             MCLBYTES * VR_MAXFRAGS,     /* maxsize */
  960             VR_MAXFRAGS,                /* nsegments */
  961             MCLBYTES,                   /* maxsegsize */
  962             0,                          /* flags */
  963             NULL, NULL,                 /* lockfunc, lockarg */
  964             &sc->vr_cdata.vr_tx_tag);
  965         if (error != 0) {
  966                 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
  967                 goto fail;
  968         }
  969 
  970         /* Create tag for Rx buffers. */
  971         error = bus_dma_tag_create(
  972             sc->vr_cdata.vr_parent_tag, /* parent */
  973             VR_RX_ALIGN, 0,             /* alignment, boundary */
  974             BUS_SPACE_MAXADDR,          /* lowaddr */
  975             BUS_SPACE_MAXADDR,          /* highaddr */
  976             NULL, NULL,                 /* filter, filterarg */
  977             MCLBYTES,                   /* maxsize */
  978             1,                          /* nsegments */
  979             MCLBYTES,                   /* maxsegsize */
  980             0,                          /* flags */
  981             NULL, NULL,                 /* lockfunc, lockarg */
  982             &sc->vr_cdata.vr_rx_tag);
  983         if (error != 0) {
  984                 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
  985                 goto fail;
  986         }
  987 
  988         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
  989         error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
  990             (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
  991             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
  992         if (error != 0) {
  993                 device_printf(sc->vr_dev,
  994                     "failed to allocate DMA'able memory for Tx ring\n");
  995                 goto fail;
  996         }
  997 
  998         ctx.vr_busaddr = 0;
  999         error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
 1000             sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
 1001             VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
 1002         if (error != 0 || ctx.vr_busaddr == 0) {
 1003                 device_printf(sc->vr_dev,
 1004                     "failed to load DMA'able memory for Tx ring\n");
 1005                 goto fail;
 1006         }
 1007         sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
 1008 
 1009         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 1010         error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
 1011             (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
 1012             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
 1013         if (error != 0) {
 1014                 device_printf(sc->vr_dev,
 1015                     "failed to allocate DMA'able memory for Rx ring\n");
 1016                 goto fail;
 1017         }
 1018 
 1019         ctx.vr_busaddr = 0;
 1020         error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
 1021             sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
 1022             VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
 1023         if (error != 0 || ctx.vr_busaddr == 0) {
 1024                 device_printf(sc->vr_dev,
 1025                     "failed to load DMA'able memory for Rx ring\n");
 1026                 goto fail;
 1027         }
 1028         sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;
 1029 
 1030         /* Create DMA maps for Tx buffers. */
 1031         for (i = 0; i < VR_TX_RING_CNT; i++) {
 1032                 txd = &sc->vr_cdata.vr_txdesc[i];
 1033                 txd->tx_m = NULL;
 1034                 txd->tx_dmamap = NULL;
 1035                 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
 1036                     &txd->tx_dmamap);
 1037                 if (error != 0) {
 1038                         device_printf(sc->vr_dev,
 1039                             "failed to create Tx dmamap\n");
 1040                         goto fail;
 1041                 }
 1042         }
 1043         /* Create DMA maps for Rx buffers. */
 1044         if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
 1045             &sc->vr_cdata.vr_rx_sparemap)) != 0) {
 1046                 device_printf(sc->vr_dev,
 1047                     "failed to create spare Rx dmamap\n");
 1048                 goto fail;
 1049         }
 1050         for (i = 0; i < VR_RX_RING_CNT; i++) {
 1051                 rxd = &sc->vr_cdata.vr_rxdesc[i];
 1052                 rxd->rx_m = NULL;
 1053                 rxd->rx_dmamap = NULL;
 1054                 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
 1055                     &rxd->rx_dmamap);
 1056                 if (error != 0) {
 1057                         device_printf(sc->vr_dev,
 1058                             "failed to create Rx dmamap\n");
 1059                         goto fail;
 1060                 }
 1061         }
 1062 
 1063 fail:
 1064         return (error);
 1065 }
 1066 
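/*
 * Descriptive note on the tag hierarchy built above: a single parent
 * tag restricts all allocations to 32-bit bus addresses; beneath it sit
 * one tag per descriptor ring (VR_RING_ALIGN-aligned, one contiguous
 * segment) and one tag per buffer direction (Tx allows up to
 * VR_MAXFRAGS segments per frame, Rx uses single VR_RX_ALIGN-aligned
 * mbuf clusters).
 */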
 1067 static void
 1068 vr_dma_free(struct vr_softc *sc)
 1069 {
 1070         struct vr_txdesc        *txd;
 1071         struct vr_rxdesc        *rxd;
 1072         int                     i;
 1073 
 1074         /* Tx ring. */
 1075         if (sc->vr_cdata.vr_tx_ring_tag) {
 1076                 if (sc->vr_rdata.vr_tx_ring_paddr)
 1077                         bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
 1078                             sc->vr_cdata.vr_tx_ring_map);
 1079                 if (sc->vr_rdata.vr_tx_ring)
 1080                         bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
 1081                             sc->vr_rdata.vr_tx_ring,
 1082                             sc->vr_cdata.vr_tx_ring_map);
 1083                 sc->vr_rdata.vr_tx_ring = NULL;
 1084                 sc->vr_rdata.vr_tx_ring_paddr = 0;
 1085                 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
 1086                 sc->vr_cdata.vr_tx_ring_tag = NULL;
 1087         }
 1088         /* Rx ring. */
 1089         if (sc->vr_cdata.vr_rx_ring_tag) {
 1090                 if (sc->vr_rdata.vr_rx_ring_paddr)
 1091                         bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
 1092                             sc->vr_cdata.vr_rx_ring_map);
 1093                 if (sc->vr_rdata.vr_rx_ring)
 1094                         bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
 1095                             sc->vr_rdata.vr_rx_ring,
 1096                             sc->vr_cdata.vr_rx_ring_map);
 1097                 sc->vr_rdata.vr_rx_ring = NULL;
 1098                 sc->vr_rdata.vr_rx_ring_paddr = 0;
 1099                 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
 1100                 sc->vr_cdata.vr_rx_ring_tag = NULL;
 1101         }
 1102         /* Tx buffers. */
 1103         if (sc->vr_cdata.vr_tx_tag) {
 1104                 for (i = 0; i < VR_TX_RING_CNT; i++) {
 1105                         txd = &sc->vr_cdata.vr_txdesc[i];
 1106                         if (txd->tx_dmamap) {
 1107                                 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
 1108                                     txd->tx_dmamap);
 1109                                 txd->tx_dmamap = NULL;
 1110                         }
 1111                 }
 1112                 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
 1113                 sc->vr_cdata.vr_tx_tag = NULL;
 1114         }
 1115         /* Rx buffers. */
 1116         if (sc->vr_cdata.vr_rx_tag) {
 1117                 for (i = 0; i < VR_RX_RING_CNT; i++) {
 1118                         rxd = &sc->vr_cdata.vr_rxdesc[i];
 1119                         if (rxd->rx_dmamap) {
 1120                                 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
 1121                                     rxd->rx_dmamap);
 1122                                 rxd->rx_dmamap = NULL;
 1123                         }
 1124                 }
 1125                 if (sc->vr_cdata.vr_rx_sparemap) {
 1126                         bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
 1127                             sc->vr_cdata.vr_rx_sparemap);
 1128                         sc->vr_cdata.vr_rx_sparemap = 0;
 1129                 }
 1130                 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
 1131                 sc->vr_cdata.vr_rx_tag = NULL;
 1132         }
 1133 
 1134         if (sc->vr_cdata.vr_parent_tag) {
 1135                 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
 1136                 sc->vr_cdata.vr_parent_tag = NULL;
 1137         }
 1138 }
 1139 
 1140 /*
 1141  * Initialize the transmit descriptors.
 1142  */
 1143 static int
 1144 vr_tx_ring_init(struct vr_softc *sc)
 1145 {
 1146         struct vr_ring_data     *rd;
 1147         struct vr_txdesc        *txd;
 1148         bus_addr_t              addr;
 1149         int                     i;
 1150 
 1151         sc->vr_cdata.vr_tx_prod = 0;
 1152         sc->vr_cdata.vr_tx_cons = 0;
 1153         sc->vr_cdata.vr_tx_cnt = 0;
 1154         sc->vr_cdata.vr_tx_pkts = 0;
 1155 
 1156         rd = &sc->vr_rdata;
 1157         bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
 1158         for (i = 0; i < VR_TX_RING_CNT; i++) {
 1159                 if (i == VR_TX_RING_CNT - 1)
 1160                         addr = VR_TX_RING_ADDR(sc, 0);
 1161                 else
 1162                         addr = VR_TX_RING_ADDR(sc, i + 1);
 1163                 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
 1164                 txd = &sc->vr_cdata.vr_txdesc[i];
 1165                 txd->tx_m = NULL;
 1166         }
 1167 
 1168         bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
 1169             sc->vr_cdata.vr_tx_ring_map,
 1170             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1171 
 1172         return (0);
 1173 }
 1174 
 1175 /*
 1176  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1177  * we arrange the descriptors in a closed ring, so that the last descriptor
 1178  * points back to the first.
 1179  */
 1180 static int
 1181 vr_rx_ring_init(struct vr_softc *sc)
 1182 {
 1183         struct vr_ring_data     *rd;
 1184         struct vr_rxdesc        *rxd;
 1185         bus_addr_t              addr;
 1186         int                     i;
 1187 
 1188         sc->vr_cdata.vr_rx_cons = 0;
 1189 
 1190         rd = &sc->vr_rdata;
 1191         bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
 1192         for (i = 0; i < VR_RX_RING_CNT; i++) {
 1193                 rxd = &sc->vr_cdata.vr_rxdesc[i];
 1194                 rxd->rx_m = NULL;
 1195                 rxd->desc = &rd->vr_rx_ring[i];
 1196                 if (i == VR_RX_RING_CNT - 1)
 1197                         addr = VR_RX_RING_ADDR(sc, 0);
 1198                 else
 1199                         addr = VR_RX_RING_ADDR(sc, i + 1);
 1200                 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
 1201                 if (vr_newbuf(sc, i) != 0)
 1202                         return (ENOBUFS);
 1203         }
 1204 
 1205         bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
 1206             sc->vr_cdata.vr_rx_ring_map,
 1207             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1208 
 1209         return (0);
 1210 }
 1211 
 1212 static __inline void
 1213 vr_discard_rxbuf(struct vr_rxdesc *rxd)
 1214 {
 1215         struct vr_desc  *desc;
 1216 
 1217         desc = rxd->desc;
 1218         desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
 1219         desc->vr_status = htole32(VR_RXSTAT_OWN);
 1220 }
 1221 
 1222 /*
 1223  * Initialize an RX descriptor and attach an MBUF cluster.
 1224  * Note: the length fields are only 11 bits wide, which means the
 1225  * largest size we can specify is 2047. This is important because
 1226  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 1227  * overflow the field and make a mess.
 1228  */
 1229 static int
 1230 vr_newbuf(struct vr_softc *sc, int idx)
 1231 {
 1232         struct vr_desc          *desc;
 1233         struct vr_rxdesc        *rxd;
 1234         struct mbuf             *m;
 1235         bus_dma_segment_t       segs[1];
 1236         bus_dmamap_t            map;
 1237         int                     nsegs;
 1238 
 1239         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1240         if (m == NULL)
 1241                 return (ENOBUFS);
 1242         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1243         m_adj(m, sizeof(uint64_t));
 1244 
 1245         if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
 1246             sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 1247                 m_freem(m);
 1248                 return (ENOBUFS);
 1249         }
 1250         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1251 
 1252         rxd = &sc->vr_cdata.vr_rxdesc[idx];
 1253         if (rxd->rx_m != NULL) {
 1254                 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
 1255                     BUS_DMASYNC_POSTREAD);
 1256                 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
 1257         }
 1258         map = rxd->rx_dmamap;
 1259         rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
 1260         sc->vr_cdata.vr_rx_sparemap = map;
 1261         bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
 1262             BUS_DMASYNC_PREREAD);
 1263         rxd->rx_m = m;
 1264         desc = rxd->desc;
 1265         desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
 1266         desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
 1267         desc->vr_status = htole32(VR_RXSTAT_OWN);
 1268 
 1269         return (0);
 1270 }
 1271 
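/*
 * Compiled-out check of the arithmetic described below (not in the
 * original source): the descriptor length field is 11 bits, so at most
 * 2047 bytes fit.  m_adj(m, sizeof(uint64_t)) trims the 2048-byte
 * cluster to 2040 bytes, which fits and keeps the payload 64-bit
 * aligned.
 */
#ifdef notdef
CTASSERT(MCLBYTES - sizeof(uint64_t) <= 2047);
#endif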
 1272 #ifndef __NO_STRICT_ALIGNMENT
 1273 static __inline void
 1274 vr_fixup_rx(struct mbuf *m)
 1275 {
 1276         uint16_t                *src, *dst;
 1277         int                     i;
 1278 
 1279         src = mtod(m, uint16_t *);
 1280         dst = src - 1;
 1281 
 1282         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 1283                 *dst++ = *src++;
 1284 
 1285         m->m_data -= ETHER_ALIGN;
 1286 }
 1287 #endif
 1288 
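/*
 * Descriptive note on vr_fixup_rx() above: the chip stores frames at a
 * 32-bit boundary, so the 14-byte Ethernet header leaves the IP header
 * misaligned on strict-alignment machines.  Sliding the frame back by
 * ETHER_ALIGN (2) bytes, 16 bits at a time, realigns it; the
 * m_len / 2 + 1 iterations cover a possible odd trailing byte.  For
 * example, a 60-byte frame copies 31 16-bit words.
 */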
 1289 /*
 1290  * A frame has been uploaded: pass the resulting mbuf chain up to
 1291  * the higher level protocols.
 1292  */
 1293 static int
 1294 vr_rxeof(struct vr_softc *sc)
 1295 {
 1296         struct vr_rxdesc        *rxd;
 1297         struct mbuf             *m;
 1298         struct ifnet            *ifp;
 1299         struct vr_desc          *cur_rx;
 1300         int                     cons, prog, total_len, rx_npkts;
 1301         uint32_t                rxstat, rxctl;
 1302 
 1303         VR_LOCK_ASSERT(sc);
 1304         ifp = sc->vr_ifp;
 1305         cons = sc->vr_cdata.vr_rx_cons;
 1306         rx_npkts = 0;
 1307 
 1308         bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
 1309             sc->vr_cdata.vr_rx_ring_map,
 1310             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1311 
 1312         for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
 1313 #ifdef DEVICE_POLLING
 1314                 if (ifp->if_capenable & IFCAP_POLLING) {
 1315                         if (sc->rxcycles <= 0)
 1316                                 break;
 1317                         sc->rxcycles--;
 1318                 }
 1319 #endif
 1320                 cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
 1321                 rxstat = le32toh(cur_rx->vr_status);
 1322                 rxctl = le32toh(cur_rx->vr_ctl);
 1323                 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
 1324                         break;
 1325 
 1326                 prog++;
 1327                 rxd = &sc->vr_cdata.vr_rxdesc[cons];
 1328                 m = rxd->rx_m;
 1329 
 1330                 /*
 1331                  * If an error occurs, update stats, clear the
 1332                  * status word and leave the mbuf cluster in place:
 1333                  * it should simply get re-used next time this descriptor
 1334                  * comes up in the ring.
  1335                  * We don't support S/G in the Rx path yet, so discard
  1336                  * partial frames.
 1337                  */
 1338                 if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
 1339                     (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
 1340                     (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
 1341                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1342                         sc->vr_stat.rx_errors++;
 1343                         if (rxstat & VR_RXSTAT_CRCERR)
 1344                                 sc->vr_stat.rx_crc_errors++;
 1345                         if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
 1346                                 sc->vr_stat.rx_alignment++;
 1347                         if (rxstat & VR_RXSTAT_FIFOOFLOW)
 1348                                 sc->vr_stat.rx_fifo_overflows++;
 1349                         if (rxstat & VR_RXSTAT_GIANT)
 1350                                 sc->vr_stat.rx_giants++;
 1351                         if (rxstat & VR_RXSTAT_RUNT)
 1352                                 sc->vr_stat.rx_runts++;
 1353                         if (rxstat & VR_RXSTAT_BUFFERR)
 1354                                 sc->vr_stat.rx_no_buffers++;
 1355 #ifdef  VR_SHOW_ERRORS
 1356                         device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
 1357                             __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
 1358 #endif
 1359                         vr_discard_rxbuf(rxd);
 1360                         continue;
 1361                 }
 1362 
 1363                 if (vr_newbuf(sc, cons) != 0) {
 1364                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 1365                         sc->vr_stat.rx_errors++;
 1366                         sc->vr_stat.rx_no_mbufs++;
 1367                         vr_discard_rxbuf(rxd);
 1368                         continue;
 1369                 }
 1370 
 1371                 /*
 1372                  * XXX The VIA Rhine chip includes the CRC with every
 1373                  * received frame, and there's no way to turn this
 1374                  * behavior off (at least, I can't find anything in
 1375                  * the manual that explains how to do it) so we have
 1376                  * to trim off the CRC manually.
 1377                  */
 1378                 total_len = VR_RXBYTES(rxstat);
 1379                 total_len -= ETHER_CRC_LEN;
 1380                 m->m_pkthdr.len = m->m_len = total_len;
 1381 #ifndef __NO_STRICT_ALIGNMENT
 1382                 /*
 1383                  * RX buffers must be 32-bit aligned.
 1384                  * On non-strict-alignment platforms the alignment
 1385                  * problem is ignored: the performance hit incurred by
 1386                  * unaligned accesses is much smaller than the hit of
 1387                  * forcing buffer copies all the time.
 1388                  */
 1389                 vr_fixup_rx(m);
 1390 #endif
 1391                 m->m_pkthdr.rcvif = ifp;
 1392                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 1393                 sc->vr_stat.rx_ok++;
 1394                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 1395                     (rxstat & VR_RXSTAT_FRAG) == 0 &&
 1396                     (rxctl & VR_RXCTL_IP) != 0) {
 1397                         /* Checksum is valid for non-fragmented IP packets. */
 1398                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1399                         if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
 1400                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1401                                 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
 1402                                         m->m_pkthdr.csum_flags |=
 1403                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 1404                                         if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
 1405                                                 m->m_pkthdr.csum_data = 0xffff;
 1406                                 }
 1407                         }
 1408                 }
 1409                 VR_UNLOCK(sc);
 1410                 (*ifp->if_input)(ifp, m);
 1411                 VR_LOCK(sc);
 1412                 rx_npkts++;
 1413         }
 1414 
 1415         if (prog > 0) {
 1416                 /*
 1417                  * Let the controller know how many RX buffers are
 1418                  * posted, but avoid the expensive register access if
 1419                  * TX pause capability was not negotiated with the
 1420                  * link partner.
 1421                  */
 1422                 if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
 1423                         if (prog >= VR_RX_RING_CNT)
 1424                                 prog = VR_RX_RING_CNT - 1;
 1425                         CSR_WRITE_1(sc, VR_FLOWCR0, prog);
 1426                 }
 1427                 sc->vr_cdata.vr_rx_cons = cons;
 1428                 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
 1429                     sc->vr_cdata.vr_rx_ring_map,
 1430                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1431         }
 1432         return (rx_npkts);
 1433 }
 1434 
 1435 /*
 1436  * A frame was downloaded to the chip. It's safe for us to clean up
 1437  * the list buffers.
 1438  */
 1439 static void
 1440 vr_txeof(struct vr_softc *sc)
 1441 {
 1442         struct vr_txdesc        *txd;
 1443         struct vr_desc          *cur_tx;
 1444         struct ifnet            *ifp;
 1445         uint32_t                txctl, txstat;
 1446         int                     cons, prod;
 1447 
 1448         VR_LOCK_ASSERT(sc);
 1449 
 1450         cons = sc->vr_cdata.vr_tx_cons;
 1451         prod = sc->vr_cdata.vr_tx_prod;
 1452         if (cons == prod)
 1453                 return;
 1454 
 1455         bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
 1456             sc->vr_cdata.vr_tx_ring_map,
 1457             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1458 
 1459         ifp = sc->vr_ifp;
 1460         /*
 1461          * Go through our tx list and free mbufs for those
 1462          * frames that have been transmitted.
 1463          */
 1464         for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
 1465                 cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
 1466                 txctl = le32toh(cur_tx->vr_ctl);
 1467                 txstat = le32toh(cur_tx->vr_status);
 1468                 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
 1469                         break;
 1470 
 1471                 sc->vr_cdata.vr_tx_cnt--;
 1472                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1473                 /* Only the first descriptor in the chain is valid. */
 1474                 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
 1475                         continue;
 1476 
 1477                 txd = &sc->vr_cdata.vr_txdesc[cons];
 1478                 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
 1479                     __func__));
 1480 
 1481                 if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
 1482                         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1483                         sc->vr_stat.tx_errors++;
 1484                         if ((txstat & VR_TXSTAT_ABRT) != 0) {
 1485                                 /* Give up and restart Tx. */
 1486                                 sc->vr_stat.tx_abort++;
 1487                                 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
 1488                                     txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 1489                                 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
 1490                                     txd->tx_dmamap);
 1491                                 m_freem(txd->tx_m);
 1492                                 txd->tx_m = NULL;
 1493                                 VR_INC(cons, VR_TX_RING_CNT);
 1494                                 sc->vr_cdata.vr_tx_cons = cons;
 1495                                 if (vr_tx_stop(sc) != 0) {
 1496                                         device_printf(sc->vr_dev,
 1497                                             "%s: Tx shutdown error -- "
 1498                                             "resetting\n", __func__);
 1499                                         sc->vr_flags |= VR_F_RESTART;
 1500                                         return;
 1501                                 }
 1502                                 vr_tx_start(sc);
 1503                                 break;
 1504                         }
 1505                         if ((sc->vr_revid < REV_ID_VT3071_A &&
 1506                             (txstat & VR_TXSTAT_UNDERRUN)) ||
 1507                             (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
 1508                                 sc->vr_stat.tx_underrun++;
 1509                                 /* Retry and restart Tx. */
 1510                                 sc->vr_cdata.vr_tx_cnt++;
 1511                                 sc->vr_cdata.vr_tx_cons = cons;
 1512                                 cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
 1513                                 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
 1514                                     sc->vr_cdata.vr_tx_ring_map,
 1515                                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1516                                 vr_tx_underrun(sc);
 1517                                 return;
 1518                         }
 1519                         if ((txstat & VR_TXSTAT_DEFER) != 0) {
 1520                                 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
 1521                                 sc->vr_stat.tx_collisions++;
 1522                         }
 1523                         if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
 1524                                 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
 1525                                 sc->vr_stat.tx_late_collisions++;
 1526                         }
 1527                 } else {
 1528                         sc->vr_stat.tx_ok++;
 1529                         if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 1530                 }
 1531 
 1532                 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
 1533                     BUS_DMASYNC_POSTWRITE);
 1534                 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
 1535                 if (sc->vr_revid < REV_ID_VT3071_A) {
 1536                         if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
 1537                             (txstat & VR_TXSTAT_COLLCNT) >> 3);
 1538                         sc->vr_stat.tx_collisions +=
 1539                             (txstat & VR_TXSTAT_COLLCNT) >> 3;
 1540                 } else {
 1541                         if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f));
 1542                         sc->vr_stat.tx_collisions += (txstat & 0x0f);
 1543                 }
 1544                 m_freem(txd->tx_m);
 1545                 txd->tx_m = NULL;
 1546         }
 1547 
 1548         sc->vr_cdata.vr_tx_cons = cons;
 1549         if (sc->vr_cdata.vr_tx_cnt == 0)
 1550                 sc->vr_watchdog_timer = 0;
 1551 }
 1552 
 1553 static void
 1554 vr_tick(void *xsc)
 1555 {
 1556         struct vr_softc         *sc;
 1557         struct mii_data         *mii;
 1558 
 1559         sc = (struct vr_softc *)xsc;
 1560 
 1561         VR_LOCK_ASSERT(sc);
 1562 
 1563         if ((sc->vr_flags & VR_F_RESTART) != 0) {
 1564                 device_printf(sc->vr_dev, "restarting\n");
 1565                 sc->vr_stat.num_restart++;
 1566                 sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1567                 vr_init_locked(sc);
 1568                 sc->vr_flags &= ~VR_F_RESTART;
 1569         }
 1570 
 1571         mii = device_get_softc(sc->vr_miibus);
 1572         mii_tick(mii);
 1573         if ((sc->vr_flags & VR_F_LINK) == 0)
 1574                 vr_miibus_statchg(sc->vr_dev);
 1575         vr_watchdog(sc);
 1576         callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
 1577 }
 1578 
 1579 #ifdef DEVICE_POLLING
 1580 static poll_handler_t vr_poll;
 1581 static poll_handler_t vr_poll_locked;
 1582 
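      /*
       * When the interface is in polling mode these routines replace the
       * interrupt path: vr_poll_locked() bounds the Rx work by "count"
       * (via sc->rxcycles), reclaims Tx descriptors and, for
       * POLL_AND_CHECK_STATUS, also services error and status conditions
       * by hand.
       */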
 1583 static int
 1584 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1585 {
 1586         struct vr_softc *sc;
 1587         int rx_npkts;
 1588 
 1589         sc = ifp->if_softc;
 1590         rx_npkts = 0;
 1591 
 1592         VR_LOCK(sc);
 1593         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1594                 rx_npkts = vr_poll_locked(ifp, cmd, count);
 1595         VR_UNLOCK(sc);
 1596         return (rx_npkts);
 1597 }
 1598 
 1599 static int
 1600 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1601 {
 1602         struct vr_softc *sc;
 1603         int rx_npkts;
 1604 
 1605         sc = ifp->if_softc;
 1606 
 1607         VR_LOCK_ASSERT(sc);
 1608 
 1609         sc->rxcycles = count;
 1610         rx_npkts = vr_rxeof(sc);
 1611         vr_txeof(sc);
 1612         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1613                 vr_start_locked(ifp);
 1614 
 1615         if (cmd == POLL_AND_CHECK_STATUS) {
 1616                 uint16_t status;
 1617 
 1618                 /* Also check status register. */
 1619                 status = CSR_READ_2(sc, VR_ISR);
 1620                 if (status)
 1621                         CSR_WRITE_2(sc, VR_ISR, status);
 1622 
 1623                 if ((status & VR_INTRS) == 0)
 1624                         return (rx_npkts);
 1625 
 1626                 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
 1627                     VR_ISR_STATSOFLOW)) != 0) {
 1628                         if (vr_error(sc, status) != 0)
 1629                                 return (rx_npkts);
 1630                 }
 1631                 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
 1632 #ifdef  VR_SHOW_ERRORS
 1633                         device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
 1634                             __func__, status, VR_ISR_ERR_BITS);
 1635 #endif
 1636                         vr_rx_start(sc);
 1637                 }
 1638         }
 1639         return (rx_npkts);
 1640 }
 1641 #endif /* DEVICE_POLLING */
 1642 
 1643 /* Back off the transmit threshold. */
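      /*
       * Each underrun bumps vr_txthresh one step up vr_tx_threshold_tables[]
       * until VR_TXTHRESH_MAX is reached, at which point the chip runs in
       * store-and-forward mode: whole frames are buffered before
       * transmission, so Tx FIFO underruns can no longer occur.
       */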
 1644 static void
 1645 vr_tx_underrun(struct vr_softc *sc)
 1646 {
 1647         int     thresh;
 1648 
 1649         device_printf(sc->vr_dev, "Tx underrun -- ");
 1650         if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
 1651                 thresh = sc->vr_txthresh;
 1652                 sc->vr_txthresh++;
 1653                 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
 1654                         sc->vr_txthresh = VR_TXTHRESH_MAX;
 1655                         printf("using store and forward mode\n");
 1656                 } else
 1657                         printf("increasing Tx threshold (%d -> %d)\n",
 1658                             vr_tx_threshold_tables[thresh].value,
 1659                             vr_tx_threshold_tables[thresh + 1].value);
 1660         } else
 1661                 printf("\n");
 1662         sc->vr_stat.tx_underrun++;
 1663         if (vr_tx_stop(sc) != 0) {
 1664                 device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
 1665                     "resetting\n", __func__);
 1666                 sc->vr_flags |= VR_F_RESTART;
 1667                 return;
 1668         }
 1669         vr_tx_start(sc);
 1670 }
 1671 
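      /*
       * Interrupt handling is split in two: vr_intr() is a fast interrupt
       * filter that runs in primary interrupt context, claims the
       * interrupt, masks further chip interrupts, and defers the real work
       * to vr_int_task(), which runs on taskqueue_fast with the softc lock
       * held.
       */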
 1672 static int
 1673 vr_intr(void *arg)
 1674 {
 1675         struct vr_softc         *sc;
 1676         uint16_t                status;
 1677 
 1678         sc = (struct vr_softc *)arg;
 1679 
 1680         status = CSR_READ_2(sc, VR_ISR);
 1681         if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
 1682                 return (FILTER_STRAY);
 1683 
 1684         /* Disable interrupts. */
 1685         CSR_WRITE_2(sc, VR_IMR, 0x0000);
 1686 
 1687         taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);
 1688 
 1689         return (FILTER_HANDLED);
 1690 }
 1691 
 1692 static void
 1693 vr_int_task(void *arg, int npending)
 1694 {
 1695         struct vr_softc         *sc;
 1696         struct ifnet            *ifp;
 1697         uint16_t                status;
 1698 
 1699         sc = (struct vr_softc *)arg;
 1700 
 1701         VR_LOCK(sc);
 1702 
 1703         if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
 1704                 goto done_locked;
 1705 
 1706         status = CSR_READ_2(sc, VR_ISR);
 1707         ifp = sc->vr_ifp;
 1708 #ifdef DEVICE_POLLING
 1709         if ((ifp->if_capenable & IFCAP_POLLING) != 0)
 1710                 goto done_locked;
 1711 #endif
 1712 
 1713         /* Suppress unwanted interrupts. */
 1714         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
 1715             (sc->vr_flags & VR_F_RESTART) != 0) {
 1716                 CSR_WRITE_2(sc, VR_IMR, 0);
 1717                 CSR_WRITE_2(sc, VR_ISR, status);
 1718                 goto done_locked;
 1719         }
 1720 
 1721         for (; (status & VR_INTRS) != 0;) {
 1722                 CSR_WRITE_2(sc, VR_ISR, status);
 1723                 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
 1724                     VR_ISR_STATSOFLOW)) != 0) {
 1725                         if (vr_error(sc, status) != 0) {
 1726                                 VR_UNLOCK(sc);
 1727                                 return;
 1728                         }
 1729                 }
 1730                 vr_rxeof(sc);
 1731                 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
 1732 #ifdef  VR_SHOW_ERRORS
 1733                         device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
 1734                             __func__, status, VR_ISR_ERR_BITS);
 1735 #endif
 1736                         /* Restart Rx if RxDMA SM was stopped. */
 1737                         vr_rx_start(sc);
 1738                 }
 1739                 vr_txeof(sc);
 1740 
 1741                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1742                         vr_start_locked(ifp);
 1743 
 1744                 status = CSR_READ_2(sc, VR_ISR);
 1745         }
 1746 
 1747         /* Re-enable interrupts. */
 1748         CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
 1749 
 1750 done_locked:
 1751         VR_UNLOCK(sc);
 1752 }
 1753 
 1754 static int
 1755 vr_error(struct vr_softc *sc, uint16_t status)
 1756 {
 1757         uint16_t pcis;
 1758 
 1759         status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
 1760         if ((status & VR_ISR_BUSERR) != 0) {
 1761                 status &= ~VR_ISR_BUSERR;
 1762                 sc->vr_stat.bus_errors++;
 1763                 /* Disable further interrupts. */
 1764                 CSR_WRITE_2(sc, VR_IMR, 0);
 1765                 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
 1766                 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
 1767                     "resetting\n", pcis);
 1768                 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
 1769                 sc->vr_flags |= VR_F_RESTART;
 1770                 return (EAGAIN);
 1771         }
 1772         if ((status & VR_ISR_LINKSTAT2) != 0) {
 1773                 /* Link state change, duplex changes etc. */
 1774                 status &= ~VR_ISR_LINKSTAT2;
 1775         }
 1776         if ((status & VR_ISR_STATSOFLOW) != 0) {
 1777                 status &= ~VR_ISR_STATSOFLOW;
 1778                 if (sc->vr_revid >= REV_ID_VT6105M_A0) {
 1779                         /* Update MIB counters. */
 1780                 }
 1781         }
 1782 
 1783         if (status != 0)
 1784                 device_printf(sc->vr_dev,
 1785                     "unhandled interrupt, status = 0x%04x\n", status);
 1786         return (0);
 1787 }
 1788 
 1789 /*
 1790  * Encapsulate an mbuf chain in Tx descriptors by coupling the mbuf
 1791  * data pointers to the descriptor fragment pointers.
 1792  */
 1793 static int
 1794 vr_encap(struct vr_softc *sc, struct mbuf **m_head)
 1795 {
 1796         struct vr_txdesc        *txd;
 1797         struct vr_desc          *desc;
 1798         struct mbuf             *m;
 1799         bus_dma_segment_t       txsegs[VR_MAXFRAGS];
 1800         uint32_t                csum_flags, txctl;
 1801         int                     error, i, nsegs, prod, si;
 1802         int                     padlen;
 1803 
 1804         VR_LOCK_ASSERT(sc);
 1805 
 1806         M_ASSERTPKTHDR((*m_head));
 1807 
 1808         /*
 1809          * Some VIA Rhine chips want packet buffers to be longword
 1810          * aligned, but very often our mbufs aren't. Rather than
 1811          * waste time deciding when to copy and when not to,
 1812          * just do it all the time.
 1813          */
 1814         if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
 1815                 m = m_defrag(*m_head, M_NOWAIT);
 1816                 if (m == NULL) {
 1817                         m_freem(*m_head);
 1818                         *m_head = NULL;
 1819                         return (ENOBUFS);
 1820                 }
 1821                 *m_head = m;
 1822         }
 1823 
 1824         /*
 1825          * The Rhine chip doesn't auto-pad, so we have to make
 1826          * sure to pad short frames out to the minimum frame length
 1827          * ourselves.
 1828          */
 1829         if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
 1830                 m = *m_head;
 1831                 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
 1832                 if (M_WRITABLE(m) == 0) {
 1833                         /* Get a writable copy. */
 1834                         m = m_dup(*m_head, M_NOWAIT);
 1835                         m_freem(*m_head);
 1836                         if (m == NULL) {
 1837                                 *m_head = NULL;
 1838                                 return (ENOBUFS);
 1839                         }
 1840                         *m_head = m;
 1841                 }
 1842                 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
 1843                         m = m_defrag(m, M_NOWAIT);
 1844                         if (m == NULL) {
 1845                                 m_freem(*m_head);
 1846                                 *m_head = NULL;
 1847                                 return (ENOBUFS);
 1848                         }
 1849                 }
 1850                 /*
 1851                  * Manually pad short frames, and zero the pad space
 1852                  * to avoid leaking data.
 1853                  */
 1854                 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
 1855                 m->m_pkthdr.len += padlen;
 1856                 m->m_len = m->m_pkthdr.len;
 1857                 *m_head = m;
 1858         }
 1859 
 1860         prod = sc->vr_cdata.vr_tx_prod;
 1861         txd = &sc->vr_cdata.vr_txdesc[prod];
 1862         error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
 1863             *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
 1864         if (error == EFBIG) {
 1865                 m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
 1866                 if (m == NULL) {
 1867                         m_freem(*m_head);
 1868                         *m_head = NULL;
 1869                         return (ENOBUFS);
 1870                 }
 1871                 *m_head = m;
 1872                 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
 1873                     txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
 1874                 if (error != 0) {
 1875                         m_freem(*m_head);
 1876                         *m_head = NULL;
 1877                         return (error);
 1878                 }
 1879         } else if (error != 0)
 1880                 return (error);
 1881         if (nsegs == 0) {
 1882                 m_freem(*m_head);
 1883                 *m_head = NULL;
 1884                 return (EIO);
 1885         }
 1886 
 1887         /* Check number of available descriptors. */
 1888         if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
 1889                 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
 1890                 return (ENOBUFS);
 1891         }
 1892 
 1893         txd->tx_m = *m_head;
 1894         bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
 1895             BUS_DMASYNC_PREWRITE);
 1896 
 1897         /* Set checksum offload. */
 1898         csum_flags = 0;
 1899         if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
 1900                 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
 1901                         csum_flags |= VR_TXCTL_IPCSUM;
 1902                 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
 1903                         csum_flags |= VR_TXCTL_TCPCSUM;
 1904                 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
 1905                         csum_flags |= VR_TXCTL_UDPCSUM;
 1906         }
 1907 
 1908         /*
 1909          * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK
 1910          * bit is required in every descriptor, whether a frame occupies
 1911          * a single buffer or several. Also, the VR_TXSTAT_OWN bit is
 1912          * valid only in the first descriptor of a multi-fragment frame.
 1913          * Without these settings the chip generates Tx underrun
 1914          * interrupts and can't send any frames.
 1915          */
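              /*
               * For example, a frame that maps to two DMA segments is laid
               * out as:
               *   desc[si]    : TLINK | FIRSTFRAG
               *   desc[si + 1]: TLINK | LASTFRAG (| FINT on every
               *                 VR_TX_INTR_THRESH-th frame)
               * with checksum offload flags OR'ed into every descriptor,
               * and VR_TXSTAT_OWN is set on desc[si] only after the whole
               * chain has been written out.
               */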
 1916         si = prod;
 1917         for (i = 0; i < nsegs; i++) {
 1918                 desc = &sc->vr_rdata.vr_tx_ring[prod];
 1919                 desc->vr_status = 0;
 1920                 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
 1921                 if (i == 0)
 1922                         txctl |= VR_TXCTL_FIRSTFRAG;
 1923                 desc->vr_ctl = htole32(txctl);
 1924                 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
 1925                 sc->vr_cdata.vr_tx_cnt++;
 1926                 VR_INC(prod, VR_TX_RING_CNT);
 1927         }
 1928         /* Update producer index. */
 1929         sc->vr_cdata.vr_tx_prod = prod;
 1930 
 1931         prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
 1932         desc = &sc->vr_rdata.vr_tx_ring[prod];
 1933 
 1934         /*
 1935          * Set EOP on the last descriptor and request a Tx completion
 1936          * interrupt for every VR_TX_INTR_THRESH-th frame.
 1937          */
 1938         VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
 1939         if (sc->vr_cdata.vr_tx_pkts == 0)
 1940                 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
 1941         else
 1942                 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
 1943 
 1944         /* Lastly, hand ownership of the first descriptor to the hardware. */
 1945         desc = &sc->vr_rdata.vr_tx_ring[si];
 1946         desc->vr_status |= htole32(VR_TXSTAT_OWN);
 1947 
 1948         /* Sync descriptors. */
 1949         bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
 1950             sc->vr_cdata.vr_tx_ring_map,
 1951             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1952 
 1953         return (0);
 1954 }
 1955 
 1956 static void
 1957 vr_start(struct ifnet *ifp)
 1958 {
 1959         struct vr_softc         *sc;
 1960 
 1961         sc = ifp->if_softc;
 1962         VR_LOCK(sc);
 1963         vr_start_locked(ifp);
 1964         VR_UNLOCK(sc);
 1965 }
 1966 
 1967 static void
 1968 vr_start_locked(struct ifnet *ifp)
 1969 {
 1970         struct vr_softc         *sc;
 1971         struct mbuf             *m_head;
 1972         int                     enq;
 1973 
 1974         sc = ifp->if_softc;
 1975 
 1976         VR_LOCK_ASSERT(sc);
 1977 
 1978         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1979             IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
 1980                 return;
 1981 
 1982         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1983             sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
 1984                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1985                 if (m_head == NULL)
 1986                         break;
 1987                 /*
 1988                  * Pack the data into the transmit ring. If we
 1989                  * don't have room, set the OACTIVE flag and wait
 1990                  * for the NIC to drain the ring.
 1991                  */
 1992                 if (vr_encap(sc, &m_head)) {
 1993                         if (m_head == NULL)
 1994                                 break;
 1995                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1996                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1997                         break;
 1998                 }
 1999 
 2000                 enq++;
 2001                 /*
 2002                  * If there's a BPF listener, bounce a copy of this frame
 2003                  * to him.
 2004                  */
 2005                 ETHER_BPF_MTAP(ifp, m_head);
 2006         }
 2007 
 2008         if (enq > 0) {
 2009                 /* Tell the chip to start transmitting. */
 2010                 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
 2011                 /* Set a timeout in case the chip goes out to lunch. */
 2012                 sc->vr_watchdog_timer = 5;
 2013         }
 2014 }
 2015 
 2016 static void
 2017 vr_init(void *xsc)
 2018 {
 2019         struct vr_softc         *sc;
 2020 
 2021         sc = (struct vr_softc *)xsc;
 2022         VR_LOCK(sc);
 2023         vr_init_locked(sc);
 2024         VR_UNLOCK(sc);
 2025 }
 2026 
 2027 static void
 2028 vr_init_locked(struct vr_softc *sc)
 2029 {
 2030         struct ifnet            *ifp;
 2031         struct mii_data         *mii;
 2032         bus_addr_t              addr;
 2033         int                     i;
 2034 
 2035         VR_LOCK_ASSERT(sc);
 2036 
 2037         ifp = sc->vr_ifp;
 2038         mii = device_get_softc(sc->vr_miibus);
 2039 
 2040         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2041                 return;
 2042 
 2043         /* Cancel pending I/O and free all RX/TX buffers. */
 2044         vr_stop(sc);
 2045         vr_reset(sc);
 2046 
 2047         /* Set our station address. */
 2048         for (i = 0; i < ETHER_ADDR_LEN; i++)
 2049                 CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);
 2050 
 2051         /* Set DMA size. */
 2052         VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
 2053         VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
 2054 
 2055         /*
 2056          * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
 2057          * so we must set both.
 2058          */
 2059         VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
 2060         VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
 2061 
 2062         VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
 2063         VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);
 2064 
 2065         VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
 2066         VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
 2067 
 2068         VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
 2069         VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);
 2070 
 2071         /* Init circular RX list. */
 2072         if (vr_rx_ring_init(sc) != 0) {
 2073                 device_printf(sc->vr_dev,
 2074                     "initialization failed: no memory for rx buffers\n");
 2075                 vr_stop(sc);
 2076                 return;
 2077         }
 2078 
 2079         /* Init tx descriptors. */
 2080         vr_tx_ring_init(sc);
 2081 
 2082         if ((sc->vr_quirks & VR_Q_CAM) != 0) {
 2083                 uint8_t vcam[2] = { 0, 0 };
 2084 
 2085                 /* Disable VLAN hardware tag insertion/stripping. */
 2086                 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
 2087                 /* Disable VLAN hardware filtering. */
 2088                 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
 2089                 /* Disable all CAM entries. */
 2090                 vr_cam_mask(sc, VR_MCAST_CAM, 0);
 2091                 vr_cam_mask(sc, VR_VLAN_CAM, 0);
 2092                 /* Enable the first VLAN CAM. */
 2093                 vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
 2094                 vr_cam_mask(sc, VR_VLAN_CAM, 1);
 2095         }
 2096 
 2097         /*
 2098          * Set up receive filter.
 2099          */
 2100         vr_set_filter(sc);
 2101 
 2102         /*
 2103          * Load the address of the RX ring.
 2104          */
 2105         addr = VR_RX_RING_ADDR(sc, 0);
 2106         CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
 2107         /*
 2108          * Load the address of the TX ring.
 2109          */
 2110         addr = VR_TX_RING_ADDR(sc, 0);
 2111         CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
 2112         /* Default : full-duplex, no Tx poll. */
 2113         CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);
 2114 
 2115         /* Set flow-control parameters for Rhine III. */
 2116         if (sc->vr_revid >= REV_ID_VT6105_A0) {
 2117                 /*
 2118                  * Configure the count of Rx buffers available for
 2119                  * incoming packets.
 2120                  * Even though the data sheet says almost nothing
 2121                  * about this register, it should be updated whenever
 2122                  * the driver posts new RX buffers to the controller.
 2123                  * Otherwise no XON frame is sent to the link partner
 2124                  * even when the controller has plenty of RX buffers,
 2125                  * and the host would be isolated from the network.
 2126                  * The controller is not smart enough to track the
 2127                  * number of available RX buffers, so the driver has
 2128                  * to tell it how many buffers are posted.
 2129                  * In other words, this register works like a residue
 2130                  * counter for RX buffers and should be initialized
 2131                  * to the total number of RX buffers - 1 before
 2132                  * enabling the RX MAC.  Note that the register is
 2133                  * 8 bits wide, so it effectively limits the number of
 2134                  * RX buffers the controller can be told about to 255.
 2135                  */
 2136                 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
 2137                 /*
 2138                  * Tx pause low threshold : 8 free receive buffers
 2139                  * Tx pause XON high threshold : 24 free receive buffers
 2140                  */
 2141                 CSR_WRITE_1(sc, VR_FLOWCR1,
 2142                     VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
 2143                 /* Set Tx pause timer. */
 2144                 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
 2145         }
 2146 
 2147         /* Enable receiver and transmitter. */
 2148         CSR_WRITE_1(sc, VR_CR0,
 2149             VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);
 2150 
 2151         CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
 2152 #ifdef DEVICE_POLLING
 2153         /*
 2154          * Disable interrupts if we are polling.
 2155          */
 2156         if (ifp->if_capenable & IFCAP_POLLING)
 2157                 CSR_WRITE_2(sc, VR_IMR, 0);
 2158         else
 2159 #endif
 2160         /*
 2161          * Enable interrupts and disable MII intrs.
 2162          */
 2163         CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
 2164         if (sc->vr_revid > REV_ID_VT6102_A)
 2165                 CSR_WRITE_2(sc, VR_MII_IMR, 0);
 2166 
 2167         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2168         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2169 
 2170         sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
 2171         mii_mediachg(mii);
 2172 
 2173         callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
 2174 }
 2175 
 2176 /*
 2177  * Set media options.
 2178  */
 2179 static int
 2180 vr_ifmedia_upd(struct ifnet *ifp)
 2181 {
 2182         struct vr_softc         *sc;
 2183         struct mii_data         *mii;
 2184         struct mii_softc        *miisc;
 2185         int                     error;
 2186 
 2187         sc = ifp->if_softc;
 2188         VR_LOCK(sc);
 2189         mii = device_get_softc(sc->vr_miibus);
 2190         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 2191                 PHY_RESET(miisc);
 2192         sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
 2193         error = mii_mediachg(mii);
 2194         VR_UNLOCK(sc);
 2195 
 2196         return (error);
 2197 }
 2198 
 2199 /*
 2200  * Report current media status.
 2201  */
 2202 static void
 2203 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2204 {
 2205         struct vr_softc         *sc;
 2206         struct mii_data         *mii;
 2207 
 2208         sc = ifp->if_softc;
 2209         mii = device_get_softc(sc->vr_miibus);
 2210         VR_LOCK(sc);
 2211         if ((ifp->if_flags & IFF_UP) == 0) {
 2212                 VR_UNLOCK(sc);
 2213                 return;
 2214         }
 2215         mii_pollstat(mii);
 2216         ifmr->ifm_active = mii->mii_media_active;
 2217         ifmr->ifm_status = mii->mii_media_status;
 2218         VR_UNLOCK(sc);
 2219 }
 2220 
 2221 static int
 2222 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2223 {
 2224         struct vr_softc         *sc;
 2225         struct ifreq            *ifr;
 2226         struct mii_data         *mii;
 2227         int                     error, mask;
 2228 
 2229         sc = ifp->if_softc;
 2230         ifr = (struct ifreq *)data;
 2231         error = 0;
 2232 
 2233         switch (command) {
 2234         case SIOCSIFFLAGS:
 2235                 VR_LOCK(sc);
 2236                 if (ifp->if_flags & IFF_UP) {
 2237                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2238                                 if ((ifp->if_flags ^ sc->vr_if_flags) &
 2239                                     (IFF_PROMISC | IFF_ALLMULTI))
 2240                                         vr_set_filter(sc);
 2241                         } else {
 2242                                 if ((sc->vr_flags & VR_F_DETACHED) == 0)
 2243                                         vr_init_locked(sc);
 2244                         }
 2245                 } else {
 2246                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2247                                 vr_stop(sc);
 2248                 }
 2249                 sc->vr_if_flags = ifp->if_flags;
 2250                 VR_UNLOCK(sc);
 2251                 break;
 2252         case SIOCADDMULTI:
 2253         case SIOCDELMULTI:
 2254                 VR_LOCK(sc);
 2255                 vr_set_filter(sc);
 2256                 VR_UNLOCK(sc);
 2257                 break;
 2258         case SIOCGIFMEDIA:
 2259         case SIOCSIFMEDIA:
 2260                 mii = device_get_softc(sc->vr_miibus);
 2261                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 2262                 break;
 2263         case SIOCSIFCAP:
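                      /* Compute the set of capability bits being toggled. */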
 2264                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 2265 #ifdef DEVICE_POLLING
 2266                 if (mask & IFCAP_POLLING) {
 2267                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 2268                                 error = ether_poll_register(vr_poll, ifp);
 2269                                 if (error != 0)
 2270                                         break;
 2271                                 VR_LOCK(sc);
 2272                                 /* Disable interrupts. */
 2273                                 CSR_WRITE_2(sc, VR_IMR, 0x0000);
 2274                                 ifp->if_capenable |= IFCAP_POLLING;
 2275                                 VR_UNLOCK(sc);
 2276                         } else {
 2277                                 error = ether_poll_deregister(ifp);
 2278                                 /* Enable interrupts. */
 2279                                 VR_LOCK(sc);
 2280                                 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
 2281                                 ifp->if_capenable &= ~IFCAP_POLLING;
 2282                                 VR_UNLOCK(sc);
 2283                         }
 2284                 }
 2285 #endif /* DEVICE_POLLING */
 2286                 if ((mask & IFCAP_TXCSUM) != 0 &&
 2287                     (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
 2288                         ifp->if_capenable ^= IFCAP_TXCSUM;
 2289                         if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
 2290                                 ifp->if_hwassist |= VR_CSUM_FEATURES;
 2291                         else
 2292                                 ifp->if_hwassist &= ~VR_CSUM_FEATURES;
 2293                 }
 2294                 if ((mask & IFCAP_RXCSUM) != 0 &&
 2295                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
 2296                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2297                 if ((mask & IFCAP_WOL_UCAST) != 0 &&
 2298                     (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
 2299                         ifp->if_capenable ^= IFCAP_WOL_UCAST;
 2300                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 2301                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 2302                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 2303                 break;
 2304         default:
 2305                 error = ether_ioctl(ifp, command, data);
 2306                 break;
 2307         }
 2308 
 2309         return (error);
 2310 }
 2311 
 2312 static void
 2313 vr_watchdog(struct vr_softc *sc)
 2314 {
 2315         struct ifnet            *ifp;
 2316 
 2317         VR_LOCK_ASSERT(sc);
 2318 
 2319         if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
 2320                 return;
 2321 
 2322         ifp = sc->vr_ifp;
 2323         /*
 2324          * Reclaim first, as we don't request an interrupt for every packet.
 2325          */
 2326         vr_txeof(sc);
 2327         if (sc->vr_cdata.vr_tx_cnt == 0)
 2328                 return;
 2329 
 2330         if ((sc->vr_flags & VR_F_LINK) == 0) {
 2331                 if (bootverbose)
 2332                         if_printf(sc->vr_ifp, "watchdog timeout "
 2333                            "(missed link)\n");
 2334                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2335                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2336                 vr_init_locked(sc);
 2337                 return;
 2338         }
 2339 
 2340         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2341         if_printf(ifp, "watchdog timeout\n");
 2342 
 2343         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2344         vr_init_locked(sc);
 2345 
 2346         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2347                 vr_start_locked(ifp);
 2348 }
 2349 
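      /*
       * (Re)start the transmit DMA engine: if the transmitter was stopped,
       * reload the descriptor pointer from the current consumer index and
       * turn it back on; if frames are still pending, kick TX_GO and
       * re-arm the watchdog.
       */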
 2350 static void
 2351 vr_tx_start(struct vr_softc *sc)
 2352 {
 2353         bus_addr_t      addr;
 2354         uint8_t         cmd;
 2355 
 2356         cmd = CSR_READ_1(sc, VR_CR0);
 2357         if ((cmd & VR_CR0_TX_ON) == 0) {
 2358                 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
 2359                 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
 2360                 cmd |= VR_CR0_TX_ON;
 2361                 CSR_WRITE_1(sc, VR_CR0, cmd);
 2362         }
 2363         if (sc->vr_cdata.vr_tx_cnt != 0) {
 2364                 sc->vr_watchdog_timer = 5;
 2365                 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
 2366         }
 2367 }
 2368 
 2369 static void
 2370 vr_rx_start(struct vr_softc *sc)
 2371 {
 2372         bus_addr_t      addr;
 2373         uint8_t         cmd;
 2374 
 2375         cmd = CSR_READ_1(sc, VR_CR0);
 2376         if ((cmd & VR_CR0_RX_ON) == 0) {
 2377                 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
 2378                 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
 2379                 cmd |= VR_CR0_RX_ON;
 2380                 CSR_WRITE_1(sc, VR_CR0, cmd);
 2381         }
 2382         CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
 2383 }
 2384 
 2385 static int
 2386 vr_tx_stop(struct vr_softc *sc)
 2387 {
 2388         int             i;
 2389         uint8_t         cmd;
 2390 
 2391         cmd = CSR_READ_1(sc, VR_CR0);
 2392         if ((cmd & VR_CR0_TX_ON) != 0) {
 2393                 cmd &= ~VR_CR0_TX_ON;
 2394                 CSR_WRITE_1(sc, VR_CR0, cmd);
 2395                 for (i = VR_TIMEOUT; i > 0; i--) {
 2396                         DELAY(5);
 2397                         cmd = CSR_READ_1(sc, VR_CR0);
 2398                         if ((cmd & VR_CR0_TX_ON) == 0)
 2399                                 break;
 2400                 }
 2401                 if (i == 0)
 2402                         return (ETIMEDOUT);
 2403         }
 2404         return (0);
 2405 }
 2406 
 2407 static int
 2408 vr_rx_stop(struct vr_softc *sc)
 2409 {
 2410         int             i;
 2411         uint8_t         cmd;
 2412 
 2413         cmd = CSR_READ_1(sc, VR_CR0);
 2414         if ((cmd & VR_CR0_RX_ON) != 0) {
 2415                 cmd &= ~VR_CR0_RX_ON;
 2416                 CSR_WRITE_1(sc, VR_CR0, cmd);
 2417                 for (i = VR_TIMEOUT; i > 0; i--) {
 2418                         DELAY(5);
 2419                         cmd = CSR_READ_1(sc, VR_CR0);
 2420                         if ((cmd & VR_CR0_RX_ON) == 0)
 2421                                 break;
 2422                 }
 2423                 if (i == 0)
 2424                         return (ETIMEDOUT);
 2425         }
 2426         return (0);
 2427 }
 2428 
 2429 /*
 2430  * Stop the adapter and free any mbufs allocated to the
 2431  * RX and TX lists.
 2432  */
 2433 static void
 2434 vr_stop(struct vr_softc *sc)
 2435 {
 2436         struct vr_txdesc        *txd;
 2437         struct vr_rxdesc        *rxd;
 2438         struct ifnet            *ifp;
 2439         int                     i;
 2440 
 2441         VR_LOCK_ASSERT(sc);
 2442 
 2443         ifp = sc->vr_ifp;
 2444         sc->vr_watchdog_timer = 0;
 2445 
 2446         callout_stop(&sc->vr_stat_callout);
 2447         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2448 
 2449         CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
 2450         if (vr_rx_stop(sc) != 0)
 2451                 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
 2452         if (vr_tx_stop(sc) != 0)
 2453                 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
 2454         /* Clear pending interrupts. */
 2455         CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
 2456         CSR_WRITE_2(sc, VR_IMR, 0x0000);
 2457         CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
 2458         CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
 2459 
 2460         /*
 2461          * Free RX and TX mbufs still in the queues.
 2462          */
 2463         for (i = 0; i < VR_RX_RING_CNT; i++) {
 2464                 rxd = &sc->vr_cdata.vr_rxdesc[i];
 2465                 if (rxd->rx_m != NULL) {
 2466                         bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
 2467                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 2468                         bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
 2469                             rxd->rx_dmamap);
 2470                         m_freem(rxd->rx_m);
 2471                         rxd->rx_m = NULL;
 2472                 }
 2473         }
 2474         for (i = 0; i < VR_TX_RING_CNT; i++) {
 2475                 txd = &sc->vr_cdata.vr_txdesc[i];
 2476                 if (txd->tx_m != NULL) {
 2477                         bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
 2478                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 2479                         bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
 2480                             txd->tx_dmamap);
 2481                         m_freem(txd->tx_m);
 2482                         txd->tx_m = NULL;
 2483                 }
 2484         }
 2485 }
 2486 
 2487 /*
 2488  * Stop all chip I/O so that the kernel's probe routines don't
 2489  * get confused by errant DMAs when rebooting.
 2490  */
 2491 static int
 2492 vr_shutdown(device_t dev)
 2493 {
 2494 
 2495         return (vr_suspend(dev));
 2496 }
 2497 
 2498 static int
 2499 vr_suspend(device_t dev)
 2500 {
 2501         struct vr_softc         *sc;
 2502 
 2503         sc = device_get_softc(dev);
 2504 
 2505         VR_LOCK(sc);
 2506         vr_stop(sc);
 2507         vr_setwol(sc);
 2508         sc->vr_flags |= VR_F_SUSPENDED;
 2509         VR_UNLOCK(sc);
 2510 
 2511         return (0);
 2512 }
 2513 
 2514 static int
 2515 vr_resume(device_t dev)
 2516 {
 2517         struct vr_softc         *sc;
 2518         struct ifnet            *ifp;
 2519 
 2520         sc = device_get_softc(dev);
 2521 
 2522         VR_LOCK(sc);
 2523         ifp = sc->vr_ifp;
 2524         vr_clrwol(sc);
 2525         vr_reset(sc);
 2526         if (ifp->if_flags & IFF_UP)
 2527                 vr_init_locked(sc);
 2528 
 2529         sc->vr_flags &= ~VR_F_SUSPENDED;
 2530         VR_UNLOCK(sc);
 2531 
 2532         return (0);
 2533 }
 2534 
 2535 static void
 2536 vr_setwol(struct vr_softc *sc)
 2537 {
 2538         struct ifnet            *ifp;
 2539         int                     pmc;
 2540         uint16_t                pmstat;
 2541         uint8_t                 v;
 2542 
 2543         VR_LOCK_ASSERT(sc);
 2544 
 2545         if (sc->vr_revid < REV_ID_VT6102_A ||
 2546             pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
 2547                 return;
 2548 
 2549         ifp = sc->vr_ifp;
 2550 
 2551         /* Clear WOL configuration. */
 2552         CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
 2553         CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
 2554         CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
 2555         CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
 2556         if (sc->vr_revid > REV_ID_VT6105_B0) {
 2557                 /* Newer Rhine III supports two additional patterns. */
 2558                 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
 2559                 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
 2560                 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
 2561         }
 2562         if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
 2563                 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
 2564         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 2565                 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
 2566         /*
 2567          * It seems that multicast wakeup frames require programming the
 2568          * pattern registers with a valid CRC as well as a pattern mask
 2569          * for each pattern. Setting that up would complicate the WOL
 2570          * configuration, so multicast wakeup frames are ignored.
 2571          */
 2572         if ((ifp->if_capenable & IFCAP_WOL) != 0) {
 2573                 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
 2574                 v = CSR_READ_1(sc, VR_STICKHW);
 2575                 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
 2576                 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
 2577         }
 2578 
 2579         /* Put the hardware to sleep. */
 2580         v = CSR_READ_1(sc, VR_STICKHW);
 2581         v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
 2582         CSR_WRITE_1(sc, VR_STICKHW, v);
 2583 
 2584         /* Request PME if WOL is requested. */
 2585         pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
 2586         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 2587         if ((ifp->if_capenable & IFCAP_WOL) != 0)
 2588                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 2589         pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 2590 }
 2591 
 2592 static void
 2593 vr_clrwol(struct vr_softc *sc)
 2594 {
 2595         uint8_t                 v;
 2596 
 2597         VR_LOCK_ASSERT(sc);
 2598 
 2599         if (sc->vr_revid < REV_ID_VT6102_A)
 2600                 return;
 2601 
 2602         /* Take hardware out of sleep. */
 2603         v = CSR_READ_1(sc, VR_STICKHW);
 2604         v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
 2605         CSR_WRITE_1(sc, VR_STICKHW, v);
 2606 
 2607         /* Clear WOL configuration as WOL may interfere with normal operation. */
 2608         CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
 2609         CSR_WRITE_1(sc, VR_WOLCFG_CLR,
 2610             VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
 2611         CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
 2612         CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
 2613         if (sc->vr_revid > REV_ID_VT6105_B0) {
 2614                 /* Newer Rhine III supports two additional patterns. */
 2615                 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
 2616                 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
 2617                 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
 2618         }
 2619 }
 2620 
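      /*
       * Sysctl handler that dumps the accumulated driver statistics to the
       * console when the value 1 is written to the associated OID (the OID
       * itself is registered when the device attaches).
       */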
 2621 static int
 2622 vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
 2623 {
 2624         struct vr_softc         *sc;
 2625         struct vr_statistics    *stat;
 2626         int                     error;
 2627         int                     result;
 2628 
 2629         result = -1;
 2630         error = sysctl_handle_int(oidp, &result, 0, req);
 2631 
 2632         if (error != 0 || req->newptr == NULL)
 2633                 return (error);
 2634 
 2635         if (result == 1) {
 2636                 sc = (struct vr_softc *)arg1;
 2637                 stat = &sc->vr_stat;
 2638 
 2639                 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
 2640                 printf("Outbound good frames : %ju\n",
 2641                     (uintmax_t)stat->tx_ok);
 2642                 printf("Inbound good frames : %ju\n",
 2643                     (uintmax_t)stat->rx_ok);
 2644                 printf("Outbound errors : %u\n", stat->tx_errors);
 2645                 printf("Inbound errors : %u\n", stat->rx_errors);
 2646                 printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
 2647                 printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
 2648                 printf("Inbound FIFO overflows : %d\n",
 2649                     stat->rx_fifo_overflows);
 2650                 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
 2651                 printf("Inbound frame alignment errors : %u\n",
 2652                     stat->rx_alignment);
 2653                 printf("Inbound giant frames : %u\n", stat->rx_giants);
 2654                 printf("Inbound runt frames : %u\n", stat->rx_runts);
 2655                 printf("Outbound aborted with excessive collisions : %u\n",
 2656                     stat->tx_abort);
 2657                 printf("Outbound collisions : %u\n", stat->tx_collisions);
 2658                 printf("Outbound late collisions : %u\n",
 2659                     stat->tx_late_collisions);
 2660                 printf("Outbound underrun : %u\n", stat->tx_underrun);
 2661                 printf("PCI bus errors : %u\n", stat->bus_errors);
 2662                 printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
 2663                     stat->num_restart);
 2664         }
 2665 
 2666         return (error);
 2667 }
