FreeBSD/Linux Kernel Cross Reference
sys/dev/vge/if_vge.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-4-Clause
    3  *
    4  * Copyright (c) 2004
    5  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 /*
   39  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
   40  *
   41  * Written by Bill Paul <wpaul@windriver.com>
   42  * Senior Networking Software Engineer
   43  * Wind River Systems
   44  */
   45 
   46 /*
   47  * The VIA Networking VT6122 is a 32-bit, 33/66 MHz PCI device that
   48  * combines a tri-speed ethernet MAC and PHY, with the following
   49  * features:
   50  *
   51  *      o Jumbo frame support up to 16K
   52  *      o Transmit and receive flow control
   53  *      o IPv4 checksum offload
   54  *      o VLAN tag insertion and stripping
   55  *      o TCP large send
   56  *      o 64-bit multicast hash table filter
   57  *      o 64 entry CAM filter
   58  *      o 16K RX FIFO and 48K TX FIFO memory
   59  *      o Interrupt moderation
   60  *
   61  * The VT6122 supports up to four transmit DMA queues. The descriptors
   62  * in the transmit ring can address up to 7 data fragments; frames which
   63  * span more than 7 data buffers must be coalesced, but in general the
   64  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
   65  * long. The receive descriptors address only a single buffer.
   66  *
   67  * There are two peculiar design issues with the VT6122. One is that
   68  * receive data buffers must be aligned on a 32-bit boundary. This is
   69  * not a problem where the VT6122 is used as a LOM device in x86-based
   70  * systems, but on architectures that generate unaligned access traps, we
   71  * have to do some copying.
   72  *
   73  * The other issue has to do with the way 64-bit addresses are handled.
   74  * The DMA descriptors only allow you to specify 48 bits of addressing
   75  * information. The remaining 16 bits are specified using one of the
   76  * I/O registers. If you only have a 32-bit system, then this isn't
   77  * an issue, but if you have a 64-bit system and more than 4GB of
   78  * memory, you have to make sure your network data buffers reside
   79  * in the same 48-bit 'segment.'
   80  *
   81  * Special thanks to Ryan Fu at VIA Networking for providing documentation
   82  * and sample NICs for testing.
   83  */
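/*
 * Illustrative sketch of the 48-bit constraint described above; it is
 * not part of the driver and the names are hypothetical.  Because a
 * descriptor carries only the low 48 address bits while the upper 16
 * bits are programmed globally through an I/O register, every buffer
 * must fit inside a single 48-bit "segment":
 */
#if 0
#define	VGE_SEG48(pa)	((uint64_t)(pa) >> 48)	/* upper 16 bits */

	if (VGE_SEG48(buf_paddr) != VGE_SEG48(buf_paddr + buf_len - 1))
		return (EFBIG);		/* crosses a 48-bit segment */
#endif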
   84 
   85 #ifdef HAVE_KERNEL_OPTION_HEADERS
   86 #include "opt_device_polling.h"
   87 #endif
   88 
   89 #include <sys/param.h>
   90 #include <sys/endian.h>
   91 #include <sys/systm.h>
   92 #include <sys/sockio.h>
   93 #include <sys/mbuf.h>
   94 #include <sys/malloc.h>
   95 #include <sys/module.h>
   96 #include <sys/kernel.h>
   97 #include <sys/socket.h>
   98 #include <sys/sysctl.h>
   99 
  100 #include <net/if.h>
  101 #include <net/if_arp.h>
  102 #include <net/ethernet.h>
  103 #include <net/if_dl.h>
  104 #include <net/if_var.h>
  105 #include <net/if_media.h>
  106 #include <net/if_types.h>
  107 #include <net/if_vlan_var.h>
  108 
  109 #include <net/bpf.h>
  110 
  111 #include <machine/bus.h>
  112 #include <machine/resource.h>
  113 #include <sys/bus.h>
  114 #include <sys/rman.h>
  115 
  116 #include <dev/mii/mii.h>
  117 #include <dev/mii/miivar.h>
  118 
  119 #include <dev/pci/pcireg.h>
  120 #include <dev/pci/pcivar.h>
  121 
  122 MODULE_DEPEND(vge, pci, 1, 1, 1);
  123 MODULE_DEPEND(vge, ether, 1, 1, 1);
  124 MODULE_DEPEND(vge, miibus, 1, 1, 1);
  125 
  126 /* "device miibus" required.  See GENERIC if you get errors here. */
  127 #include "miibus_if.h"
  128 
  129 #include <dev/vge/if_vgereg.h>
  130 #include <dev/vge/if_vgevar.h>
  131 
  132 #define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  133 
  134 /* Tunables */
  135 static int msi_disable = 0;
  136 TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
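/*
 * The tunable is read from the kernel environment at boot; for example,
 * MSI can be disabled by adding the line
 *
 *	hw.vge.msi_disable="1"
 *
 * to /boot/loader.conf.
 */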
  137 
  138 /*
  139  * The SQE error counter of the MIB seems to report bogus values.
  140  * The vendor's workaround does not seem to work on PCIe based
  141  * controllers. Disable it until we find a better workaround.
  142  */
  143 #undef VGE_ENABLE_SQEERR
  144 
  145 /*
  146  * Various supported device vendors/types and their names.
  147  */
  148 static struct vge_type vge_devs[] = {
  149         { VIA_VENDORID, VIA_DEVICEID_61XX,
  150                 "VIA Networking Velocity Gigabit Ethernet" },
  151         { 0, 0, NULL }
  152 };
  153 
  154 static int      vge_attach(device_t);
  155 static int      vge_detach(device_t);
  156 static int      vge_probe(device_t);
  157 static int      vge_resume(device_t);
  158 static int      vge_shutdown(device_t);
  159 static int      vge_suspend(device_t);
  160 
  161 static void     vge_cam_clear(struct vge_softc *);
  162 static int      vge_cam_set(struct vge_softc *, uint8_t *);
  163 static void     vge_clrwol(struct vge_softc *);
  164 static void     vge_discard_rxbuf(struct vge_softc *, int);
  165 static int      vge_dma_alloc(struct vge_softc *);
  166 static void     vge_dma_free(struct vge_softc *);
  167 static void     vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  168 #ifdef VGE_EEPROM
  169 static void     vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
  170 #endif
  171 static int      vge_encap(struct vge_softc *, struct mbuf **);
  172 #ifndef __NO_STRICT_ALIGNMENT
  173 static __inline void
  174                 vge_fixup_rx(struct mbuf *);
  175 #endif
  176 static void     vge_freebufs(struct vge_softc *);
  177 static void     vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  178 static int      vge_ifmedia_upd(struct ifnet *);
  179 static int      vge_ifmedia_upd_locked(struct vge_softc *);
  180 static void     vge_init(void *);
  181 static void     vge_init_locked(struct vge_softc *);
  182 static void     vge_intr(void *);
  183 static void     vge_intr_holdoff(struct vge_softc *);
  184 static int      vge_ioctl(struct ifnet *, u_long, caddr_t);
  185 static void     vge_link_statchg(void *);
  186 static int      vge_miibus_readreg(device_t, int, int);
  187 static int      vge_miibus_writereg(device_t, int, int, int);
  188 static void     vge_miipoll_start(struct vge_softc *);
  189 static void     vge_miipoll_stop(struct vge_softc *);
  190 static int      vge_newbuf(struct vge_softc *, int);
  191 static void     vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
  192 static void     vge_reset(struct vge_softc *);
  193 static int      vge_rx_list_init(struct vge_softc *);
  194 static int      vge_rxeof(struct vge_softc *, int);
  195 static void     vge_rxfilter(struct vge_softc *);
  196 static void     vge_setmedia(struct vge_softc *);
  197 static void     vge_setvlan(struct vge_softc *);
  198 static void     vge_setwol(struct vge_softc *);
  199 static void     vge_start(struct ifnet *);
  200 static void     vge_start_locked(struct ifnet *);
  201 static void     vge_stats_clear(struct vge_softc *);
  202 static void     vge_stats_update(struct vge_softc *);
  203 static void     vge_stop(struct vge_softc *);
  204 static void     vge_sysctl_node(struct vge_softc *);
  205 static int      vge_tx_list_init(struct vge_softc *);
  206 static void     vge_txeof(struct vge_softc *);
  207 static void     vge_watchdog(void *);
  208 
  209 static device_method_t vge_methods[] = {
  210         /* Device interface */
  211         DEVMETHOD(device_probe,         vge_probe),
  212         DEVMETHOD(device_attach,        vge_attach),
  213         DEVMETHOD(device_detach,        vge_detach),
  214         DEVMETHOD(device_suspend,       vge_suspend),
  215         DEVMETHOD(device_resume,        vge_resume),
  216         DEVMETHOD(device_shutdown,      vge_shutdown),
  217 
  218         /* MII interface */
  219         DEVMETHOD(miibus_readreg,       vge_miibus_readreg),
  220         DEVMETHOD(miibus_writereg,      vge_miibus_writereg),
  221 
  222         DEVMETHOD_END
  223 };
  224 
  225 static driver_t vge_driver = {
  226         "vge",
  227         vge_methods,
  228         sizeof(struct vge_softc)
  229 };
  230 
  231 DRIVER_MODULE(vge, pci, vge_driver, 0, 0);
  232 DRIVER_MODULE(miibus, vge, miibus_driver, 0, 0);
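/*
 * The driver can be compiled into the kernel ("device vge" together
 * with "device miibus" in the kernel configuration) or, assuming the
 * standard module build, loaded at runtime with:
 *
 *	# kldload if_vge
 */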
  233 
  234 #ifdef VGE_EEPROM
  235 /*
  236  * Read a word of data stored in the EEPROM at address 'addr.'
  237  */
  238 static void
  239 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
  240 {
  241         int i;
  242         uint16_t word = 0;
  243 
  244         /*
  245          * Enter EEPROM embedded programming mode. In order to
  246          * access the EEPROM at all, we first have to set the
  247          * EELOAD bit in the CHIPCFG2 register.
  248          */
  249         CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  250         CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  251 
  252         /* Select the address of the word we want to read */
  253         CSR_WRITE_1(sc, VGE_EEADDR, addr);
  254 
  255         /* Issue read command */
  256         CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
  257 
  258         /* Wait for the done bit to be set. */
  259         for (i = 0; i < VGE_TIMEOUT; i++) {
  260                 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
  261                         break;
  262         }
  263 
  264         if (i == VGE_TIMEOUT) {
  265                 device_printf(sc->vge_dev, "EEPROM read timed out\n");
  266                 *dest = 0;
  267                 return;
  268         }
  269 
  270         /* Read the result */
  271         word = CSR_READ_2(sc, VGE_EERDDAT);
  272 
  273         /* Turn off EEPROM access mode. */
  274         CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  275         CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  276 
  277         *dest = word;
  278 }
  279 #endif
  280 
  281 /*
  282  * Read a sequence of words from the EEPROM.
  283  */
  284 static void
  285 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
  286 {
  287         int i;
  288 #ifdef VGE_EEPROM
  289         uint16_t word = 0, *ptr;
  290 
  291         for (i = 0; i < cnt; i++) {
  292                 vge_eeprom_getword(sc, off + i, &word);
  293                 ptr = (uint16_t *)(dest + (i * 2));
  294                 if (swap)
  295                         *ptr = ntohs(word);
  296                 else
  297                         *ptr = word;
  298         }
  299 #else
  300         for (i = 0; i < ETHER_ADDR_LEN; i++)
  301                 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
  302 #endif
  303 }
  304 
  305 static void
  306 vge_miipoll_stop(struct vge_softc *sc)
  307 {
  308         int i;
  309 
  310         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  311 
  312         for (i = 0; i < VGE_TIMEOUT; i++) {
  313                 DELAY(1);
  314                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  315                         break;
  316         }
  317 
  318         if (i == VGE_TIMEOUT)
  319                 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
  320 }
  321 
  322 static void
  323 vge_miipoll_start(struct vge_softc *sc)
  324 {
  325         int i;
  326 
  327         /* First, make sure we're idle. */
  328 
  329         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  330         CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
  331 
  332         for (i = 0; i < VGE_TIMEOUT; i++) {
  333                 DELAY(1);
  334                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  335                         break;
  336         }
  337 
  338         if (i == VGE_TIMEOUT) {
  339                 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
  340                 return;
  341         }
  342 
  343         /* Now enable auto poll mode. */
  344 
  345         CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
  346 
  347         /* And make sure it started. */
  348 
  349         for (i = 0; i < VGE_TIMEOUT; i++) {
  350                 DELAY(1);
  351                 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
  352                         break;
  353         }
  354 
  355         if (i == VGE_TIMEOUT)
  356                 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
  357 }
  358 
  359 static int
  360 vge_miibus_readreg(device_t dev, int phy, int reg)
  361 {
  362         struct vge_softc *sc;
  363         int i;
  364         uint16_t rval = 0;
  365 
  366         sc = device_get_softc(dev);
  367 
  368         vge_miipoll_stop(sc);
  369 
  370         /* Specify the register we want to read. */
  371         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  372 
  373         /* Issue read command. */
  374         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
  375 
  376         /* Wait for the read command bit to self-clear. */
  377         for (i = 0; i < VGE_TIMEOUT; i++) {
  378                 DELAY(1);
  379                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
  380                         break;
  381         }
  382 
  383         if (i == VGE_TIMEOUT)
  384                 device_printf(sc->vge_dev, "MII read timed out\n");
  385         else
  386                 rval = CSR_READ_2(sc, VGE_MIIDATA);
  387 
  388         vge_miipoll_start(sc);
  389 
  390         return (rval);
  391 }
  392 
  393 static int
  394 vge_miibus_writereg(device_t dev, int phy, int reg, int data)
  395 {
  396         struct vge_softc *sc;
  397         int i, rval = 0;
  398 
  399         sc = device_get_softc(dev);
  400 
  401         vge_miipoll_stop(sc);
  402 
  403         /* Specify the register we want to write. */
  404         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  405 
  406         /* Specify the data we want to write. */
  407         CSR_WRITE_2(sc, VGE_MIIDATA, data);
  408 
  409         /* Issue write command. */
  410         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
  411 
  412         /* Wait for the write command bit to self-clear. */
  413         for (i = 0; i < VGE_TIMEOUT; i++) {
  414                 DELAY(1);
  415                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
  416                         break;
  417         }
  418 
  419         if (i == VGE_TIMEOUT) {
  420                 device_printf(sc->vge_dev, "MII write timed out\n");
  421                 rval = EIO;
  422         }
  423 
  424         vge_miipoll_start(sc);
  425 
  426         return (rval);
  427 }
  428 
  429 static void
  430 vge_cam_clear(struct vge_softc *sc)
  431 {
  432         int i;
  433 
  434         /*
  435          * Turn off all the mask bits. This tells the chip
  436          * that none of the entries in the CAM filter are valid.
  437          * Desired entries will be enabled as we fill the filter in.
  438          */
  439 
  440         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  441         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  442         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
  443         for (i = 0; i < 8; i++)
  444                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  445 
  446         /* Clear the VLAN filter too. */
  447 
  448         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
  449         for (i = 0; i < 8; i++)
  450                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  451 
  452         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  453         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  454         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  455 
  456         sc->vge_camidx = 0;
  457 }
  458 
  459 static int
  460 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
  461 {
  462         int i, error = 0;
  463 
  464         if (sc->vge_camidx == VGE_CAM_MAXADDRS)
  465                 return (ENOSPC);
  466 
  467         /* Select the CAM data page. */
  468         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  469         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
  470 
  471         /* Set the filter entry we want to update and enable writing. */
  472         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
  473 
  474         /* Write the address to the CAM registers */
  475         for (i = 0; i < ETHER_ADDR_LEN; i++)
  476                 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
  477 
  478         /* Issue a write command. */
  479         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
  480 
  481         /* Wait for it to clear. */
  482         for (i = 0; i < VGE_TIMEOUT; i++) {
  483                 DELAY(1);
  484                 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
  485                         break;
  486         }
  487 
  488         if (i == VGE_TIMEOUT) {
  489                 device_printf(sc->vge_dev, "setting CAM filter failed\n");
  490                 error = EIO;
  491                 goto fail;
  492         }
  493 
  494         /* Select the CAM mask page. */
  495         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  496         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  497 
  498         /* Set the mask bit that enables this filter. */
  499         CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
  500             1<<(sc->vge_camidx & 7));
  501 
  502         sc->vge_camidx++;
  503 
  504 fail:
  505         /* Turn off access to CAM. */
  506         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  507         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  508         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  509 
  510         return (error);
  511 }
  512 
  513 static void
  514 vge_setvlan(struct vge_softc *sc)
  515 {
  516         struct ifnet *ifp;
  517         uint8_t cfg;
  518 
  519         VGE_LOCK_ASSERT(sc);
  520 
  521         ifp = sc->vge_ifp;
  522         cfg = CSR_READ_1(sc, VGE_RXCFG);
  523         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
  524                 cfg |= VGE_VTAG_OPT2;
  525         else
  526                 cfg &= ~VGE_VTAG_OPT2;
  527         CSR_WRITE_1(sc, VGE_RXCFG, cfg);
  528 }
  529 
  530 static u_int
  531 vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  532 {
  533         struct vge_softc *sc = arg;
  534 
  535         if (sc->vge_camidx == VGE_CAM_MAXADDRS)
  536                 return (0);
  537 
  538         (void)vge_cam_set(sc, LLADDR(sdl));
  539 
  540         return (1);
  541 }
  542 
  543 static u_int
  544 vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  545 {
  546         uint32_t h, *hashes = arg;
  547 
  548         h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
  549         if (h < 32)
  550                 hashes[0] |= (1 << h);
  551         else
  552                 hashes[1] |= (1 << (h - 32));
  553 
  554         return (1);
  555 }
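/*
 * The mapping above, summarized: ether_crc32_be() yields a 32-bit CRC
 * of the 6-byte address, and the shift by 26 keeps its top six bits,
 * an index of 0-63.  Indices 0-31 select a bit in VGE_MAR0
 * (hashes[0]); indices 32-63 select a bit in VGE_MAR1 (hashes[1]).
 */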
  556 
  557 /*
  558  * Program the multicast filter. We use the 64-entry CAM filter
  559  * for perfect filtering. If there are more than 64 multicast addresses,
  560  * we use the hash filter instead.
  561  */
  562 static void
  563 vge_rxfilter(struct vge_softc *sc)
  564 {
  565         struct ifnet *ifp;
  566         uint32_t hashes[2];
  567         uint8_t rxcfg;
  568 
  569         VGE_LOCK_ASSERT(sc);
  570 
  571         /* First, zot all the multicast entries. */
  572         hashes[0] = 0;
  573         hashes[1] = 0;
  574 
  575         rxcfg = CSR_READ_1(sc, VGE_RXCTL);
  576         rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
  577             VGE_RXCTL_RX_PROMISC);
  578         /*
  579          * Always allow VLAN oversized frames and frames for
  580          * this host.
  581          */
  582         rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;
  583 
  584         ifp = sc->vge_ifp;
  585         if ((ifp->if_flags & IFF_BROADCAST) != 0)
  586                 rxcfg |= VGE_RXCTL_RX_BCAST;
  587         if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
  588                 if ((ifp->if_flags & IFF_PROMISC) != 0)
  589                         rxcfg |= VGE_RXCTL_RX_PROMISC;
  590                 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
  591                         hashes[0] = 0xFFFFFFFF;
  592                         hashes[1] = 0xFFFFFFFF;
  593                 }
  594                 goto done;
  595         }
  596 
  597         vge_cam_clear(sc);
  598 
  599         /* Now program new ones */
  600         if_foreach_llmaddr(ifp, vge_set_maddr, sc);
  601 
  602         /* If there were too many addresses, use the hash filter. */
  603         if (sc->vge_camidx == VGE_CAM_MAXADDRS) {
  604                 vge_cam_clear(sc);
  605                 if_foreach_llmaddr(ifp, vge_hash_maddr, hashes);
  606         }
  607 
  608 done:
  609         if (hashes[0] != 0 || hashes[1] != 0)
  610                 rxcfg |= VGE_RXCTL_RX_MCAST;
  611         CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
  612         CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
  613         CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
  614 }
  615 
  616 static void
  617 vge_reset(struct vge_softc *sc)
  618 {
  619         int i;
  620 
  621         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
  622 
  623         for (i = 0; i < VGE_TIMEOUT; i++) {
  624                 DELAY(5);
  625                 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
  626                         break;
  627         }
  628 
  629         if (i == VGE_TIMEOUT) {
  630                 device_printf(sc->vge_dev, "soft reset timed out\n");
  631                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
  632                 DELAY(2000);
  633         }
  634 
  635         DELAY(5000);
  636 }
  637 
  638 /*
  639  * Probe for a VIA gigabit chip. Check the PCI vendor and device
  640  * IDs against our list and return a device name if we find a match.
  641  */
  642 static int
  643 vge_probe(device_t dev)
  644 {
  645         struct vge_type *t;
  646 
  647         t = vge_devs;
  648 
  649         while (t->vge_name != NULL) {
  650                 if ((pci_get_vendor(dev) == t->vge_vid) &&
  651                     (pci_get_device(dev) == t->vge_did)) {
  652                         device_set_desc(dev, t->vge_name);
  653                         return (BUS_PROBE_DEFAULT);
  654                 }
  655                 t++;
  656         }
  657 
  658         return (ENXIO);
  659 }
  660 
  661 /*
  662  * Map a single buffer address.
  663  */
  664 
  665 struct vge_dmamap_arg {
  666         bus_addr_t      vge_busaddr;
  667 };
  668 
  669 static void
  670 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  671 {
  672         struct vge_dmamap_arg *ctx;
  673 
  674         if (error != 0)
  675                 return;
  676 
  677         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  678 
  679         ctx = (struct vge_dmamap_arg *)arg;
  680         ctx->vge_busaddr = segs[0].ds_addr;
  681 }
  682 
  683 static int
  684 vge_dma_alloc(struct vge_softc *sc)
  685 {
  686         struct vge_dmamap_arg ctx;
  687         struct vge_txdesc *txd;
  688         struct vge_rxdesc *rxd;
  689         bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
  690         int error, i;
  691 
  692         /*
  693          * It seems old PCI controllers do not support DAC (dual
  694          * address cycle).  DAC can be enabled via the VGE_CHIPCFG3
  695          * register, but we honor the EEPROM configuration instead
  696          * of blindly overriding it.  PCIe based controllers are
  697          * supposed to support 64bit DMA, so enable 64bit DMA on
  698          * these controllers.
  699          */
  700         if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
  701                 lowaddr = BUS_SPACE_MAXADDR;
  702         else
  703                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
  704 
  705 again:
  706         /* Create parent ring tag. */
  707         error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
  708             1, 0,                       /* algnmnt, boundary */
  709             lowaddr,                    /* lowaddr */
  710             BUS_SPACE_MAXADDR,          /* highaddr */
  711             NULL, NULL,                 /* filter, filterarg */
  712             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
  713             0,                          /* nsegments */
  714             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
  715             0,                          /* flags */
  716             NULL, NULL,                 /* lockfunc, lockarg */
  717             &sc->vge_cdata.vge_ring_tag);
  718         if (error != 0) {
  719                 device_printf(sc->vge_dev,
  720                     "could not create parent DMA tag.\n");
  721                 goto fail;
  722         }
  723 
  724         /* Create tag for Tx ring. */
  725         error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
  726             VGE_TX_RING_ALIGN, 0,       /* algnmnt, boundary */
  727             BUS_SPACE_MAXADDR,          /* lowaddr */
  728             BUS_SPACE_MAXADDR,          /* highaddr */
  729             NULL, NULL,                 /* filter, filterarg */
  730             VGE_TX_LIST_SZ,             /* maxsize */
  731             1,                          /* nsegments */
  732             VGE_TX_LIST_SZ,             /* maxsegsize */
  733             0,                          /* flags */
  734             NULL, NULL,                 /* lockfunc, lockarg */
  735             &sc->vge_cdata.vge_tx_ring_tag);
  736         if (error != 0) {
  737                 device_printf(sc->vge_dev,
  738                     "could not allocate Tx ring DMA tag.\n");
  739                 goto fail;
  740         }
  741 
  742         /* Create tag for Rx ring. */
  743         error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
  744             VGE_RX_RING_ALIGN, 0,       /* algnmnt, boundary */
  745             BUS_SPACE_MAXADDR,          /* lowaddr */
  746             BUS_SPACE_MAXADDR,          /* highaddr */
  747             NULL, NULL,                 /* filter, filterarg */
  748             VGE_RX_LIST_SZ,             /* maxsize */
  749             1,                          /* nsegments */
  750             VGE_RX_LIST_SZ,             /* maxsegsize */
  751             0,                          /* flags */
  752             NULL, NULL,                 /* lockfunc, lockarg */
  753             &sc->vge_cdata.vge_rx_ring_tag);
  754         if (error != 0) {
  755                 device_printf(sc->vge_dev,
  756                     "could not allocate Rx ring DMA tag.\n");
  757                 goto fail;
  758         }
  759 
  760         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
  761         error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
  762             (void **)&sc->vge_rdata.vge_tx_ring,
  763             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  764             &sc->vge_cdata.vge_tx_ring_map);
  765         if (error != 0) {
  766                 device_printf(sc->vge_dev,
  767                     "could not allocate DMA'able memory for Tx ring.\n");
  768                 goto fail;
  769         }
  770 
  771         ctx.vge_busaddr = 0;
  772         error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
  773             sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
  774             VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
  775         if (error != 0 || ctx.vge_busaddr == 0) {
  776                 device_printf(sc->vge_dev,
  777                     "could not load DMA'able memory for Tx ring.\n");
  778                 goto fail;
  779         }
  780         sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
  781 
  782         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
  783         error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
  784             (void **)&sc->vge_rdata.vge_rx_ring,
  785             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  786             &sc->vge_cdata.vge_rx_ring_map);
  787         if (error != 0) {
  788                 device_printf(sc->vge_dev,
  789                     "could not allocate DMA'able memory for Rx ring.\n");
  790                 goto fail;
  791         }
  792 
  793         ctx.vge_busaddr = 0;
  794         error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
  795             sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
  796             VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
  797         if (error != 0 || ctx.vge_busaddr == 0) {
  798                 device_printf(sc->vge_dev,
  799                     "could not load DMA'able memory for Rx ring.\n");
  800                 goto fail;
  801         }
  802         sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
  803 
  804         /* Tx and Rx descriptor rings must not cross a 4GB boundary. */
  805         tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
  806         rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
  807         if ((VGE_ADDR_HI(tx_ring_end) !=
  808             VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
  809             (VGE_ADDR_HI(rx_ring_end) !=
  810             VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
  811             VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
  812                 device_printf(sc->vge_dev, "4GB boundary crossed, "
  813                     "switching to 32bit DMA address mode.\n");
  814                 vge_dma_free(sc);
  815                 /* Limit DMA address space to 32bit and try again. */
  816                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
  817                 goto again;
  818         }
  819 
  820         if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
  821                 lowaddr = VGE_BUF_DMA_MAXADDR;
  822         else
  823                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
  824         /* Create parent buffer tag. */
  825         error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
  826             1, 0,                       /* algnmnt, boundary */
  827             lowaddr,                    /* lowaddr */
  828             BUS_SPACE_MAXADDR,          /* highaddr */
  829             NULL, NULL,                 /* filter, filterarg */
  830             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
  831             0,                          /* nsegments */
  832             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
  833             0,                          /* flags */
  834             NULL, NULL,                 /* lockfunc, lockarg */
  835             &sc->vge_cdata.vge_buffer_tag);
  836         if (error != 0) {
  837                 device_printf(sc->vge_dev,
  838                     "could not create parent buffer DMA tag.\n");
  839                 goto fail;
  840         }
  841 
  842         /* Create tag for Tx buffers. */
  843         error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
  844             1, 0,                       /* algnmnt, boundary */
  845             BUS_SPACE_MAXADDR,          /* lowaddr */
  846             BUS_SPACE_MAXADDR,          /* highaddr */
  847             NULL, NULL,                 /* filter, filterarg */
  848             MCLBYTES * VGE_MAXTXSEGS,   /* maxsize */
  849             VGE_MAXTXSEGS,              /* nsegments */
  850             MCLBYTES,                   /* maxsegsize */
  851             0,                          /* flags */
  852             NULL, NULL,                 /* lockfunc, lockarg */
  853             &sc->vge_cdata.vge_tx_tag);
  854         if (error != 0) {
  855                 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
  856                 goto fail;
  857         }
  858 
  859         /* Create tag for Rx buffers. */
  860         error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
  861             VGE_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
  862             BUS_SPACE_MAXADDR,          /* lowaddr */
  863             BUS_SPACE_MAXADDR,          /* highaddr */
  864             NULL, NULL,                 /* filter, filterarg */
  865             MCLBYTES,                   /* maxsize */
  866             1,                          /* nsegments */
  867             MCLBYTES,                   /* maxsegsize */
  868             0,                          /* flags */
  869             NULL, NULL,                 /* lockfunc, lockarg */
  870             &sc->vge_cdata.vge_rx_tag);
  871         if (error != 0) {
  872                 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
  873                 goto fail;
  874         }
  875 
  876         /* Create DMA maps for Tx buffers. */
  877         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
  878                 txd = &sc->vge_cdata.vge_txdesc[i];
  879                 txd->tx_m = NULL;
  880                 txd->tx_dmamap = NULL;
  881                 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
  882                     &txd->tx_dmamap);
  883                 if (error != 0) {
  884                         device_printf(sc->vge_dev,
  885                             "could not create Tx dmamap.\n");
  886                         goto fail;
  887                 }
  888         }
  889         /* Create DMA maps for Rx buffers. */
  890         if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
  891             &sc->vge_cdata.vge_rx_sparemap)) != 0) {
  892                 device_printf(sc->vge_dev,
  893                     "could not create spare Rx dmamap.\n");
  894                 goto fail;
  895         }
  896         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
  897                 rxd = &sc->vge_cdata.vge_rxdesc[i];
  898                 rxd->rx_m = NULL;
  899                 rxd->rx_dmamap = NULL;
  900                 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
  901                     &rxd->rx_dmamap);
  902                 if (error != 0) {
  903                         device_printf(sc->vge_dev,
  904                             "could not create Rx dmamap.\n");
  905                         goto fail;
  906                 }
  907         }
  908 
  909 fail:
  910         return (error);
  911 }
  912 
  913 static void
  914 vge_dma_free(struct vge_softc *sc)
  915 {
  916         struct vge_txdesc *txd;
  917         struct vge_rxdesc *rxd;
  918         int i;
  919 
  920         /* Tx ring. */
  921         if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
  922                 if (sc->vge_rdata.vge_tx_ring_paddr)
  923                         bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
  924                             sc->vge_cdata.vge_tx_ring_map);
  925                 if (sc->vge_rdata.vge_tx_ring)
  926                         bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
  927                             sc->vge_rdata.vge_tx_ring,
  928                             sc->vge_cdata.vge_tx_ring_map);
  929                 sc->vge_rdata.vge_tx_ring = NULL;
  930                 sc->vge_rdata.vge_tx_ring_paddr = 0;
  931                 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
  932                 sc->vge_cdata.vge_tx_ring_tag = NULL;
  933         }
  934         /* Rx ring. */
  935         if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
  936                 if (sc->vge_rdata.vge_rx_ring_paddr)
  937                         bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
  938                             sc->vge_cdata.vge_rx_ring_map);
  939                 if (sc->vge_rdata.vge_rx_ring)
  940                         bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
  941                             sc->vge_rdata.vge_rx_ring,
  942                             sc->vge_cdata.vge_rx_ring_map);
  943                 sc->vge_rdata.vge_rx_ring = NULL;
  944                 sc->vge_rdata.vge_rx_ring_paddr = 0;
  945                 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
  946                 sc->vge_cdata.vge_rx_ring_tag = NULL;
  947         }
  948         /* Tx buffers. */
  949         if (sc->vge_cdata.vge_tx_tag != NULL) {
  950                 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
  951                         txd = &sc->vge_cdata.vge_txdesc[i];
  952                         if (txd->tx_dmamap != NULL) {
  953                                 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
  954                                     txd->tx_dmamap);
  955                                 txd->tx_dmamap = NULL;
  956                         }
  957                 }
  958                 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
  959                 sc->vge_cdata.vge_tx_tag = NULL;
  960         }
  961         /* Rx buffers. */
  962         if (sc->vge_cdata.vge_rx_tag != NULL) {
  963                 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
  964                         rxd = &sc->vge_cdata.vge_rxdesc[i];
  965                         if (rxd->rx_dmamap != NULL) {
  966                                 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
  967                                     rxd->rx_dmamap);
  968                                 rxd->rx_dmamap = NULL;
  969                         }
  970                 }
  971                 if (sc->vge_cdata.vge_rx_sparemap != NULL) {
  972                         bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
  973                             sc->vge_cdata.vge_rx_sparemap);
  974                         sc->vge_cdata.vge_rx_sparemap = NULL;
  975                 }
  976                 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
  977                 sc->vge_cdata.vge_rx_tag = NULL;
  978         }
  979 
  980         if (sc->vge_cdata.vge_buffer_tag != NULL) {
  981                 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
  982                 sc->vge_cdata.vge_buffer_tag = NULL;
  983         }
  984         if (sc->vge_cdata.vge_ring_tag != NULL) {
  985                 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
  986                 sc->vge_cdata.vge_ring_tag = NULL;
  987         }
  988 }
  989 
  990 /*
  991  * Attach the interface. Allocate softc structures, do ifmedia
  992  * setup and ethernet/BPF attach.
  993  */
  994 static int
  995 vge_attach(device_t dev)
  996 {
  997         u_char eaddr[ETHER_ADDR_LEN];
  998         struct vge_softc *sc;
  999         struct ifnet *ifp;
 1000         int error = 0, cap, i, msic, rid;
 1001 
 1002         sc = device_get_softc(dev);
 1003         sc->vge_dev = dev;
 1004 
 1005         mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1006             MTX_DEF);
 1007         callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
 1008 
 1009         /*
 1010          * Map control/status registers.
 1011          */
 1012         pci_enable_busmaster(dev);
 1013 
 1014         rid = PCIR_BAR(1);
 1015         sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1016             RF_ACTIVE);
 1017 
 1018         if (sc->vge_res == NULL) {
 1019                 device_printf(dev, "couldn't map ports/memory\n");
 1020                 error = ENXIO;
 1021                 goto fail;
 1022         }
 1023 
 1024         if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
 1025                 sc->vge_flags |= VGE_FLAG_PCIE;
 1026                 sc->vge_expcap = cap;
 1027         } else
 1028                 sc->vge_flags |= VGE_FLAG_JUMBO;
 1029         if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
 1030                 sc->vge_flags |= VGE_FLAG_PMCAP;
 1031                 sc->vge_pmcap = cap;
 1032         }
 1033         rid = 0;
 1034         msic = pci_msi_count(dev);
 1035         if (msi_disable == 0 && msic > 0) {
 1036                 msic = 1;
 1037                 if (pci_alloc_msi(dev, &msic) == 0) {
 1038                         if (msic == 1) {
 1039                                 sc->vge_flags |= VGE_FLAG_MSI;
 1040                                 device_printf(dev, "Using %d MSI message\n",
 1041                                     msic);
 1042                                 rid = 1;
 1043                         } else
 1044                                 pci_release_msi(dev);
 1045                 }
 1046         }
 1047 
 1048         /* Allocate interrupt */
 1049         sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1050             ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
 1051         if (sc->vge_irq == NULL) {
 1052                 device_printf(dev, "couldn't map interrupt\n");
 1053                 error = ENXIO;
 1054                 goto fail;
 1055         }
 1056 
 1057         /* Reset the adapter. */
 1058         vge_reset(sc);
 1059         /* Reload EEPROM. */
 1060         CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
 1061         for (i = 0; i < VGE_TIMEOUT; i++) {
 1062                 DELAY(5);
 1063                 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
 1064                         break;
 1065         }
 1066         if (i == VGE_TIMEOUT)
 1067                 device_printf(dev, "EEPROM reload timed out\n");
 1068         /*
 1069          * Clear PACPI, as the EEPROM reload will set the bit.
 1070          * Otherwise the MAC will receive magic packets, which in
 1071          * turn confuses the controller.
 1072          */
 1073         CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
 1074 
 1075         /*
 1076          * Get station address from the EEPROM.
 1077          */
 1078         vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
 1079         /*
 1080          * Save configured PHY address.
 1081          * It seems the PHY address of PCIe controllers just
 1082          * reflects the media jumper strapping status, so we assume
 1083          * the internal PHY of PCIe controllers is at address 1.
 1084          */
 1085         if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
 1086                 sc->vge_phyaddr = 1;
 1087         else
 1088                 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
 1089                     VGE_MIICFG_PHYADDR;
 1090         /* Clear WOL and take hardware from powerdown. */
 1091         vge_clrwol(sc);
 1092         vge_sysctl_node(sc);
 1093         error = vge_dma_alloc(sc);
 1094         if (error)
 1095                 goto fail;
 1096 
 1097         ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
 1098         if (ifp == NULL) {
 1099                 device_printf(dev, "can not if_alloc()\n");
 1100                 error = ENOSPC;
 1101                 goto fail;
 1102         }
 1103 
 1104         vge_miipoll_start(sc);
 1105         /* Do MII setup */
 1106         error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
 1107             vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
 1108             MIIF_DOPAUSE);
 1109         if (error != 0) {
 1110                 device_printf(dev, "attaching PHYs failed\n");
 1111                 goto fail;
 1112         }
 1113 
 1114         ifp->if_softc = sc;
 1115         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1116         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1117         ifp->if_ioctl = vge_ioctl;
 1118         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1119         ifp->if_start = vge_start;
 1120         ifp->if_hwassist = VGE_CSUM_FEATURES;
 1121         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
 1122             IFCAP_VLAN_HWTAGGING;
 1123         if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
 1124                 ifp->if_capabilities |= IFCAP_WOL;
 1125         ifp->if_capenable = ifp->if_capabilities;
 1126 #ifdef DEVICE_POLLING
 1127         ifp->if_capabilities |= IFCAP_POLLING;
 1128 #endif
 1129         ifp->if_init = vge_init;
 1130         IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
 1131         ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
 1132         IFQ_SET_READY(&ifp->if_snd);
 1133 
 1134         /*
 1135          * Call MI attach routine.
 1136          */
 1137         ether_ifattach(ifp, eaddr);
 1138 
 1139         /* Tell the upper layer(s) we support long frames. */
 1140         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 1141 
 1142         /* Hook interrupt last to avoid having to lock softc */
 1143         error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
 1144             NULL, vge_intr, sc, &sc->vge_intrhand);
 1145 
 1146         if (error) {
 1147                 device_printf(dev, "couldn't set up irq\n");
 1148                 ether_ifdetach(ifp);
 1149                 goto fail;
 1150         }
 1151 
 1152 fail:
 1153         if (error)
 1154                 vge_detach(dev);
 1155 
 1156         return (error);
 1157 }
 1158 
 1159 /*
 1160  * Shutdown hardware and free up resources. This can be called any
 1161  * time after the mutex has been initialized. It is called in both
 1162  * the error case in attach and the normal detach case so it needs
 1163  * to be careful about only freeing resources that have actually been
 1164  * allocated.
 1165  */
 1166 static int
 1167 vge_detach(device_t dev)
 1168 {
 1169         struct vge_softc *sc;
 1170         struct ifnet *ifp;
 1171 
 1172         sc = device_get_softc(dev);
 1173         KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
 1174         ifp = sc->vge_ifp;
 1175 
 1176 #ifdef DEVICE_POLLING
 1177         if (ifp->if_capenable & IFCAP_POLLING)
 1178                 ether_poll_deregister(ifp);
 1179 #endif
 1180 
 1181         /* These should only be active if attach succeeded */
 1182         if (device_is_attached(dev)) {
 1183                 ether_ifdetach(ifp);
 1184                 VGE_LOCK(sc);
 1185                 vge_stop(sc);
 1186                 VGE_UNLOCK(sc);
 1187                 callout_drain(&sc->vge_watchdog);
 1188         }
 1189         if (sc->vge_miibus)
 1190                 device_delete_child(dev, sc->vge_miibus);
 1191         bus_generic_detach(dev);
 1192 
 1193         if (sc->vge_intrhand)
 1194                 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
 1195         if (sc->vge_irq)
 1196                 bus_release_resource(dev, SYS_RES_IRQ,
 1197                     sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
 1198         if (sc->vge_flags & VGE_FLAG_MSI)
 1199                 pci_release_msi(dev);
 1200         if (sc->vge_res)
 1201                 bus_release_resource(dev, SYS_RES_MEMORY,
 1202                     PCIR_BAR(1), sc->vge_res);
 1203         if (ifp)
 1204                 if_free(ifp);
 1205 
 1206         vge_dma_free(sc);
 1207         mtx_destroy(&sc->vge_mtx);
 1208 
 1209         return (0);
 1210 }
 1211 
 1212 static void
 1213 vge_discard_rxbuf(struct vge_softc *sc, int prod)
 1214 {
 1215         struct vge_rxdesc *rxd;
 1216         int i;
 1217 
 1218         rxd = &sc->vge_cdata.vge_rxdesc[prod];
 1219         rxd->rx_desc->vge_sts = 0;
 1220         rxd->rx_desc->vge_ctl = 0;
 1221 
 1222         /*
 1223          * Note: the manual fails to document the fact that for
 1224          * proper operation, the driver needs to replenish the RX
 1225          * DMA ring 4 descriptors at a time (rather than one at a
 1226          * time, like most chips). We can allocate the new buffers
 1227          * but we should not set the OWN bits until we're ready
 1228          * to hand back 4 of them in one shot.
 1229          */
 1230         if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
 1231                 for (i = VGE_RXCHUNK; i > 0; i--) {
 1232                         rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
 1233                         rxd = rxd->rxd_prev;
 1234                 }
 1235                 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
 1236         }
 1237 }
 1238 
 1239 static int
 1240 vge_newbuf(struct vge_softc *sc, int prod)
 1241 {
 1242         struct vge_rxdesc *rxd;
 1243         struct mbuf *m;
 1244         bus_dma_segment_t segs[1];
 1245         bus_dmamap_t map;
 1246         int i, nsegs;
 1247 
 1248         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1249         if (m == NULL)
 1250                 return (ENOBUFS);
 1251         /*
 1252          * This is part of an evil trick to deal with strict-alignment
 1253          * architectures. The VIA chip requires RX buffers to be aligned
 1254          * on 32-bit boundaries, but that will hose strict-alignment
 1255          * architectures. To get around this, we leave some empty space
 1256          * at the start of each buffer and, for strict-alignment hosts,
 1257          * we copy the buffer back two bytes to achieve word alignment.
 1258          * This is slightly more efficient than allocating a new buffer,
 1259          * copying the contents, and discarding the old buffer.
 1260          */
 1261         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1262         m_adj(m, VGE_RX_BUF_ALIGN);
 1263 
 1264         if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
 1265             sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 1266                 m_freem(m);
 1267                 return (ENOBUFS);
 1268         }
 1269         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1270 
 1271         rxd = &sc->vge_cdata.vge_rxdesc[prod];
 1272         if (rxd->rx_m != NULL) {
 1273                 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
 1274                     BUS_DMASYNC_POSTREAD);
 1275                 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
 1276         }
 1277         map = rxd->rx_dmamap;
 1278         rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
 1279         sc->vge_cdata.vge_rx_sparemap = map;
 1280         bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
 1281             BUS_DMASYNC_PREREAD);
 1282         rxd->rx_m = m;
 1283 
 1284         rxd->rx_desc->vge_sts = 0;
 1285         rxd->rx_desc->vge_ctl = 0;
 1286         rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
 1287         rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
 1288             (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
 1289 
 1290         /*
 1291          * Note: the manual fails to document the fact that for
 1292          * proper operation, the driver needs to replenish the RX
 1293          * DMA ring 4 descriptors at a time (rather than one at a
 1294          * time, like most chips). We can allocate the new buffers
 1295          * but we should not set the OWN bits until we're ready
 1296          * to hand back 4 of them in one shot.
 1297          */
 1298         if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
 1299                 for (i = VGE_RXCHUNK; i > 0; i--) {
 1300                         rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
 1301                         rxd = rxd->rxd_prev;
 1302                 }
 1303                 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
 1304         }
 1305 
 1306         return (0);
 1307 }
 1308 
 1309 static int
 1310 vge_tx_list_init(struct vge_softc *sc)
 1311 {
 1312         struct vge_ring_data *rd;
 1313         struct vge_txdesc *txd;
 1314         int i;
 1315 
 1316         VGE_LOCK_ASSERT(sc);
 1317 
 1318         sc->vge_cdata.vge_tx_prodidx = 0;
 1319         sc->vge_cdata.vge_tx_considx = 0;
 1320         sc->vge_cdata.vge_tx_cnt = 0;
 1321 
 1322         rd = &sc->vge_rdata;
 1323         bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
 1324         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
 1325                 txd = &sc->vge_cdata.vge_txdesc[i];
 1326                 txd->tx_m = NULL;
 1327                 txd->tx_desc = &rd->vge_tx_ring[i];
 1328         }
 1329 
 1330         bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
 1331             sc->vge_cdata.vge_tx_ring_map,
 1332             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1333 
 1334         return (0);
 1335 }
 1336 
 1337 static int
 1338 vge_rx_list_init(struct vge_softc *sc)
 1339 {
 1340         struct vge_ring_data *rd;
 1341         struct vge_rxdesc *rxd;
 1342         int i;
 1343 
 1344         VGE_LOCK_ASSERT(sc);
 1345 
 1346         sc->vge_cdata.vge_rx_prodidx = 0;
 1347         sc->vge_cdata.vge_head = NULL;
 1348         sc->vge_cdata.vge_tail = NULL;
 1349         sc->vge_cdata.vge_rx_commit = 0;
 1350 
 1351         rd = &sc->vge_rdata;
 1352         bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
 1353         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
 1354                 rxd = &sc->vge_cdata.vge_rxdesc[i];
 1355                 rxd->rx_m = NULL;
 1356                 rxd->rx_desc = &rd->vge_rx_ring[i];
 1357                 if (i == 0)
 1358                         rxd->rxd_prev =
 1359                             &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
 1360                 else
 1361                         rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
 1362                 if (vge_newbuf(sc, i) != 0)
 1363                         return (ENOBUFS);
 1364         }
 1365 
 1366         bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
 1367             sc->vge_cdata.vge_rx_ring_map,
 1368             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1369 
 1370         sc->vge_cdata.vge_rx_commit = 0;
 1371 
 1372         return (0);
 1373 }
 1374 
 1375 static void
 1376 vge_freebufs(struct vge_softc *sc)
 1377 {
 1378         struct vge_txdesc *txd;
 1379         struct vge_rxdesc *rxd;
 1380         struct ifnet *ifp;
 1381         int i;
 1382 
 1383         VGE_LOCK_ASSERT(sc);
 1384 
 1385         ifp = sc->vge_ifp;
 1386         /*
 1387          * Free RX and TX mbufs still in the queues.
 1388          */
 1389         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
 1390                 rxd = &sc->vge_cdata.vge_rxdesc[i];
 1391                 if (rxd->rx_m != NULL) {
 1392                         bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
 1393                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 1394                         bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
 1395                             rxd->rx_dmamap);
 1396                         m_freem(rxd->rx_m);
 1397                         rxd->rx_m = NULL;
 1398                 }
 1399         }
 1400 
 1401         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
 1402                 txd = &sc->vge_cdata.vge_txdesc[i];
 1403                 if (txd->tx_m != NULL) {
 1404                         bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
 1405                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 1406                         bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
 1407                             txd->tx_dmamap);
 1408                         m_freem(txd->tx_m);
 1409                         txd->tx_m = NULL;
 1410                         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1411                 }
 1412         }
 1413 }
 1414 
 1415 #ifndef __NO_STRICT_ALIGNMENT
 1416 static __inline void
 1417 vge_fixup_rx(struct mbuf *m)
 1418 {
 1419         int i;
 1420         uint16_t *src, *dst;
 1421 
 1422         src = mtod(m, uint16_t *);
 1423         dst = src - 1;
 1424 
 1425         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 1426                 *dst++ = *src++;
 1427 
 1428         m->m_data -= ETHER_ALIGN;
 1429 }
 1430 #endif
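
      /*
       * Why the 16-bit copy above restores alignment: receive buffers
       * apparently must start 4-byte aligned (see VGE_RX_BUF_ALIGN), so
       * the IP header behind the 14-byte Ethernet header lands on a
       * 2 (mod 4) address.  Sliding the whole frame back ETHER_ALIGN (2)
       * bytes makes the IP header 4-byte aligned again.  A hypothetical
       * sanity check, assuming the buffer really did start on a 4-byte
       * boundary (illustration only, compiled out):
       */
      #if 0
              KASSERT(((uintptr_t)(mtod(m, char *) + ETHER_HDR_LEN) & 3) == 0,
                  ("IP header still misaligned after vge_fixup_rx"));
      #endif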
 1431 
 1432 /*
 1433  * RX handler. We support the reception of jumbo frames that have
 1434  * been fragmented across multiple 2K mbuf cluster buffers.
 1435  */
 1436 static int
 1437 vge_rxeof(struct vge_softc *sc, int count)
 1438 {
 1439         struct mbuf *m;
 1440         struct ifnet *ifp;
 1441         int prod, prog, total_len;
 1442         struct vge_rxdesc *rxd;
 1443         struct vge_rx_desc *cur_rx;
 1444         uint32_t rxstat, rxctl;
 1445 
 1446         VGE_LOCK_ASSERT(sc);
 1447 
 1448         ifp = sc->vge_ifp;
 1449 
 1450         bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
 1451             sc->vge_cdata.vge_rx_ring_map,
 1452             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1453 
 1454         prod = sc->vge_cdata.vge_rx_prodidx;
 1455         for (prog = 0; count > 0 &&
 1456             (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
 1457             VGE_RX_DESC_INC(prod)) {
 1458                 cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
 1459                 rxstat = le32toh(cur_rx->vge_sts);
 1460                 if ((rxstat & VGE_RDSTS_OWN) != 0)
 1461                         break;
 1462                 count--;
 1463                 prog++;
 1464                 rxctl = le32toh(cur_rx->vge_ctl);
 1465                 total_len = VGE_RXBYTES(rxstat);
 1466                 rxd = &sc->vge_cdata.vge_rxdesc[prod];
 1467                 m = rxd->rx_m;
 1468 
 1469                 /*
 1470                  * If the 'start of frame' bit is set, this indicates
 1471                  * either the first fragment in a multi-fragment receive,
 1472                  * or an intermediate fragment. Either way, we want to
 1473                  * accumulate the buffers.
 1474                  */
 1475                 if ((rxstat & VGE_RXPKT_SOF) != 0) {
 1476                         if (vge_newbuf(sc, prod) != 0) {
 1477                                 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 1478                                 VGE_CHAIN_RESET(sc);
 1479                                 vge_discard_rxbuf(sc, prod);
 1480                                 continue;
 1481                         }
 1482                         m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
 1483                         if (sc->vge_cdata.vge_head == NULL) {
 1484                                 sc->vge_cdata.vge_head = m;
 1485                                 sc->vge_cdata.vge_tail = m;
 1486                         } else {
 1487                                 m->m_flags &= ~M_PKTHDR;
 1488                                 sc->vge_cdata.vge_tail->m_next = m;
 1489                                 sc->vge_cdata.vge_tail = m;
 1490                         }
 1491                         continue;
 1492                 }
 1493 
 1494                 /*
 1495                  * Bad/error frames will have the RXOK bit cleared.
 1496                  * However, there's one error case we want to allow:
 1497                  * if a VLAN tagged frame arrives and the chip can't
 1498                  * match it against the CAM filter, it considers this
 1499                  * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
 1500                  * We don't want to drop the frame though: our VLAN
 1501                  * filtering is done in software.
 1502                  * We also want to receive frames with bad checksums
 1503                  * and frames with bad lengths.
 1504                  */
 1505                 if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
 1506                     (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
 1507                     VGE_RDSTS_CSUMERR)) == 0) {
 1508                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1509                         /*
 1510                          * If this is part of a multi-fragment packet,
 1511                          * discard all the pieces.
 1512                          */
 1513                         VGE_CHAIN_RESET(sc);
 1514                         vge_discard_rxbuf(sc, prod);
 1515                         continue;
 1516                 }
 1517 
 1518                 if (vge_newbuf(sc, prod) != 0) {
 1519                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 1520                         VGE_CHAIN_RESET(sc);
 1521                         vge_discard_rxbuf(sc, prod);
 1522                         continue;
 1523                 }
 1524 
 1525                 /* Chain received mbufs. */
 1526                 if (sc->vge_cdata.vge_head != NULL) {
 1527                         m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
 1528                         /*
 1529                          * Special case: if there are 4 bytes or fewer
 1530                          * in this buffer, the mbuf can be discarded:
 1531                          * the last 4 bytes are the CRC, which we don't
 1532                          * care about anyway.
 1533                          */
 1534                         if (m->m_len <= ETHER_CRC_LEN) {
 1535                                 sc->vge_cdata.vge_tail->m_len -=
 1536                                     (ETHER_CRC_LEN - m->m_len);
 1537                                 m_freem(m);
 1538                         } else {
 1539                                 m->m_len -= ETHER_CRC_LEN;
 1540                                 m->m_flags &= ~M_PKTHDR;
 1541                                 sc->vge_cdata.vge_tail->m_next = m;
 1542                         }
 1543                         m = sc->vge_cdata.vge_head;
 1544                         m->m_flags |= M_PKTHDR;
 1545                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
 1546                 } else {
 1547                         m->m_flags |= M_PKTHDR;
 1548                         m->m_pkthdr.len = m->m_len =
 1549                             (total_len - ETHER_CRC_LEN);
 1550                 }
 1551 
 1552 #ifndef __NO_STRICT_ALIGNMENT
 1553                 vge_fixup_rx(m);
 1554 #endif
 1555                 m->m_pkthdr.rcvif = ifp;
 1556 
 1557                 /* Do RX checksumming if enabled */
 1558                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 1559                     (rxctl & VGE_RDCTL_FRAG) == 0) {
 1560                         /* Check IP header checksum */
 1561                         if ((rxctl & VGE_RDCTL_IPPKT) != 0)
 1562                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1563                         if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
 1564                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1565 
 1566                         /* Check TCP/UDP checksum */
 1567                         if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
 1568                             rxctl & VGE_RDCTL_PROTOCSUMOK) {
 1569                                 m->m_pkthdr.csum_flags |=
 1570                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 1571                                 m->m_pkthdr.csum_data = 0xffff;
 1572                         }
 1573                 }
 1574 
 1575                 if ((rxstat & VGE_RDSTS_VTAG) != 0) {
 1576                         /*
 1577                          * The 32-bit rxctl register is stored in little-endian.
 1578                          * However, the 16-bit vlan tag is stored in big-endian,
 1579                          * so we have to byte swap it.
 1580                          */
 1581                         m->m_pkthdr.ether_vtag =
 1582                             bswap16(rxctl & VGE_RDCTL_VLANID);
 1583                         m->m_flags |= M_VLANTAG;
 1584                 }
 1585 
 1586                 VGE_UNLOCK(sc);
 1587                 (*ifp->if_input)(ifp, m);
 1588                 VGE_LOCK(sc);
 1589                 sc->vge_cdata.vge_head = NULL;
 1590                 sc->vge_cdata.vge_tail = NULL;
 1591         }
 1592 
 1593         if (prog > 0) {
 1594                 sc->vge_cdata.vge_rx_prodidx = prod;
 1595                 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
 1596                     sc->vge_cdata.vge_rx_ring_map,
 1597                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1598                 /* Update residue counter. */
 1599                 if (sc->vge_cdata.vge_rx_commit != 0) {
 1600                         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
 1601                             sc->vge_cdata.vge_rx_commit);
 1602                         sc->vge_cdata.vge_rx_commit = 0;
 1603                 }
 1604         }
 1605         return (prog);
 1606 }
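
      /*
       * Worked example of the fragment arithmetic in vge_rxeof() above,
       * assuming MCLBYTES is 2048 and VGE_RX_BUF_ALIGN is 4 (2044 usable
       * bytes per cluster): a 5000-byte frame, CRC included, arrives as
       * two full 2044-byte fragments plus a final fragment of
       * 5000 % 2044 = 912 bytes.  The tail fragment is trimmed by
       * ETHER_CRC_LEN (4) to 908 bytes, and the assembled chain reports
       * m_pkthdr.len = 5000 - 4 = 4996.
       */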
 1607 
 1608 static void
 1609 vge_txeof(struct vge_softc *sc)
 1610 {
 1611         struct ifnet *ifp;
 1612         struct vge_tx_desc *cur_tx;
 1613         struct vge_txdesc *txd;
 1614         uint32_t txstat;
 1615         int cons, prod;
 1616 
 1617         VGE_LOCK_ASSERT(sc);
 1618 
 1619         ifp = sc->vge_ifp;
 1620 
 1621         if (sc->vge_cdata.vge_tx_cnt == 0)
 1622                 return;
 1623 
 1624         bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
 1625             sc->vge_cdata.vge_tx_ring_map,
 1626             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1627 
 1628         /*
 1629          * Go through our tx list and free mbufs for those
 1630          * frames that have been transmitted.
 1631          */
 1632         cons = sc->vge_cdata.vge_tx_considx;
 1633         prod = sc->vge_cdata.vge_tx_prodidx;
 1634         for (; cons != prod; VGE_TX_DESC_INC(cons)) {
 1635                 cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
 1636                 txstat = le32toh(cur_tx->vge_sts);
 1637                 if ((txstat & VGE_TDSTS_OWN) != 0)
 1638                         break;
 1639                 sc->vge_cdata.vge_tx_cnt--;
 1640                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1641 
 1642                 txd = &sc->vge_cdata.vge_txdesc[cons];
 1643                 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
 1644                     BUS_DMASYNC_POSTWRITE);
 1645                 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
 1646 
 1647                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
 1648                     __func__));
 1649                 m_freem(txd->tx_m);
 1650                 txd->tx_m = NULL;
 1651                 txd->tx_desc->vge_frag[0].vge_addrhi = 0;
 1652         }
 1653         bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
 1654             sc->vge_cdata.vge_tx_ring_map,
 1655             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1656         sc->vge_cdata.vge_tx_considx = cons;
 1657         if (sc->vge_cdata.vge_tx_cnt == 0)
 1658                 sc->vge_timer = 0;
 1659 }
 1660 
 1661 static void
 1662 vge_link_statchg(void *xsc)
 1663 {
 1664         struct vge_softc *sc;
 1665         struct ifnet *ifp;
 1666         uint8_t physts;
 1667 
 1668         sc = xsc;
 1669         ifp = sc->vge_ifp;
 1670         VGE_LOCK_ASSERT(sc);
 1671 
 1672         physts = CSR_READ_1(sc, VGE_PHYSTS0);
 1673         if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
 1674                 if ((physts & VGE_PHYSTS_LINK) == 0) {
 1675                         sc->vge_flags &= ~VGE_FLAG_LINK;
 1676                         if_link_state_change(sc->vge_ifp,
 1677                             LINK_STATE_DOWN);
 1678                 } else {
 1679                         sc->vge_flags |= VGE_FLAG_LINK;
 1680                         if_link_state_change(sc->vge_ifp,
 1681                             LINK_STATE_UP);
 1682                         CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
 1683                             VGE_CR2_FDX_RXFLOWCTL_ENABLE);
 1684                         if ((physts & VGE_PHYSTS_FDX) != 0) {
 1685                                 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
 1686                                         CSR_WRITE_1(sc, VGE_CRS2,
 1687                                             VGE_CR2_FDX_TXFLOWCTL_ENABLE);
 1688                                 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
 1689                                         CSR_WRITE_1(sc, VGE_CRS2,
 1690                                             VGE_CR2_FDX_RXFLOWCTL_ENABLE);
 1691                         }
 1692                         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1693                                 vge_start_locked(ifp);
 1694                 }
 1695         }
 1696         /*
 1697          * Restart MII auto-polling because link state change interrupt
 1698          * will disable it.
 1699          */
 1700         vge_miipoll_start(sc);
 1701 }
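
      /*
       * Note on the register idiom used above and throughout this file:
       * as the paired accesses suggest, each command register exists as
       * a set/clear pair -- a mask written to VGE_CRSx sets those bits,
       * the same mask written to VGE_CRCx clears them -- so individual
       * bits can be changed without a read-modify-write cycle.
       * Compiled-out sketch:
       */
      #if 0
              /* Clear both flow control enables, then set only the Tx one. */
              CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
                  VGE_CR2_FDX_RXFLOWCTL_ENABLE);
              CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_FDX_TXFLOWCTL_ENABLE);
      #endif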
 1702 
 1703 #ifdef DEVICE_POLLING
 1704 static int
 1705 vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1706 {
 1707         struct vge_softc *sc = ifp->if_softc;
 1708         int rx_npkts = 0;
 1709 
 1710         VGE_LOCK(sc);
 1711         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1712                 goto done;
 1713 
 1714         rx_npkts = vge_rxeof(sc, count);
 1715         vge_txeof(sc);
 1716 
 1717         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1718                 vge_start_locked(ifp);
 1719 
 1720         if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
 1721                 uint32_t       status;
 1722                 status = CSR_READ_4(sc, VGE_ISR);
 1723                 if (status == 0xFFFFFFFF)
 1724                         goto done;
 1725                 if (status)
 1726                         CSR_WRITE_4(sc, VGE_ISR, status);
 1727 
 1728                 /*
 1729                  * XXX check behaviour on receiver stalls.
 1730                  */
 1731 
 1732                 if ((status & (VGE_ISR_TXDMA_STALL |
 1733                     VGE_ISR_RXDMA_STALL)) != 0) {
 1734                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1735                         vge_init_locked(sc);
 1736                 }
 1737 
 1738                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
 1739                         vge_rxeof(sc, count);
 1740                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1741                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1742                 }
 1743         }
 1744 done:
 1745         VGE_UNLOCK(sc);
 1746         return (rx_npkts);
 1747 }
 1748 #endif /* DEVICE_POLLING */
 1749 
 1750 static void
 1751 vge_intr(void *arg)
 1752 {
 1753         struct vge_softc *sc;
 1754         struct ifnet *ifp;
 1755         uint32_t status;
 1756 
 1757         sc = arg;
 1758         VGE_LOCK(sc);
 1759 
 1760         ifp = sc->vge_ifp;
 1761         if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
 1762             (ifp->if_flags & IFF_UP) == 0) {
 1763                 VGE_UNLOCK(sc);
 1764                 return;
 1765         }
 1766 
 1767 #ifdef DEVICE_POLLING
 1768         if (ifp->if_capenable & IFCAP_POLLING) {
 1769                 status = CSR_READ_4(sc, VGE_ISR);
 1770                 CSR_WRITE_4(sc, VGE_ISR, status);
 1771                 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
 1772                         vge_link_statchg(sc);
 1773                 VGE_UNLOCK(sc);
 1774                 return;
 1775         }
 1776 #endif
 1777 
 1778         /* Disable interrupts */
 1779         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 1780         status = CSR_READ_4(sc, VGE_ISR);
 1781         CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
 1782         /* If the card has gone away, the read returns 0xffffffff. */
 1783         if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
 1784                 goto done;
 1785         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1786                 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
 1787                         vge_rxeof(sc, VGE_RX_DESC_CNT);
 1788                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
 1789                         vge_rxeof(sc, VGE_RX_DESC_CNT);
 1790                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1791                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1792                 }
 1793 
 1794                 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
 1795                         vge_txeof(sc);
 1796 
 1797                 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
 1798                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1799                         vge_init_locked(sc);
 1800                 }
 1801 
 1802                 if (status & VGE_ISR_LINKSTS)
 1803                         vge_link_statchg(sc);
 1804         }
 1805 done:
 1806         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1807                 /* Re-enable interrupts */
 1808                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 1809 
 1810                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1811                         vge_start_locked(ifp);
 1812         }
 1813         VGE_UNLOCK(sc);
 1814 }
 1815 
 1816 static int
 1817 vge_encap(struct vge_softc *sc, struct mbuf **m_head)
 1818 {
 1819         struct vge_txdesc *txd;
 1820         struct vge_tx_frag *frag;
 1821         struct mbuf *m;
 1822         bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
 1823         int error, i, nsegs, padlen;
 1824         uint32_t cflags;
 1825 
 1826         VGE_LOCK_ASSERT(sc);
 1827 
 1828         M_ASSERTPKTHDR((*m_head));
 1829 
 1830         /* Argh. This chip does not autopad short frames. */
 1831         if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
 1832                 m = *m_head;
 1833                 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
 1834                 if (M_WRITABLE(m) == 0) {
 1835                         /* Get a writable copy. */
 1836                         m = m_dup(*m_head, M_NOWAIT);
 1837                         m_freem(*m_head);
 1838                         if (m == NULL) {
 1839                                 *m_head = NULL;
 1840                                 return (ENOBUFS);
 1841                         }
 1842                         *m_head = m;
 1843                 }
 1844                 if (M_TRAILINGSPACE(m) < padlen) {
 1845                         m = m_defrag(m, M_NOWAIT);
 1846                         if (m == NULL) {
 1847                                 m_freem(*m_head);
 1848                                 *m_head = NULL;
 1849                                 return (ENOBUFS);
 1850                         }
 1851                 }
 1852                 /*
 1853                  * Manually pad short frames, and zero the pad space
 1854                  * to avoid leaking data.
 1855                  */
 1856                 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
 1857                 m->m_pkthdr.len += padlen;
 1858                 m->m_len = m->m_pkthdr.len;
 1859                 *m_head = m;
 1860         }
 1861 
 1862         txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
 1863 
 1864         error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
 1865             txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1866         if (error == EFBIG) {
 1867                 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
 1868                 if (m == NULL) {
 1869                         m_freem(*m_head);
 1870                         *m_head = NULL;
 1871                         return (ENOMEM);
 1872                 }
 1873                 *m_head = m;
 1874                 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
 1875                     txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1876                 if (error != 0) {
 1877                         m_freem(*m_head);
 1878                         *m_head = NULL;
 1879                         return (error);
 1880                 }
 1881         } else if (error != 0)
 1882                 return (error);
 1883         bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
 1884             BUS_DMASYNC_PREWRITE);
 1885 
 1886         m = *m_head;
 1887         cflags = 0;
 1888 
 1889         /* Configure checksum offload. */
 1890         if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
 1891                 cflags |= VGE_TDCTL_IPCSUM;
 1892         if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
 1893                 cflags |= VGE_TDCTL_TCPCSUM;
 1894         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 1895                 cflags |= VGE_TDCTL_UDPCSUM;
 1896 
 1897         /* Configure VLAN. */
 1898         if ((m->m_flags & M_VLANTAG) != 0)
 1899                 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
 1900         txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
 1901         /*
 1902          * XXX
 1903          * Velocity family seems to support TSO but no information
 1904          * for MSS configuration is available. Also the number of
 1905          * fragments supported by a descriptor is too small to hold
 1906          * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
 1907          * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
 1908          * longer chain of buffers but no additional information is
 1909          * available.
 1910          *
 1911          * When telling the chip how many segments there are, we
 1912          * must use nsegs + 1 instead of just nsegs. Darned if I
 1913          * know why. This also means we can't use the last fragment
 1914          * field of Tx descriptor.
 1915          */
 1916         txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
 1917             VGE_TD_LS_NORM);
 1918         for (i = 0; i < nsegs; i++) {
 1919                 frag = &txd->tx_desc->vge_frag[i];
 1920                 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
 1921                 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
 1922                     (VGE_BUFLEN(txsegs[i].ds_len) << 16));
 1923         }
 1924 
 1925         sc->vge_cdata.vge_tx_cnt++;
 1926         VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
 1927 
 1928         /*
 1929          * Finally request interrupt and give the first descriptor
 1930          * ownership to hardware.
 1931          */
 1932         txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
 1933         txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
 1934         txd->tx_m = m;
 1935 
 1936         return (0);
 1937 }
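
      /*
       * Sketch of the descriptor packing performed above, with the
       * layout inferred from this function rather than from chip
       * documentation: for a two-segment, 642-byte frame with IP
       * checksum offload,
       *
       *      vge_sts = 642 << 16                    (length; OWN set last)
       *      vge_ctl = VGE_TDCTL_IPCSUM | ((2 + 1) << 28) | VGE_TD_LS_NORM
       *
       * and each vge_frag[i] carries the low 32 address bits in
       * vge_addrlo, with the high address bits and the segment length
       * (shifted left 16) packed together in vge_addrhi.
       */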
 1938 
 1939 /*
 1940  * Main transmit routine.
 1941  */
 1942 
 1943 static void
 1944 vge_start(struct ifnet *ifp)
 1945 {
 1946         struct vge_softc *sc;
 1947 
 1948         sc = ifp->if_softc;
 1949         VGE_LOCK(sc);
 1950         vge_start_locked(ifp);
 1951         VGE_UNLOCK(sc);
 1952 }
 1953 
 1954 static void
 1955 vge_start_locked(struct ifnet *ifp)
 1956 {
 1957         struct vge_softc *sc;
 1958         struct vge_txdesc *txd;
 1959         struct mbuf *m_head;
 1960         int enq, idx;
 1961 
 1962         sc = ifp->if_softc;
 1963 
 1964         VGE_LOCK_ASSERT(sc);
 1965 
 1966         if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
 1967             (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1968             IFF_DRV_RUNNING)
 1969                 return;
 1970 
 1971         idx = sc->vge_cdata.vge_tx_prodidx;
 1972         VGE_TX_DESC_DEC(idx);
 1973         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1974             sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
 1975                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1976                 if (m_head == NULL)
 1977                         break;
 1978                 /*
 1979                  * Pack the data into the transmit ring. If we
 1980                  * don't have room, set the OACTIVE flag and wait
 1981                  * for the NIC to drain the ring.
 1982                  */
 1983                 if (vge_encap(sc, &m_head)) {
 1984                         if (m_head == NULL)
 1985                                 break;
 1986                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1987                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1988                         break;
 1989                 }
 1990 
 1991                 txd = &sc->vge_cdata.vge_txdesc[idx];
 1992                 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
 1993                 VGE_TX_DESC_INC(idx);
 1994 
 1995                 enq++;
 1996                 /*
 1997                  * If there's a BPF listener, bounce a copy of this
 1998                  * frame to it.
 1999                  */
 2000                 ETHER_BPF_MTAP(ifp, m_head);
 2001         }
 2002 
 2003         if (enq > 0) {
 2004                 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
 2005                     sc->vge_cdata.vge_tx_ring_map,
 2006                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2007                 /* Issue a transmit command. */
 2008                 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
 2009                 /*
 2010                  * Set a timeout in case the chip goes out to lunch.
 2011                  */
 2012                 sc->vge_timer = 5;
 2013         }
 2014 }
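
      /*
       * Note on the VGE_TXDESC_Q bit set in the loop above: as far as
       * this code shows, setting it in vge_frag[0].vge_addrhi of the
       * previously queued descriptor (idx starts one behind
       * vge_tx_prodidx) marks that another frame follows it, so one
       * VGE_TXQCSR_WAK0 kick can drain a whole batch.  vge_txeof()
       * zeroes that word again when the descriptor is reclaimed.
       */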
 2015 
 2016 static void
 2017 vge_init(void *xsc)
 2018 {
 2019         struct vge_softc *sc = xsc;
 2020 
 2021         VGE_LOCK(sc);
 2022         vge_init_locked(sc);
 2023         VGE_UNLOCK(sc);
 2024 }
 2025 
 2026 static void
 2027 vge_init_locked(struct vge_softc *sc)
 2028 {
 2029         struct ifnet *ifp = sc->vge_ifp;
 2030         int error, i;
 2031 
 2032         VGE_LOCK_ASSERT(sc);
 2033 
 2034         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2035                 return;
 2036 
 2037         /*
 2038          * Cancel pending I/O and free all RX/TX buffers.
 2039          */
 2040         vge_stop(sc);
 2041         vge_reset(sc);
 2042         vge_miipoll_start(sc);
 2043 
 2044         /*
 2045          * Initialize the RX and TX descriptors and mbufs.
 2046          */
 2047 
 2048         error = vge_rx_list_init(sc);
 2049         if (error != 0) {
 2050                 device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
 2051                 return;
 2052         }
 2053         vge_tx_list_init(sc);
 2054         /* Clear MAC statistics. */
 2055         vge_stats_clear(sc);
 2056         /* Set our station address */
 2057         for (i = 0; i < ETHER_ADDR_LEN; i++)
 2058                 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
 2059 
 2060         /*
 2061          * Set receive FIFO threshold. Also allow transmission and
 2062          * reception of VLAN tagged frames.
 2063          */
 2064         CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
 2065         CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
 2066 
 2067         /* Set DMA burst length */
 2068         CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
 2069         CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
 2070 
 2071         CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
 2072 
 2073         /* Set collision backoff algorithm */
 2074         CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
 2075             VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
 2076         CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
 2077 
 2078         /* Disable LPSEL field in priority resolution */
 2079         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
 2080 
 2081         /*
 2082          * Load the addresses of the DMA queues into the chip.
 2083          * Note that we only use one transmit queue.
 2084          */
 2085 
 2086         CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
 2087             VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
 2088         CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
 2089             VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
 2090         CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
 2091 
 2092         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
 2093             VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
 2094         CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
 2095         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
 2096 
 2097         /* Configure interrupt moderation. */
 2098         vge_intr_holdoff(sc);
 2099 
 2100         /* Enable and wake up the RX descriptor queue */
 2101         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 2102         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 2103 
 2104         /* Enable the TX descriptor queue */
 2105         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
 2106 
 2107         /* Init the CAM filter. */
 2108         vge_cam_clear(sc);
 2109 
 2110         /* Set up receiver filter. */
 2111         vge_rxfilter(sc);
 2112         vge_setvlan(sc);
 2113 
 2114         /* Initialize pause timer. */
 2115         CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
 2116         /*
 2117          * Initialize flow control parameters.
 2118          *  TX XON high threshold : 48
 2119          *  TX pause low threshold : 24
 2120          *  Disable half-duplex flow control
 2121          */
 2122         CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
 2123         CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
 2124 
 2125         /* Enable jumbo frame reception (if desired) */
 2126 
 2127         /* Start the MAC. */
 2128         CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
 2129         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
 2130         CSR_WRITE_1(sc, VGE_CRS0,
 2131             VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
 2132 
 2133 #ifdef DEVICE_POLLING
 2134         /*
 2135          * Disable interrupts except link state change if we are polling.
 2136          */
 2137         if (ifp->if_capenable & IFCAP_POLLING) {
 2138                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
 2139         } else  /* otherwise ... */
 2140 #endif
 2141         {
 2142                 /*
 2143                  * Enable interrupts.
 2144                  */
 2145                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
 2146         }
 2147         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 2148         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 2149 
 2150         sc->vge_flags &= ~VGE_FLAG_LINK;
 2151         vge_ifmedia_upd_locked(sc);
 2152 
 2153         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2154         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2155         callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
 2156 }
 2157 
 2158 /*
 2159  * Set media options.
 2160  */
 2161 static int
 2162 vge_ifmedia_upd(struct ifnet *ifp)
 2163 {
 2164         struct vge_softc *sc;
 2165         int error;
 2166 
 2167         sc = ifp->if_softc;
 2168         VGE_LOCK(sc);
 2169         error = vge_ifmedia_upd_locked(sc);
 2170         VGE_UNLOCK(sc);
 2171 
 2172         return (error);
 2173 }
 2174 
 2175 static int
 2176 vge_ifmedia_upd_locked(struct vge_softc *sc)
 2177 {
 2178         struct mii_data *mii;
 2179         struct mii_softc *miisc;
 2180         int error;
 2181 
 2182         mii = device_get_softc(sc->vge_miibus);
 2183         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
 2184                 PHY_RESET(miisc);
 2185         vge_setmedia(sc);
 2186         error = mii_mediachg(mii);
 2187 
 2188         return (error);
 2189 }
 2190 
 2191 /*
 2192  * Report current media status.
 2193  */
 2194 static void
 2195 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2196 {
 2197         struct vge_softc *sc;
 2198         struct mii_data *mii;
 2199 
 2200         sc = ifp->if_softc;
 2201         mii = device_get_softc(sc->vge_miibus);
 2202 
 2203         VGE_LOCK(sc);
 2204         if ((ifp->if_flags & IFF_UP) == 0) {
 2205                 VGE_UNLOCK(sc);
 2206                 return;
 2207         }
 2208         mii_pollstat(mii);
 2209         ifmr->ifm_active = mii->mii_media_active;
 2210         ifmr->ifm_status = mii->mii_media_status;
 2211         VGE_UNLOCK(sc);
 2212 }
 2213 
 2214 static void
 2215 vge_setmedia(struct vge_softc *sc)
 2216 {
 2217         struct mii_data *mii;
 2218         struct ifmedia_entry *ife;
 2219 
 2220         mii = device_get_softc(sc->vge_miibus);
 2221         ife = mii->mii_media.ifm_cur;
 2222 
 2223         /*
 2224          * If the user manually selects a media mode, we need to turn
 2225          * on the forced MAC mode bit in the DIAGCTL register. If the
 2226          * user happens to choose a full duplex mode, we also need to
 2227          * set the 'force full duplex' bit. This applies only to
 2228          * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
 2229          * mode is disabled, and in 1000baseT mode, full duplex is
 2230          * always implied, so we turn on the forced mode bit but leave
 2231          * the FDX bit cleared.
 2232          */
 2233 
 2234         switch (IFM_SUBTYPE(ife->ifm_media)) {
 2235         case IFM_AUTO:
 2236                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2237                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2238                 break;
 2239         case IFM_1000_T:
 2240                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2241                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2242                 break;
 2243         case IFM_100_TX:
 2244         case IFM_10_T:
 2245                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2246                 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
 2247                         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2248                 } else {
 2249                         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2250                 }
 2251                 break;
 2252         default:
 2253                 device_printf(sc->vge_dev, "unknown media type: %x\n",
 2254                     IFM_SUBTYPE(ife->ifm_media));
 2255                 break;
 2256         }
 2257 }
 2258 
 2259 static int
 2260 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2261 {
 2262         struct vge_softc *sc = ifp->if_softc;
 2263         struct ifreq *ifr = (struct ifreq *) data;
 2264         struct mii_data *mii;
 2265         int error = 0, mask;
 2266 
 2267         switch (command) {
 2268         case SIOCSIFMTU:
 2269                 VGE_LOCK(sc);
 2270                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
 2271                         error = EINVAL;
 2272                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 2273                         if (ifr->ifr_mtu > ETHERMTU &&
 2274                             (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
 2275                                 error = EINVAL;
 2276                         else
 2277                                 ifp->if_mtu = ifr->ifr_mtu;
 2278                 }
 2279                 VGE_UNLOCK(sc);
 2280                 break;
 2281         case SIOCSIFFLAGS:
 2282                 VGE_LOCK(sc);
 2283                 if ((ifp->if_flags & IFF_UP) != 0) {
 2284                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 2285                             ((ifp->if_flags ^ sc->vge_if_flags) &
 2286                             (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 2287                                 vge_rxfilter(sc);
 2288                         else
 2289                                 vge_init_locked(sc);
 2290                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2291                         vge_stop(sc);
 2292                 sc->vge_if_flags = ifp->if_flags;
 2293                 VGE_UNLOCK(sc);
 2294                 break;
 2295         case SIOCADDMULTI:
 2296         case SIOCDELMULTI:
 2297                 VGE_LOCK(sc);
 2298                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2299                         vge_rxfilter(sc);
 2300                 VGE_UNLOCK(sc);
 2301                 break;
 2302         case SIOCGIFMEDIA:
 2303         case SIOCSIFMEDIA:
 2304                 mii = device_get_softc(sc->vge_miibus);
 2305                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 2306                 break;
 2307         case SIOCSIFCAP:
 2308                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 2309 #ifdef DEVICE_POLLING
 2310                 if (mask & IFCAP_POLLING) {
 2311                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 2312                                 error = ether_poll_register(vge_poll, ifp);
 2313                                 if (error)
 2314                                         return (error);
 2315                                 VGE_LOCK(sc);
 2316                                 /* Disable interrupts. */
 2317                                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
 2318                                 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 2319                                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 2320                                 ifp->if_capenable |= IFCAP_POLLING;
 2321                                 VGE_UNLOCK(sc);
 2322                         } else {
 2323                                 error = ether_poll_deregister(ifp);
 2324                                 /* Enable interrupts. */
 2325                                 VGE_LOCK(sc);
 2326                                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
 2327                                 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 2328                                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 2329                                 ifp->if_capenable &= ~IFCAP_POLLING;
 2330                                 VGE_UNLOCK(sc);
 2331                         }
 2332                 }
 2333 #endif /* DEVICE_POLLING */
 2334                 VGE_LOCK(sc);
 2335                 if ((mask & IFCAP_TXCSUM) != 0 &&
 2336                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 2337                         ifp->if_capenable ^= IFCAP_TXCSUM;
 2338                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 2339                                 ifp->if_hwassist |= VGE_CSUM_FEATURES;
 2340                         else
 2341                                 ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
 2342                 }
 2343                 if ((mask & IFCAP_RXCSUM) != 0 &&
 2344                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
 2345                         ifp->if_capenable ^= IFCAP_RXCSUM;
 2346                 if ((mask & IFCAP_WOL_UCAST) != 0 &&
 2347                     (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
 2348                         ifp->if_capenable ^= IFCAP_WOL_UCAST;
 2349                 if ((mask & IFCAP_WOL_MCAST) != 0 &&
 2350                     (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
 2351                         ifp->if_capenable ^= IFCAP_WOL_MCAST;
 2352                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 2353                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 2354                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 2355                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 2356                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
 2357                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 2358                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 2359                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
 2360                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 2361                         vge_setvlan(sc);
 2362                 }
 2363                 VGE_UNLOCK(sc);
 2364                 VLAN_CAPABILITIES(ifp);
 2365                 break;
 2366         default:
 2367                 error = ether_ioctl(ifp, command, data);
 2368                 break;
 2369         }
 2370 
 2371         return (error);
 2372 }
 2373 
 2374 static void
 2375 vge_watchdog(void *arg)
 2376 {
 2377         struct vge_softc *sc;
 2378         struct ifnet *ifp;
 2379 
 2380         sc = arg;
 2381         VGE_LOCK_ASSERT(sc);
 2382         vge_stats_update(sc);
 2383         callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
 2384         if (sc->vge_timer == 0 || --sc->vge_timer > 0)
 2385                 return;
 2386 
 2387         ifp = sc->vge_ifp;
 2388         if_printf(ifp, "watchdog timeout\n");
 2389         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2390 
 2391         vge_txeof(sc);
 2392         vge_rxeof(sc, VGE_RX_DESC_CNT);
 2393 
 2394         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2395         vge_init_locked(sc);
 2396 }
 2397 
 2398 /*
 2399  * Stop the adapter and free any mbufs allocated to the
 2400  * RX and TX lists.
 2401  */
 2402 static void
 2403 vge_stop(struct vge_softc *sc)
 2404 {
 2405         struct ifnet *ifp;
 2406 
 2407         VGE_LOCK_ASSERT(sc);
 2408         ifp = sc->vge_ifp;
 2409         sc->vge_timer = 0;
 2410         callout_stop(&sc->vge_watchdog);
 2411 
 2412         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2413 
 2414         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 2415         CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
 2416         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 2417         CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
 2418         CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
 2419         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
 2420 
 2421         vge_stats_update(sc);
 2422         VGE_CHAIN_RESET(sc);
 2423         vge_txeof(sc);
 2424         vge_freebufs(sc);
 2425 }
 2426 
 2427 /*
 2428  * Device suspend routine.  Stop the interface and save some PCI
 2429  * settings in case the BIOS doesn't restore them properly on
 2430  * resume.
 2431  */
 2432 static int
 2433 vge_suspend(device_t dev)
 2434 {
 2435         struct vge_softc *sc;
 2436 
 2437         sc = device_get_softc(dev);
 2438 
 2439         VGE_LOCK(sc);
 2440         vge_stop(sc);
 2441         vge_setwol(sc);
 2442         sc->vge_flags |= VGE_FLAG_SUSPENDED;
 2443         VGE_UNLOCK(sc);
 2444 
 2445         return (0);
 2446 }
 2447 
 2448 /*
 2449  * Device resume routine.  Restore some PCI settings in case the BIOS
 2450  * doesn't, re-enable busmastering, and restart the interface if
 2451  * appropriate.
 2452  */
 2453 static int
 2454 vge_resume(device_t dev)
 2455 {
 2456         struct vge_softc *sc;
 2457         struct ifnet *ifp;
 2458         uint16_t pmstat;
 2459 
 2460         sc = device_get_softc(dev);
 2461         VGE_LOCK(sc);
 2462         if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
 2463                 /* Disable PME and clear PME status. */
 2464                 pmstat = pci_read_config(sc->vge_dev,
 2465                     sc->vge_pmcap + PCIR_POWER_STATUS, 2);
 2466                 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
 2467                         pmstat &= ~PCIM_PSTAT_PMEENABLE;
 2468                         pci_write_config(sc->vge_dev,
 2469                             sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
 2470                 }
 2471         }
 2472         vge_clrwol(sc);
 2473         /* Restart MII auto-polling. */
 2474         vge_miipoll_start(sc);
 2475         ifp = sc->vge_ifp;
 2476         /* Reinitialize interface if necessary. */
 2477         if ((ifp->if_flags & IFF_UP) != 0) {
 2478                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2479                 vge_init_locked(sc);
 2480         }
 2481         sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
 2482         VGE_UNLOCK(sc);
 2483 
 2484         return (0);
 2485 }
 2486 
 2487 /*
 2488  * Stop all chip I/O so that the kernel's probe routines don't
 2489  * get confused by errant DMAs when rebooting.
 2490  */
 2491 static int
 2492 vge_shutdown(device_t dev)
 2493 {
 2494 
 2495         return (vge_suspend(dev));
 2496 }
 2497 
 2498 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)    \
 2499             SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
 2500 
 2501 static void
 2502 vge_sysctl_node(struct vge_softc *sc)
 2503 {
 2504         struct sysctl_ctx_list *ctx;
 2505         struct sysctl_oid_list *child, *parent;
 2506         struct sysctl_oid *tree;
 2507         struct vge_hw_stats *stats;
 2508 
 2509         stats = &sc->vge_stats;
 2510         ctx = device_get_sysctl_ctx(sc->vge_dev);
 2511         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
 2512 
 2513         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
 2514             CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
 2515         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
 2516             CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
 2517         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
 2518             CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
 2519 
 2520         /* Pull in device tunables. */
 2521         sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
 2522         resource_int_value(device_get_name(sc->vge_dev),
 2523             device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
 2524         sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
 2525         resource_int_value(device_get_name(sc->vge_dev),
 2526             device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
 2527         sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
 2528         resource_int_value(device_get_name(sc->vge_dev),
 2529             device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
 2530 
 2531         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
 2532             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics");
 2533         parent = SYSCTL_CHILDREN(tree);
 2534 
 2535         /* Rx statistics. */
 2536         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
 2537             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
 2538         child = SYSCTL_CHILDREN(tree);
 2539         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
 2540             &stats->rx_frames, "frames");
 2541         VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
 2542             &stats->rx_good_frames, "Good frames");
 2543         VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
 2544             &stats->rx_fifo_oflows, "FIFO overflows");
 2545         VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
 2546             &stats->rx_runts, "Too short frames");
 2547         VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
 2548             &stats->rx_runts_errs, "Too short frames with errors");
 2549         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
 2550             &stats->rx_pkts_64, "64 bytes frames");
 2551         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
 2552             &stats->rx_pkts_65_127, "65 to 127 bytes frames");
 2553         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
 2554             &stats->rx_pkts_128_255, "128 to 255 bytes frames");
 2555         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
 2556             &stats->rx_pkts_256_511, "256 to 511 bytes frames");
 2557         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
 2558             &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
 2559         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
 2560             &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 2561         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
 2562             &stats->rx_pkts_1519_max, "1519 to max frames");
 2563         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
 2564             &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
 2565         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
 2566             &stats->rx_jumbos, "Jumbo frames");
 2567         VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
 2568             &stats->rx_crcerrs, "CRC errors");
 2569         VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
 2570             &stats->rx_pause_frames, "Pause frames");
 2571         VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
 2572             &stats->rx_alignerrs, "Alignment errors");
 2573         VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
 2574             &stats->rx_nobufs, "Frames with no buffer event");
 2575         VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
 2576             &stats->rx_symerrs, "Frames with symbol errors");
 2577         VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
 2578             &stats->rx_lenerrs, "Frames with mismatched length");
 2579 
 2580         /* Tx statistics. */
 2581         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
 2582             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
 2583         child = SYSCTL_CHILDREN(tree);
 2584         VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
 2585             &stats->tx_good_frames, "Good frames");
 2586         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
 2587             &stats->tx_pkts_64, "64 bytes frames");
 2588         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
 2589             &stats->tx_pkts_65_127, "65 to 127 bytes frames");
 2590         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
 2591             &stats->tx_pkts_128_255, "128 to 255 bytes frames");
 2592         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
 2593             &stats->tx_pkts_256_511, "256 to 511 bytes frames");
 2594         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
 2595             &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
 2596         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
 2597             &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 2598         VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
 2599             &stats->tx_jumbos, "Jumbo frames");
 2600         VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
 2601             &stats->tx_colls, "Collisions");
 2602         VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
 2603             &stats->tx_latecolls, "Late collisions");
 2604         VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
 2605             &stats->tx_pause, "Pause frames");
 2606 #ifdef VGE_ENABLE_SQEERR
 2607         VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
 2608             &stats->tx_sqeerrs, "SQE errors");
 2609 #endif
 2610         /* Clear MAC statistics. */
 2611         vge_stats_clear(sc);
 2612 }
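
      /*
       * The three knobs registered above are ordinary device hints and
       * sysctls.  Illustrative settings only (the values are not
       * recommendations), for unit 0:
       *
       *      # /boot/device.hints
       *      hint.vge.0.int_holdoff="150"
       *      hint.vge.0.rx_coal_pkt="64"
       *      hint.vge.0.tx_coal_pkt="128"
       *
       *      # at runtime
       *      sysctl dev.vge.0.tx_coal_pkt=128
       *
       * Runtime changes take effect the next time the interface is
       * initialized, since vge_intr_holdoff() is called from
       * vge_init_locked().
       */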
 2613 
 2614 #undef  VGE_SYSCTL_STAT_ADD32
 2615 
 2616 static void
 2617 vge_stats_clear(struct vge_softc *sc)
 2618 {
 2619         int i;
 2620 
 2621         CSR_WRITE_1(sc, VGE_MIBCSR,
 2622             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
 2623         CSR_WRITE_1(sc, VGE_MIBCSR,
 2624             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
 2625         for (i = VGE_TIMEOUT; i > 0; i--) {
 2626                 DELAY(1);
 2627                 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
 2628                         break;
 2629         }
 2630         if (i == 0)
 2631                 device_printf(sc->vge_dev, "MIB clear timed out!\n");
 2632         CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
 2633             ~VGE_MIBCSR_FREEZE);
 2634 }
 2635 
 2636 static void
 2637 vge_stats_update(struct vge_softc *sc)
 2638 {
 2639         struct vge_hw_stats *stats;
 2640         struct ifnet *ifp;
 2641         uint32_t mib[VGE_MIB_CNT], val;
 2642         int i;
 2643 
 2644         VGE_LOCK_ASSERT(sc);
 2645 
 2646         stats = &sc->vge_stats;
 2647         ifp = sc->vge_ifp;
 2648 
 2649         CSR_WRITE_1(sc, VGE_MIBCSR,
 2650             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
 2651         for (i = VGE_TIMEOUT; i > 0; i--) {
 2652                 DELAY(1);
 2653                 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
 2654                         break;
 2655         }
 2656         if (i == 0) {
 2657                 device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
 2658                 vge_stats_clear(sc);
 2659                 return;
 2660         }
 2661 
 2662         bzero(mib, sizeof(mib));
 2663 reset_idx:
 2664         /* Set MIB read index to 0. */
 2665         CSR_WRITE_1(sc, VGE_MIBCSR,
 2666             CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
        for (i = 0; i < VGE_MIB_CNT; i++) {
                val = CSR_READ_4(sc, VGE_MIBDATA);
                if (i != VGE_MIB_DATA_IDX(val)) {
                        /* Reading interrupted. */
                        goto reset_idx;
                }
                mib[i] = val & VGE_MIB_DATA_MASK;
        }

        /* Rx stats. */
        stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
        stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
        stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
        stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
        stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
        stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
        stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
        stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
        stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
        stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
        stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
        stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
        stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
        stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
        stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
        stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
        stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
        stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
        stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
        stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

        /* Tx stats. */
        stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
        stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
        stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
        stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
        stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
        stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
        stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
        stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
        stats->tx_colls += mib[VGE_MIB_TX_COLLS];
        stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
        stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
        stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

        /* Update counters in ifnet. */
        if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);

        if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
            mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

        if_inc_counter(ifp, IFCOUNTER_OERRORS,
            mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);

        if_inc_counter(ifp, IFCOUNTER_IERRORS,
            mib[VGE_MIB_RX_FIFO_OVERRUNS] +
            mib[VGE_MIB_RX_RUNTS] +
            mib[VGE_MIB_RX_RUNTS_ERRS] +
            mib[VGE_MIB_RX_CRCERRS] +
            mib[VGE_MIB_RX_ALIGNERRS] +
            mib[VGE_MIB_RX_NOBUFS] +
            mib[VGE_MIB_RX_SYMERRS] +
            mib[VGE_MIB_RX_LENERRS]);
}

static void
vge_intr_holdoff(struct vge_softc *sc)
{
        uint8_t intctl;

        VGE_LOCK_ASSERT(sc);

        /*
         * Set Tx interrupt suppression threshold.
         * It's possible to use the single-shot timer in the VGE_CRS1
         * register in the Tx path so that the driver could suppress
         * most Tx completion interrupts.  However, this requires an
         * additional access to the VGE_CRS1 register to reload the
         * timer, in addition to activating the Tx kick command.
         * Another downside is that we don't know in advance what
         * single-shot timer value should be used, so reclaiming
         * transmitted mbufs could be delayed a lot, which in turn
         * slows down Tx operation.
         */
        CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
        CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

        /* Set Rx interrupt suppression threshold. */
        CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
        CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

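        /*
         * Enable or disable per-packet Tx/Rx interrupt suppression
         * according to the configured coalescing packet counts.
         */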
        intctl = CSR_READ_1(sc, VGE_INTCTL1);
        intctl &= ~VGE_INTCTL_SC_RELOAD;
        intctl |= VGE_INTCTL_HC_RELOAD;
        if (sc->vge_tx_coal_pkt <= 0)
                intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
        else
                intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
        if (sc->vge_rx_coal_pkt <= 0)
                intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
        else
                intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
        CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
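        /*
         * Writing the bit to the CRC3 (clear) register disables the
         * holdoff timer; it is re-enabled via CRS3 below if a holdoff
         * value is configured.
         */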
        CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
        if (sc->vge_int_holdoff > 0) {
                /* Set interrupt holdoff timer. */
                CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
                CSR_WRITE_1(sc, VGE_INTHOLDOFF,
                    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
                /* Enable holdoff timer. */
                CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
        }
}

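/*
 * Renegotiate down to a 10/100 link before suspend; WOL is typically
 * not serviceable at gigabit speed (see the warning below when no
 * 10/100 link can be established).
 */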
static void
vge_setlinkspeed(struct vge_softc *sc)
{
        struct mii_data *mii;
        int aneg, i;

        VGE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->vge_miibus);
        mii_pollstat(mii);
        aneg = 0;
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        return;
                case IFM_1000_T:
                        aneg++;
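                        /* FALLTHROUGH */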
                default:
                        break;
                }
        }
        /* Clear forced MAC speed/duplex configuration. */
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
        vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
        vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
            ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
        vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
            BMCR_AUTOEN | BMCR_STARTNEG);
        DELAY(1000);
        if (aneg != 0) {
                /* Poll link state until vge(4) gets a 10/100 link. */
                for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
                        mii_pollstat(mii);
                        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
                            == (IFM_ACTIVE | IFM_AVALID)) {
                                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                                case IFM_10_T:
                                case IFM_100_TX:
                                        return;
                                default:
                                        break;
                                }
                        }
                        VGE_UNLOCK(sc);
                        pause("vgelnk", hz);
                        VGE_LOCK(sc);
                }
                if (i == MII_ANEGTICKS_GIGE)
                        device_printf(sc->vge_dev, "establishing link failed, "
                            "WOL may not work!\n");
        }
        /*
         * No link; force the MAC to a 100Mbps, full-duplex link.
         * This is the last resort and may or may not work.
         */
        mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
        mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}

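/*
 * Program the wake-on-LAN filters and put the controller into its
 * low-power sleep state ahead of suspend or shutdown.
 */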
static void
vge_setwol(struct vge_softc *sc)
{
        struct ifnet *ifp;
        uint16_t pmstat;
        uint8_t val;

        VGE_LOCK_ASSERT(sc);

        if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
                /* No PME capability, PHY power down. */
                vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
                    BMCR_PDOWN);
                vge_miipoll_stop(sc);
                return;
        }

        ifp = sc->vge_ifp;

        /* Clear WOL on pattern match. */
        CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
        /* Disable WOL on magic/unicast packet. */
        CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
        CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
            VGE_WOLCFG_PMEOVR);
        if ((ifp->if_capenable & IFCAP_WOL) != 0) {
                vge_setlinkspeed(sc);
                val = 0;
                if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
                        val |= VGE_WOLCR1_UCAST;
                if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
                        val |= VGE_WOLCR1_MAGIC;
                CSR_WRITE_1(sc, VGE_WOLCR1S, val);
                val = 0;
                if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
                        val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
                CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
                /* Disable MII auto-polling. */
                vge_miipoll_stop(sc);
        }
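        /*
         * Force full-duplex, non-GMII (10/100) MAC operation to match
         * the link speed selected above.
         */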
        CSR_SETBIT_1(sc, VGE_DIAGCTL,
            VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

        /* Clear WOL status on pattern match. */
        CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
        CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

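        /* Set the sticky software tag; vge_clrwol() clears it again. */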
        val = CSR_READ_1(sc, VGE_PWRSTAT);
        val |= VGE_STICKHW_SWPTAG;
        CSR_WRITE_1(sc, VGE_PWRSTAT, val);
        /* Put hardware into sleep. */
        val = CSR_READ_1(sc, VGE_PWRSTAT);
        val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
        CSR_WRITE_1(sc, VGE_PWRSTAT, val);
        /* Request PME if WOL is requested. */
        pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
            PCIR_POWER_STATUS, 2);
        pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if ((ifp->if_capenable & IFCAP_WOL) != 0)
                pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
            pmstat, 2);
}

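/*
 * Undo vge_setwol(): bring the chip out of its sleep state and
 * disable all WOL filters.
 */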
static void
vge_clrwol(struct vge_softc *sc)
{
        uint8_t val;

        val = CSR_READ_1(sc, VGE_PWRSTAT);
        val &= ~VGE_STICKHW_SWPTAG;
        CSR_WRITE_1(sc, VGE_PWRSTAT, val);
        /* Disable WOL and clear power state indicator. */
        val = CSR_READ_1(sc, VGE_PWRSTAT);
        val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
        CSR_WRITE_1(sc, VGE_PWRSTAT, val);

        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
        CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

        /* Clear WOL on pattern match. */
        CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
        /* Disable WOL on magic/unicast packet. */
        CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
        CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
            VGE_WOLCFG_PMEOVR);
        /* Clear WOL status on pattern match. */
        CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
        CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}
