FreeBSD/Linux Kernel Cross Reference
sys/dev/vge/if_vge.c


    1 /*-
    2  * Copyright (c) 2004
    3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 /*
   37  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
   38  *
   39  * Written by Bill Paul <wpaul@windriver.com>
   40  * Senior Networking Software Engineer
   41  * Wind River Systems
   42  */
   43 
   44 /*
    45  * The VIA Networking VT6122 is a 32-bit, 33/66 MHz PCI device that
   46  * combines a tri-speed ethernet MAC and PHY, with the following
   47  * features:
   48  *
   49  *      o Jumbo frame support up to 16K
   50  *      o Transmit and receive flow control
   51  *      o IPv4 checksum offload
   52  *      o VLAN tag insertion and stripping
   53  *      o TCP large send
   54  *      o 64-bit multicast hash table filter
   55  *      o 64 entry CAM filter
   56  *      o 16K RX FIFO and 48K TX FIFO memory
   57  *      o Interrupt moderation
   58  *
   59  * The VT6122 supports up to four transmit DMA queues. The descriptors
   60  * in the transmit ring can address up to 7 data fragments; frames which
   61  * span more than 7 data buffers must be coalesced, but in general the
   62  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
   63  * long. The receive descriptors address only a single buffer.
   64  *
   65  * There are two peculiar design issues with the VT6122. One is that
   66  * receive data buffers must be aligned on a 32-bit boundary. This is
   67  * not a problem where the VT6122 is used as a LOM device in x86-based
   68  * systems, but on architectures that generate unaligned access traps, we
   69  * have to do some copying.
   70  *
   71  * The other issue has to do with the way 64-bit addresses are handled.
   72  * The DMA descriptors only allow you to specify 48 bits of addressing
   73  * information. The remaining 16 bits are specified using one of the
   74  * I/O registers. If you only have a 32-bit system, then this isn't
   75  * an issue, but if you have a 64-bit system and more than 4GB of
    76  * memory, you have to make sure your network data buffers reside
   77  * in the same 48-bit 'segment.'
   78  *
   79  * Special thanks to Ryan Fu at VIA Networking for providing documentation
   80  * and sample NICs for testing.
   81  */
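
/*
 * A sketch of the 48-bit split described above: the VGE_ADDR_LO() and
 * VGE_ADDR_HI() macros used later in this file (defined in if_vgereg.h)
 * presumably look something like the hypothetical EX_-prefixed versions
 * below, with the remaining bits 48-63 programmed once through an I/O
 * register as the comment explains.
 */
#define EX_ADDR_LO(y)   ((uint64_t)(y) & 0xFFFFFFFFULL)     /* bits 0-31  */
#define EX_ADDR_HI(y)   (((uint64_t)(y) >> 32) & 0xFFFFULL) /* bits 32-47 */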
   82 
   83 #include <sys/param.h>
   84 #include <sys/endian.h>
   85 #include <sys/systm.h>
   86 #include <sys/sockio.h>
   87 #include <sys/mbuf.h>
   88 #include <sys/malloc.h>
   89 #include <sys/module.h>
   90 #include <sys/kernel.h>
   91 #include <sys/socket.h>
   92 #include <sys/taskqueue.h>
   93 
   94 #include <net/if.h>
   95 #include <net/if_arp.h>
   96 #include <net/ethernet.h>
   97 #include <net/if_dl.h>
   98 #include <net/if_media.h>
   99 #include <net/if_vlan_var.h>
  100 #include <net/route.h>
  101 
  102 #include <net/bpf.h>
  103 
  104 #include <machine/bus_pio.h>
  105 #include <machine/bus_memio.h>
  106 #include <machine/bus.h>
  107 #include <machine/resource.h>
  108 #include <sys/bus.h>
  109 #include <sys/rman.h>
  110 
  111 #include <dev/mii/mii.h>
  112 #include <dev/mii/miivar.h>
  113 
  114 #include <dev/pci/pcireg.h>
  115 #include <dev/pci/pcivar.h>
  116 
  117 MODULE_DEPEND(vge, pci, 1, 1, 1);
  118 MODULE_DEPEND(vge, ether, 1, 1, 1);
  119 MODULE_DEPEND(vge, miibus, 1, 1, 1);
  120 
  121 /* "controller miibus0" required.  See GENERIC if you get errors here. */
  122 #include "miibus_if.h"
  123 
  124 #include <dev/vge/if_vgereg.h>
  125 #include <dev/vge/if_vgevar.h>
  126 
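/* TX checksum offload features; advertised via if_hwassist in vge_attach() below. */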
  127 #define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  128 
  129 /*
  130  * Various supported device vendors/types and their names.
  131  */
  132 static struct vge_type vge_devs[] = {
  133         { VIA_VENDORID, VIA_DEVICEID_61XX,
  134                 "VIA Networking Gigabit Ethernet" },
  135         { 0, 0, NULL }
  136 };
  137 
  138 static int vge_probe            (device_t);
  139 static int vge_attach           (device_t);
  140 static int vge_detach           (device_t);
  141 
  142 static int vge_encap            (struct vge_softc *, struct mbuf *, int);
  143 
  144 static void vge_dma_map_addr    (void *, bus_dma_segment_t *, int, int);
  145 static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int,
  146                                     bus_size_t, int);
  147 static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
  148                                     bus_size_t, int);
  149 static int vge_allocmem         (device_t, struct vge_softc *);
  150 static int vge_newbuf           (struct vge_softc *, int, struct mbuf *);
  151 static int vge_rx_list_init     (struct vge_softc *);
  152 static int vge_tx_list_init     (struct vge_softc *);
  153 #ifdef VGE_FIXUP_RX
  154 static __inline void vge_fixup_rx
  155                                 (struct mbuf *);
  156 #endif
  157 static void vge_rxeof           (struct vge_softc *);
  158 static void vge_txeof           (struct vge_softc *);
  159 static void vge_intr            (void *);
  160 static void vge_tick            (void *);
  161 static void vge_tx_task         (void *, int);
  162 static void vge_start           (struct ifnet *);
  163 static int vge_ioctl            (struct ifnet *, u_long, caddr_t);
  164 static void vge_init            (void *);
  165 static void vge_stop            (struct vge_softc *);
  166 static void vge_watchdog        (struct ifnet *);
  167 static int vge_suspend          (device_t);
  168 static int vge_resume           (device_t);
  169 static void vge_shutdown        (device_t);
  170 static int vge_ifmedia_upd      (struct ifnet *);
  171 static void vge_ifmedia_sts     (struct ifnet *, struct ifmediareq *);
  172 
  173 static void vge_eeprom_getword  (struct vge_softc *, int, u_int16_t *);
  174 static void vge_read_eeprom     (struct vge_softc *, caddr_t, int, int, int);
  175 
  176 static void vge_miipoll_start   (struct vge_softc *);
  177 static void vge_miipoll_stop    (struct vge_softc *);
  178 static int vge_miibus_readreg   (device_t, int, int);
  179 static int vge_miibus_writereg  (device_t, int, int, int);
  180 static void vge_miibus_statchg  (device_t);
  181 
  182 static void vge_cam_clear       (struct vge_softc *);
  183 static int vge_cam_set          (struct vge_softc *, uint8_t *);
  184 #if __FreeBSD_version < 502113
  185 static uint32_t vge_mchash      (uint8_t *);
  186 #endif
  187 static void vge_setmulti        (struct vge_softc *);
  188 static void vge_reset           (struct vge_softc *);
  189 
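/* PCI BARs: I/O space lives at config offset 0x10, memory space at 0x14. */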
  190 #define VGE_PCI_LOIO             0x10
  191 #define VGE_PCI_LOMEM            0x14
  192 
  193 static device_method_t vge_methods[] = {
  194         /* Device interface */
  195         DEVMETHOD(device_probe,         vge_probe),
  196         DEVMETHOD(device_attach,        vge_attach),
  197         DEVMETHOD(device_detach,        vge_detach),
  198         DEVMETHOD(device_suspend,       vge_suspend),
  199         DEVMETHOD(device_resume,        vge_resume),
  200         DEVMETHOD(device_shutdown,      vge_shutdown),
  201 
  202         /* bus interface */
  203         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  204         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  205 
  206         /* MII interface */
  207         DEVMETHOD(miibus_readreg,       vge_miibus_readreg),
  208         DEVMETHOD(miibus_writereg,      vge_miibus_writereg),
  209         DEVMETHOD(miibus_statchg,       vge_miibus_statchg),
  210 
  211         { 0, 0 }
  212 };
  213 
  214 static driver_t vge_driver = {
  215         "vge",
  216         vge_methods,
  217         sizeof(struct vge_softc)
  218 };
  219 
  220 static devclass_t vge_devclass;
  221 
  222 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
  223 DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0);
  224 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
  225 
  226 /*
  227  * Read a word of data stored in the EEPROM at address 'addr.'
  228  */
  229 static void
  230 vge_eeprom_getword(sc, addr, dest)
  231         struct vge_softc        *sc;
  232         int                     addr;
  233         u_int16_t               *dest;
  234 {
  235         register int            i;
  236         u_int16_t               word = 0;
  237 
  238         /*
  239          * Enter EEPROM embedded programming mode. In order to
  240          * access the EEPROM at all, we first have to set the
  241          * EELOAD bit in the CHIPCFG2 register.
  242          */
  243         CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  244         CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  245 
  246         /* Select the address of the word we want to read */
  247         CSR_WRITE_1(sc, VGE_EEADDR, addr);
  248 
  249         /* Issue read command */
  250         CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
  251 
  252         /* Wait for the done bit to be set. */
  253         for (i = 0; i < VGE_TIMEOUT; i++) {
  254                 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
  255                         break;
  256         }
  257 
  258         if (i == VGE_TIMEOUT) {
  259                 device_printf(sc->vge_dev, "EEPROM read timed out\n");
  260                 *dest = 0;
  261                 return;
  262         }
  263 
  264         /* Read the result */
  265         word = CSR_READ_2(sc, VGE_EERDDAT);
  266 
  267         /* Turn off EEPROM access mode. */
  268         CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
  269         CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
  270 
  271         *dest = word;
  272 
  273         return;
  274 }
  275 
  276 /*
  277  * Read a sequence of words from the EEPROM.
  278  */
  279 static void
  280 vge_read_eeprom(sc, dest, off, cnt, swap)
  281         struct vge_softc        *sc;
  282         caddr_t                 dest;
  283         int                     off;
  284         int                     cnt;
  285         int                     swap;
  286 {
  287         int                     i;
  288         u_int16_t               word = 0, *ptr;
  289 
  290         for (i = 0; i < cnt; i++) {
  291                 vge_eeprom_getword(sc, off + i, &word);
  292                 ptr = (u_int16_t *)(dest + (i * 2));
  293                 if (swap)
  294                         *ptr = ntohs(word);
  295                 else
  296                         *ptr = word;
  297         }
  298 }
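
/*
 * Usage example: vge_attach() below fetches the 6-byte station address
 * as three 16-bit words starting at word offset VGE_EE_EADDR:
 *
 *      vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
 */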
  299 
  300 static void
  301 vge_miipoll_stop(sc)
  302         struct vge_softc        *sc;
  303 {
  304         int                     i;
  305 
  306         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  307 
  308         for (i = 0; i < VGE_TIMEOUT; i++) {
  309                 DELAY(1);
  310                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  311                         break;
  312         }
  313 
  314         if (i == VGE_TIMEOUT)
  315                 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
  316 
  317         return;
  318 }
  319 
  320 static void
  321 vge_miipoll_start(sc)
  322         struct vge_softc        *sc;
  323 {
  324         int                     i;
  325 
  326         /* First, make sure we're idle. */
  327 
  328         CSR_WRITE_1(sc, VGE_MIICMD, 0);
  329         CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
  330 
  331         for (i = 0; i < VGE_TIMEOUT; i++) {
  332                 DELAY(1);
  333                 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
  334                         break;
  335         }
  336 
  337         if (i == VGE_TIMEOUT) {
  338                 device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
  339                 return;
  340         }
  341 
  342         /* Now enable auto poll mode. */
  343 
  344         CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
  345 
  346         /* And make sure it started. */
  347 
  348         for (i = 0; i < VGE_TIMEOUT; i++) {
  349                 DELAY(1);
  350                 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
  351                         break;
  352         }
  353 
  354         if (i == VGE_TIMEOUT)
  355                 device_printf(sc->vge_dev, "failed to start MII autopoll\n");
  356 
  357         return;
  358 }
  359 
  360 static int
  361 vge_miibus_readreg(dev, phy, reg)
  362         device_t                dev;
  363         int                     phy, reg;
  364 {
  365         struct vge_softc        *sc;
  366         int                     i;
  367         u_int16_t               rval = 0;
  368 
  369         sc = device_get_softc(dev);
  370 
  371         if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
  372                 return(0);
  373 
  374         VGE_LOCK(sc);
  375         vge_miipoll_stop(sc);
  376 
  377         /* Specify the register we want to read. */
  378         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  379 
  380         /* Issue read command. */
  381         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
  382 
  383         /* Wait for the read command bit to self-clear. */
  384         for (i = 0; i < VGE_TIMEOUT; i++) {
  385                 DELAY(1);
  386                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
  387                         break;
  388         }
  389 
  390         if (i == VGE_TIMEOUT)
  391                 device_printf(sc->vge_dev, "MII read timed out\n");
  392         else
  393                 rval = CSR_READ_2(sc, VGE_MIIDATA);
  394 
  395         vge_miipoll_start(sc);
  396         VGE_UNLOCK(sc);
  397 
  398         return (rval);
  399 }
  400 
  401 static int
  402 vge_miibus_writereg(dev, phy, reg, data)
  403         device_t                dev;
  404         int                     phy, reg, data;
  405 {
  406         struct vge_softc        *sc;
  407         int                     i, rval = 0;
  408 
  409         sc = device_get_softc(dev);
  410 
  411         if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
  412                 return(0);
  413 
  414         VGE_LOCK(sc);
  415         vge_miipoll_stop(sc);
  416 
  417         /* Specify the register we want to write. */
  418         CSR_WRITE_1(sc, VGE_MIIADDR, reg);
  419 
  420         /* Specify the data we want to write. */
  421         CSR_WRITE_2(sc, VGE_MIIDATA, data);
  422 
  423         /* Issue write command. */
  424         CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
  425 
  426         /* Wait for the write command bit to self-clear. */
  427         for (i = 0; i < VGE_TIMEOUT; i++) {
  428                 DELAY(1);
  429                 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
  430                         break;
  431         }
  432 
  433         if (i == VGE_TIMEOUT) {
  434                 device_printf(sc->vge_dev, "MII write timed out\n");
  435                 rval = EIO;
  436         }
  437 
  438         vge_miipoll_start(sc);
  439         VGE_UNLOCK(sc);
  440 
  441         return (rval);
  442 }
  443 
  444 static void
  445 vge_cam_clear(sc)
  446         struct vge_softc        *sc;
  447 {
  448         int                     i;
  449 
  450         /*
  451          * Turn off all the mask bits. This tells the chip
  452          * that none of the entries in the CAM filter are valid.
   453          * The desired entries will be enabled as we fill the filter in.
  454          */
  455 
  456         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  457         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  458         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
  459         for (i = 0; i < 8; i++)
  460                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  461 
  462         /* Clear the VLAN filter too. */
  463 
  464         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
  465         for (i = 0; i < 8; i++)
  466                 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
  467 
  468         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  469         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  470         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  471 
  472         sc->vge_camidx = 0;
  473 
  474         return;
  475 }
  476 
  477 static int
  478 vge_cam_set(sc, addr)
  479         struct vge_softc        *sc;
  480         uint8_t                 *addr;
  481 {
  482         int                     i, error = 0;
  483 
  484         if (sc->vge_camidx == VGE_CAM_MAXADDRS)
  485                 return(ENOSPC);
  486 
  487         /* Select the CAM data page. */
  488         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  489         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
  490 
  491         /* Set the filter entry we want to update and enable writing. */
  492         CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
  493 
  494         /* Write the address to the CAM registers */
  495         for (i = 0; i < ETHER_ADDR_LEN; i++)
  496                 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
  497 
  498         /* Issue a write command. */
  499         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
  500 
   501         /* Wait for it to clear. */
  502         for (i = 0; i < VGE_TIMEOUT; i++) {
  503                 DELAY(1);
  504                 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
  505                         break;
  506         }
  507 
  508         if (i == VGE_TIMEOUT) {
  509                 device_printf(sc->vge_dev, "setting CAM filter failed\n");
  510                 error = EIO;
  511                 goto fail;
  512         }
  513 
  514         /* Select the CAM mask page. */
  515         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  516         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
  517 
  518         /* Set the mask bit that enables this filter. */
  519         CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
  520             1<<(sc->vge_camidx & 7));
  521 
  522         sc->vge_camidx++;
  523 
  524 fail:
  525         /* Turn off access to CAM. */
  526         CSR_WRITE_1(sc, VGE_CAMADDR, 0);
  527         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
  528         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
  529 
  530         return (error);
  531 }
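
/*
 * A worked example of the mask arithmetic above: for vge_camidx == 13,
 * the CSR_SETBIT_1() lands on VGE_CAM0 + 13/8 = VGE_CAM0 + 1 and sets
 * bit 13 & 7 = 5, i.e. bit 5 of the mask byte covering entries 8-15.
 */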
  532 
  533 #if __FreeBSD_version < 502113
  534 static uint32_t
  535 vge_mchash(addr)
  536         uint8_t                 *addr;
  537 {
  538         uint32_t                crc, carry;
  539         int                     idx, bit;
  540         uint8_t                 data;
  541 
  542         /* Compute CRC for the address value. */
  543         crc = 0xFFFFFFFF; /* initial value */
  544 
  545         for (idx = 0; idx < 6; idx++) {
  546                 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
  547                         carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
  548                         crc <<= 1;
  549                         if (carry)
  550                                 crc = (crc ^ 0x04c11db6) | carry;
  551                 }
  552         }
  553 
  554         return(crc);
  555 }
  556 #endif
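
/*
 * The loop above is a bitwise big-endian CRC-32: because the bit shifted
 * in by 'crc <<= 1' is always zero, '(crc ^ 0x04c11db6) | carry' is
 * equivalent to XORing with the standard polynomial 0x04c11db7. That is
 * why newer FreeBSD versions can use the stock routine instead, as
 * vge_setmulti() does below:
 *
 *      h = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
 */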
  557 
  558 /*
  559  * Program the multicast filter. We use the 64-entry CAM filter
   560  * for perfect filtering. If there are more than 64 multicast addresses,
   561  * we use the hash filter instead.
  562  */
  563 static void
  564 vge_setmulti(sc)
  565         struct vge_softc        *sc;
  566 {
  567         struct ifnet            *ifp;
  568         int                     error = 0/*, h = 0*/;
  569         struct ifmultiaddr      *ifma;
  570         u_int32_t               h, hashes[2] = { 0, 0 };
  571 
  572         ifp = &sc->arpcom.ac_if;
  573 
  574         /* First, zot all the multicast entries. */
  575         vge_cam_clear(sc);
  576         CSR_WRITE_4(sc, VGE_MAR0, 0);
  577         CSR_WRITE_4(sc, VGE_MAR1, 0);
  578 
  579         /*
  580          * If the user wants allmulti or promisc mode, enable reception
  581          * of all multicast frames.
  582          */
  583         if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
  584                 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
  585                 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
  586                 return;
  587         }
  588 
  589         /* Now program new ones */
  590         IF_ADDR_LOCK(ifp);
  591         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  592                 if (ifma->ifma_addr->sa_family != AF_LINK)
  593                         continue;
  594                 error = vge_cam_set(sc,
  595                     LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
  596                 if (error)
  597                         break;
  598         }
  599 
  600         /* If there were too many addresses, use the hash filter. */
  601         if (error) {
  602                 vge_cam_clear(sc);
  603 
  604                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  605                         if (ifma->ifma_addr->sa_family != AF_LINK)
  606                                 continue;
  607 #if __FreeBSD_version < 502113
  608                         h = vge_mchash(LLADDR((struct sockaddr_dl *)
  609                             ifma->ifma_addr)) >> 26;
  610 #else
  611                         h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  612                             ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
  613 #endif
  614                         if (h < 32)
  615                                 hashes[0] |= (1 << h);
  616                         else
  617                                 hashes[1] |= (1 << (h - 32));
  618                 }
  619 
  620                 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
  621                 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
  622         }
  623         IF_ADDR_UNLOCK(ifp);
  624 
  625         return;
  626 }
  627 
  628 static void
  629 vge_reset(sc)
  630         struct vge_softc                *sc;
  631 {
  632         register int            i;
  633 
  634         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
  635 
  636         for (i = 0; i < VGE_TIMEOUT; i++) {
  637                 DELAY(5);
  638                 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
  639                         break;
  640         }
  641 
  642         if (i == VGE_TIMEOUT) {
   643                 device_printf(sc->vge_dev, "soft reset timed out\n");
  644                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
  645                 DELAY(2000);
  646         }
  647 
  648         DELAY(5000);
  649 
  650         CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
  651 
  652         for (i = 0; i < VGE_TIMEOUT; i++) {
  653                 DELAY(5);
  654                 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
  655                         break;
  656         }
  657 
  658         if (i == VGE_TIMEOUT) {
  659                 device_printf(sc->vge_dev, "EEPROM reload timed out\n");
  660                 return;
  661         }
  662 
  663         CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
  664 
  665         return;
  666 }
  667 
  668 /*
  669  * Probe for a VIA gigabit chip. Check the PCI vendor and device
  670  * IDs against our list and return a device name if we find a match.
  671  */
  672 static int
  673 vge_probe(dev)
  674         device_t                dev;
  675 {
  676         struct vge_type         *t;
  677         struct vge_softc        *sc;
  678 
  679         t = vge_devs;
  680         sc = device_get_softc(dev);
  681 
  682         while (t->vge_name != NULL) {
  683                 if ((pci_get_vendor(dev) == t->vge_vid) &&
  684                     (pci_get_device(dev) == t->vge_did)) {
  685                         device_set_desc(dev, t->vge_name);
  686                         return (BUS_PROBE_DEFAULT);
  687                 }
  688                 t++;
  689         }
  690 
  691         return (ENXIO);
  692 }
  693 
  694 static void
  695 vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
  696         void                    *arg;
  697         bus_dma_segment_t       *segs;
  698         int                     nseg;
  699         bus_size_t              mapsize;
  700         int                     error;
  701 {
  702 
  703         struct vge_dmaload_arg  *ctx;
  704         struct vge_rx_desc      *d = NULL;
  705 
  706         if (error)
  707                 return;
  708 
  709         ctx = arg;
  710 
   711         /* Signal error to caller if there are too many segments */
  712         if (nseg > ctx->vge_maxsegs) {
  713                 ctx->vge_maxsegs = 0;
  714                 return;
  715         }
  716 
  717         /*
  718          * Map the segment array into descriptors.
  719          */
  720 
  721         d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
  722 
  723         /* If this descriptor is still owned by the chip, bail. */
  724 
  725         if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
  726                 device_printf(ctx->sc->vge_dev,
  727                     "tried to map busy descriptor\n");
  728                 ctx->vge_maxsegs = 0;
  729                 return;
  730         }
  731 
  732         d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
  733         d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
  734         d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
  735         d->vge_sts = 0;
  736         d->vge_ctl = 0;
  737 
  738         ctx->vge_maxsegs = 1;
  739 
  740         return;
  741 }
  742 
  743 static void
  744 vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
  745         void                    *arg;
  746         bus_dma_segment_t       *segs;
  747         int                     nseg;
  748         bus_size_t              mapsize;
  749         int                     error;
  750 {
  751         struct vge_dmaload_arg  *ctx;
  752         struct vge_tx_desc      *d = NULL;
  753         struct vge_tx_frag      *f;
  754         int                     i = 0;
  755 
  756         if (error)
  757                 return;
  758 
  759         ctx = arg;
  760 
   761         /* Signal error to caller if there are too many segments */
  762         if (nseg > ctx->vge_maxsegs) {
  763                 ctx->vge_maxsegs = 0;
  764                 return;
  765         }
  766 
  767         /* Map the segment array into descriptors. */
  768 
  769         d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
  770 
  771         /* If this descriptor is still owned by the chip, bail. */
  772 
  773         if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
  774                 ctx->vge_maxsegs = 0;
  775                 return;
  776         }
  777 
  778         for (i = 0; i < nseg; i++) {
  779                 f = &d->vge_frag[i];
  780                 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
  781                 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
  782                 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
  783         }
  784 
  785         /* Argh. This chip does not autopad short frames */
  786 
  787         if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
  788                 f = &d->vge_frag[i];
  789                 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
  790                     ctx->vge_m0->m_pkthdr.len));
  791                 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
  792                 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
  793                 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
  794                 i++;
  795         }
  796 
  797         /*
  798          * When telling the chip how many segments there are, we
  799          * must use nsegs + 1 instead of just nsegs. Darned if I
  800          * know why.
  801          */
  802         i++;
  803 
  804         d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
  805         d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
  806 
  807         if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
  808                 d->vge_ctl |= VGE_TDCTL_JUMBO;
  809 
  810         ctx->vge_maxsegs = nseg;
  811 
  812         return;
  813 }
  814 
  815 /*
  816  * Map a single buffer address.
  817  */
  818 
  819 static void
  820 vge_dma_map_addr(arg, segs, nseg, error)
  821         void                    *arg;
  822         bus_dma_segment_t       *segs;
  823         int                     nseg;
  824         int                     error;
  825 {
  826         bus_addr_t              *addr;
  827 
  828         if (error)
  829                 return;
  830 
  831         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  832         addr = arg;
  833         *addr = segs->ds_addr;
  834 
  835         return;
  836 }
  837 
  838 static int
  839 vge_allocmem(dev, sc)
  840         device_t                dev;
  841         struct vge_softc                *sc;
  842 {
  843         int                     error;
  844         int                     nseg;
  845         int                     i;
  846 
  847         /*
   848          * Allocate a tag for the mbuf maps (used for both RX and TX buffers).
  849          */
  850         nseg = 32;
  851         error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
  852             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
  853             NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
  854             NULL, NULL, &sc->vge_ldata.vge_mtag);
  855         if (error) {
  856                 device_printf(dev, "could not allocate dma tag\n");
  857                 return (ENOMEM);
  858         }
  859 
  860         /*
  861          * Allocate map for TX descriptor list.
  862          */
  863         error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
  864             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
  865             NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
  866             NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
  867         if (error) {
  868                 device_printf(dev, "could not allocate dma tag\n");
  869                 return (ENOMEM);
  870         }
  871 
  872         /* Allocate DMA'able memory for the TX ring */
  873 
  874         error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
  875             (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
  876             &sc->vge_ldata.vge_tx_list_map);
  877         if (error)
  878                 return (ENOMEM);
  879 
  880         /* Load the map for the TX ring. */
  881 
  882         error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
  883              sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
  884              VGE_TX_LIST_SZ, vge_dma_map_addr,
  885              &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);
  886 
  887         /* Create DMA maps for TX buffers */
  888 
  889         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
  890                 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
  891                             &sc->vge_ldata.vge_tx_dmamap[i]);
  892                 if (error) {
  893                         device_printf(dev, "can't create DMA map for TX\n");
  894                         return (ENOMEM);
  895                 }
  896         }
  897 
  898         /*
  899          * Allocate map for RX descriptor list.
  900          */
  901         error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
  902             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
   903             NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
  904             NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
  905         if (error) {
  906                 device_printf(dev, "could not allocate dma tag\n");
  907                 return (ENOMEM);
  908         }
  909 
  910         /* Allocate DMA'able memory for the RX ring */
  911 
  912         error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
  913             (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
  914             &sc->vge_ldata.vge_rx_list_map);
  915         if (error)
  916                 return (ENOMEM);
  917 
  918         /* Load the map for the RX ring. */
  919 
  920         error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
  921              sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
   922              VGE_RX_LIST_SZ, vge_dma_map_addr,
  923              &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);
  924 
  925         /* Create DMA maps for RX buffers */
  926 
  927         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
  928                 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
  929                             &sc->vge_ldata.vge_rx_dmamap[i]);
  930                 if (error) {
  931                         device_printf(dev, "can't create DMA map for RX\n");
  932                         return (ENOMEM);
  933                 }
  934         }
  935 
  936         return (0);
  937 }
  938 
  939 /*
  940  * Attach the interface. Allocate softc structures, do ifmedia
  941  * setup and ethernet/BPF attach.
  942  */
  943 static int
  944 vge_attach(dev)
  945         device_t                dev;
  946 {
  947         u_char                  eaddr[ETHER_ADDR_LEN];
  948         struct vge_softc        *sc;
  949         struct ifnet            *ifp;
  950         int                     unit, error = 0, rid;
  951 
  952         sc = device_get_softc(dev);
  953         unit = device_get_unit(dev);
  954         sc->vge_dev = dev;
  955 
  956         mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  957             MTX_DEF | MTX_RECURSE);
  958         /*
  959          * Map control/status registers.
  960          */
  961         pci_enable_busmaster(dev);
  962 
  963         rid = VGE_PCI_LOMEM;
  964         sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
  965             0, ~0, 1, RF_ACTIVE);
  966 
  967         if (sc->vge_res == NULL) {
  968                 printf ("vge%d: couldn't map ports/memory\n", unit);
  969                 error = ENXIO;
  970                 goto fail;
  971         }
  972 
  973         sc->vge_btag = rman_get_bustag(sc->vge_res);
  974         sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
  975 
  976         /* Allocate interrupt */
  977         rid = 0;
  978         sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
  979             0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
  980 
  981         if (sc->vge_irq == NULL) {
  982                 printf("vge%d: couldn't map interrupt\n", unit);
  983                 error = ENXIO;
  984                 goto fail;
  985         }
  986 
  987         /* Reset the adapter. */
  988         vge_reset(sc);
  989 
  990         /*
  991          * Get station address from the EEPROM.
  992          */
  993         vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
  994 
  995         sc->vge_unit = unit;
  996         bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
  997 
  998 #if __FreeBSD_version < 502113
  999         printf("vge%d: Ethernet address: %6D\n", unit, eaddr, ":");
 1000 #endif
 1001 
 1002         /*
 1003          * Allocate the parent bus DMA tag appropriate for PCI.
 1004          */
 1005 #define VGE_NSEG_NEW 32
 1006         error = bus_dma_tag_create(NULL,        /* parent */
 1007                         1, 0,                   /* alignment, boundary */
 1008                         BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
 1009                         BUS_SPACE_MAXADDR,      /* highaddr */
 1010                         NULL, NULL,             /* filter, filterarg */
 1011                         MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
 1012                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
 1013                         BUS_DMA_ALLOCNOW,       /* flags */
 1014                         NULL, NULL,             /* lockfunc, lockarg */
 1015                         &sc->vge_parent_tag);
 1016         if (error)
 1017                 goto fail;
 1018 
 1019         error = vge_allocmem(dev, sc);
 1020 
 1021         if (error)
 1022                 goto fail;
 1023 
 1024         /* Do MII setup */
 1025         if (mii_phy_probe(dev, &sc->vge_miibus,
 1026             vge_ifmedia_upd, vge_ifmedia_sts)) {
 1027                 printf("vge%d: MII without any phy!\n", sc->vge_unit);
 1028                 error = ENXIO;
 1029                 goto fail;
 1030         }
 1031 
 1032         ifp = &sc->arpcom.ac_if;
 1033         ifp->if_softc = sc;
 1034         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1035         ifp->if_mtu = ETHERMTU;
 1036         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1037         ifp->if_ioctl = vge_ioctl;
 1038         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1039         ifp->if_start = vge_start;
 1040         ifp->if_hwassist = VGE_CSUM_FEATURES;
 1041         ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
 1042 #ifdef DEVICE_POLLING
 1043 #ifdef IFCAP_POLLING
 1044         ifp->if_capabilities |= IFCAP_POLLING;
 1045 #endif
 1046 #endif
 1047         ifp->if_watchdog = vge_watchdog;
 1048         ifp->if_init = vge_init;
 1049         ifp->if_baudrate = 1000000000;
 1050         ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN;
 1051         ifp->if_capenable = ifp->if_capabilities;
 1052 
 1053         TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);
 1054 
 1055         /*
 1056          * Call MI attach routine.
 1057          */
 1058         ether_ifattach(ifp, eaddr);
 1059 
 1060         /* Hook interrupt last to avoid having to lock softc */
 1061         error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
 1062             vge_intr, sc, &sc->vge_intrhand);
 1063 
 1064         if (error) {
 1065                 printf("vge%d: couldn't set up irq\n", unit);
 1066                 ether_ifdetach(ifp);
 1067                 goto fail;
 1068         }
 1069 
 1070 fail:
 1071         if (error)
 1072                 vge_detach(dev);
 1073 
 1074         return (error);
 1075 }
 1076 
 1077 /*
 1078  * Shutdown hardware and free up resources. This can be called any
 1079  * time after the mutex has been initialized. It is called in both
 1080  * the error case in attach and the normal detach case so it needs
 1081  * to be careful about only freeing resources that have actually been
 1082  * allocated.
 1083  */
 1084 static int
 1085 vge_detach(dev)
 1086         device_t                dev;
 1087 {
 1088         struct vge_softc                *sc;
 1089         struct ifnet            *ifp;
 1090         int                     i;
 1091 
 1092         sc = device_get_softc(dev);
 1093         KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
 1094         ifp = &sc->arpcom.ac_if;
 1095 
 1096         /* These should only be active if attach succeeded */
 1097         if (device_is_attached(dev)) {
 1098                 vge_stop(sc);
 1099                 /*
 1100                  * Force off the IFF_UP flag here, in case someone
 1101                  * still had a BPF descriptor attached to this
  1102                  * interface. If they do, ether_ifdetach() will cause
 1103                  * the BPF code to try and clear the promisc mode
 1104                  * flag, which will bubble down to vge_ioctl(),
 1105                  * which will try to call vge_init() again. This will
 1106                  * turn the NIC back on and restart the MII ticker,
 1107                  * which will panic the system when the kernel tries
 1108                  * to invoke the vge_tick() function that isn't there
 1109                  * anymore.
 1110                  */
 1111                 ifp->if_flags &= ~IFF_UP;
 1112                 ether_ifdetach(ifp);
 1113         }
 1114         if (sc->vge_miibus)
 1115                 device_delete_child(dev, sc->vge_miibus);
 1116         bus_generic_detach(dev);
 1117 
 1118         if (sc->vge_intrhand)
 1119                 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
 1120         if (sc->vge_irq)
 1121                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
 1122         if (sc->vge_res)
 1123                 bus_release_resource(dev, SYS_RES_MEMORY,
 1124                     VGE_PCI_LOMEM, sc->vge_res);
 1125 
 1126         /* Unload and free the RX DMA ring memory and map */
 1127 
 1128         if (sc->vge_ldata.vge_rx_list_tag) {
 1129                 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
 1130                     sc->vge_ldata.vge_rx_list_map);
 1131                 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
 1132                     sc->vge_ldata.vge_rx_list,
 1133                     sc->vge_ldata.vge_rx_list_map);
 1134                 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
 1135         }
 1136 
 1137         /* Unload and free the TX DMA ring memory and map */
 1138 
 1139         if (sc->vge_ldata.vge_tx_list_tag) {
 1140                 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
 1141                     sc->vge_ldata.vge_tx_list_map);
 1142                 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
 1143                     sc->vge_ldata.vge_tx_list,
 1144                     sc->vge_ldata.vge_tx_list_map);
 1145                 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
 1146         }
 1147 
 1148         /* Destroy all the RX and TX buffer maps */
 1149 
 1150         if (sc->vge_ldata.vge_mtag) {
 1151                 for (i = 0; i < VGE_TX_DESC_CNT; i++)
 1152                         bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
 1153                             sc->vge_ldata.vge_tx_dmamap[i]);
 1154                 for (i = 0; i < VGE_RX_DESC_CNT; i++)
 1155                         bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
 1156                             sc->vge_ldata.vge_rx_dmamap[i]);
 1157                 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
 1158         }
 1159 
 1160         if (sc->vge_parent_tag)
 1161                 bus_dma_tag_destroy(sc->vge_parent_tag);
 1162 
 1163         VGE_UNLOCK(sc);
 1164         mtx_destroy(&sc->vge_mtx);
 1165 
 1166         return (0);
 1167 }
 1168 
 1169 static int
 1170 vge_newbuf(sc, idx, m)
 1171         struct vge_softc        *sc;
 1172         int                     idx;
 1173         struct mbuf             *m;
 1174 {
 1175         struct vge_dmaload_arg  arg;
 1176         struct mbuf             *n = NULL;
 1177         int                     i, error;
 1178 
 1179         if (m == NULL) {
 1180                 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1181                 if (n == NULL)
 1182                         return (ENOBUFS);
 1183                 m = n;
 1184         } else
 1185                 m->m_data = m->m_ext.ext_buf;
 1186 
 1187 
 1188 #ifdef VGE_FIXUP_RX
 1189         /*
 1190          * This is part of an evil trick to deal with non-x86 platforms.
 1191          * The VIA chip requires RX buffers to be aligned on 32-bit
 1192          * boundaries, but that will hose non-x86 machines. To get around
 1193          * this, we leave some empty space at the start of each buffer
 1194          * and for non-x86 hosts, we copy the buffer back two bytes
 1195          * to achieve word alignment. This is slightly more efficient
 1196          * than allocating a new buffer, copying the contents, and
 1197          * discarding the old buffer.
 1198          */
 1199         m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
 1200         m_adj(m, VGE_ETHER_ALIGN);
 1201 #else
 1202         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1203 #endif
 1204 
 1205         arg.sc = sc;
 1206         arg.vge_idx = idx;
 1207         arg.vge_maxsegs = 1;
 1208         arg.vge_flags = 0;
 1209 
 1210         error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
 1211             sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
 1212             &arg, BUS_DMA_NOWAIT);
 1213         if (error || arg.vge_maxsegs != 1) {
 1214                 if (n != NULL)
 1215                         m_freem(n);
 1216                 return (ENOMEM);
 1217         }
 1218 
 1219         /*
 1220          * Note: the manual fails to document the fact that for
  1221          * proper operation, the driver needs to replenish the RX
 1222          * DMA ring 4 descriptors at a time (rather than one at a
 1223          * time, like most chips). We can allocate the new buffers
 1224          * but we should not set the OWN bits until we're ready
 1225          * to hand back 4 of them in one shot.
 1226          */
 1227 
 1228 #define VGE_RXCHUNK 4
 1229         sc->vge_rx_consumed++;
 1230         if (sc->vge_rx_consumed == VGE_RXCHUNK) {
 1231                 for (i = idx; i != idx - sc->vge_rx_consumed; i--)
 1232                         sc->vge_ldata.vge_rx_list[i].vge_sts |=
 1233                             htole32(VGE_RDSTS_OWN);
 1234                 sc->vge_rx_consumed = 0;
 1235         }
 1236 
 1237         sc->vge_ldata.vge_rx_mbuf[idx] = m;
 1238 
 1239         bus_dmamap_sync(sc->vge_ldata.vge_mtag,
 1240             sc->vge_ldata.vge_rx_dmamap[idx],
 1241             BUS_DMASYNC_PREREAD);
 1242 
 1243         return (0);
 1244 }
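
/*
 * A worked example of the VGE_RXCHUNK logic above: vge_rx_list_init()
 * fills descriptors 0, 1, 2, ... in order, so vge_rx_consumed reaches 4
 * at idx == 3, 7, 11, ... and the hand-back loop then flips the OWN bit
 * on descriptors idx, idx-1, idx-2 and idx-3 in one shot.
 */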
 1245 
 1246 static int
 1247 vge_tx_list_init(sc)
 1248         struct vge_softc                *sc;
 1249 {
 1250         bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
 1251         bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
 1252             (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
 1253 
 1254         bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
 1255             sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
 1256         sc->vge_ldata.vge_tx_prodidx = 0;
 1257         sc->vge_ldata.vge_tx_considx = 0;
 1258         sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
 1259 
 1260         return (0);
 1261 }
 1262 
 1263 static int
 1264 vge_rx_list_init(sc)
 1265         struct vge_softc                *sc;
 1266 {
 1267         int                     i;
 1268 
 1269         bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
 1270         bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
 1271             (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));
 1272 
 1273         sc->vge_rx_consumed = 0;
 1274 
 1275         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
 1276                 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
 1277                         return (ENOBUFS);
 1278         }
 1279 
 1280         /* Flush the RX descriptors */
 1281 
 1282         bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
 1283             sc->vge_ldata.vge_rx_list_map,
 1284             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1285 
 1286         sc->vge_ldata.vge_rx_prodidx = 0;
 1287         sc->vge_rx_consumed = 0;
 1288         sc->vge_head = sc->vge_tail = NULL;
 1289 
 1290         return (0);
 1291 }
 1292 
 1293 #ifdef VGE_FIXUP_RX
 1294 static __inline void
 1295 vge_fixup_rx(m)
 1296         struct mbuf             *m;
 1297 {
 1298         int                     i;
 1299         uint16_t                *src, *dst;
 1300 
 1301         src = mtod(m, uint16_t *);
 1302         dst = src - 1;
 1303 
 1304         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 1305                 *dst++ = *src++;
 1306 
 1307         m->m_data -= ETHER_ALIGN;
 1308 
 1309         return;
 1310 }
 1311 #endif
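
/*
 * A worked example of the two-byte shuffle above, assuming the headroom
 * left by vge_newbuf() is one 32-bit word: the chip DMAs the frame to
 * cluster offset 4 (satisfying its 32-bit alignment rule), putting the
 * IP header at 4 + 14 = 18, which faults on strict-alignment CPUs;
 * after copying everything back one uint16_t, it sits at 16, a 4-byte
 * boundary.
 */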
 1312 
 1313 /*
 1314  * RX handler. We support the reception of jumbo frames that have
 1315  * been fragmented across multiple 2K mbuf cluster buffers.
 1316  */
 1317 static void
 1318 vge_rxeof(sc)
 1319         struct vge_softc        *sc;
 1320 {
 1321         struct mbuf             *m;
 1322         struct ifnet            *ifp;
 1323         int                     i, total_len;
 1324         int                     lim = 0;
 1325         struct vge_rx_desc      *cur_rx;
 1326         u_int32_t               rxstat, rxctl;
 1327 
 1328         VGE_LOCK_ASSERT(sc);
 1329         ifp = &sc->arpcom.ac_if;
 1330         i = sc->vge_ldata.vge_rx_prodidx;
 1331 
 1332         /* Invalidate the descriptor memory */
 1333 
 1334         bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
 1335             sc->vge_ldata.vge_rx_list_map,
 1336             BUS_DMASYNC_POSTREAD);
 1337 
 1338         while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
 1339 
 1340 #ifdef DEVICE_POLLING
 1341                 if (ifp->if_flags & IFF_POLLING) {
 1342                         if (sc->rxcycles <= 0)
 1343                                 break;
 1344                         sc->rxcycles--;
 1345                 }
 1346 #endif /* DEVICE_POLLING */
 1347 
 1348                 cur_rx = &sc->vge_ldata.vge_rx_list[i];
 1349                 m = sc->vge_ldata.vge_rx_mbuf[i];
 1350                 total_len = VGE_RXBYTES(cur_rx);
 1351                 rxstat = le32toh(cur_rx->vge_sts);
 1352                 rxctl = le32toh(cur_rx->vge_ctl);
 1353 
 1354                 /* Invalidate the RX mbuf and unload its map */
 1355 
 1356                 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
 1357                     sc->vge_ldata.vge_rx_dmamap[i],
 1358                     BUS_DMASYNC_POSTWRITE);
 1359                 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
 1360                     sc->vge_ldata.vge_rx_dmamap[i]);
 1361 
 1362                 /*
 1363                  * If the 'start of frame' bit is set, this indicates
 1364                  * either the first fragment in a multi-fragment receive,
 1365                  * or an intermediate fragment. Either way, we want to
 1366                  * accumulate the buffers.
 1367                  */
 1368                 if (rxstat & VGE_RXPKT_SOF) {
 1369                         m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
 1370                         if (sc->vge_head == NULL)
 1371                                 sc->vge_head = sc->vge_tail = m;
 1372                         else {
 1373                                 m->m_flags &= ~M_PKTHDR;
 1374                                 sc->vge_tail->m_next = m;
 1375                                 sc->vge_tail = m;
 1376                         }
 1377                         vge_newbuf(sc, i, NULL);
 1378                         VGE_RX_DESC_INC(i);
 1379                         continue;
 1380                 }
 1381 
 1382                 /*
 1383                  * Bad/error frames will have the RXOK bit cleared.
 1384                  * However, there's one error case we want to allow:
 1385                  * if a VLAN tagged frame arrives and the chip can't
 1386                  * match it against the CAM filter, it considers this
 1387                  * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
 1388                  * We don't want to drop the frame though: our VLAN
 1389                  * filtering is done in software.
 1390                  */
 1391                 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
 1392                     && !(rxstat & VGE_RDSTS_CSUMERR)) {
 1393                         ifp->if_ierrors++;
 1394                         /*
 1395                          * If this is part of a multi-fragment packet,
 1396                          * discard all the pieces.
 1397                          */
 1398                         if (sc->vge_head != NULL) {
 1399                                 m_freem(sc->vge_head);
 1400                                 sc->vge_head = sc->vge_tail = NULL;
 1401                         }
 1402                         vge_newbuf(sc, i, m);
 1403                         VGE_RX_DESC_INC(i);
 1404                         continue;
 1405                 }
 1406 
 1407                 /*
 1408                  * If allocating a replacement mbuf fails,
 1409                  * reload the current one.
 1410                  */
 1411 
 1412                 if (vge_newbuf(sc, i, NULL)) {
 1413                         ifp->if_ierrors++;
 1414                         if (sc->vge_head != NULL) {
 1415                                 m_freem(sc->vge_head);
 1416                                 sc->vge_head = sc->vge_tail = NULL;
 1417                         }
 1418                         vge_newbuf(sc, i, m);
 1419                         VGE_RX_DESC_INC(i);
 1420                         continue;
 1421                 }
 1422 
 1423                 VGE_RX_DESC_INC(i);
 1424 
 1425                 if (sc->vge_head != NULL) {
 1426                         m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
 1427                         /*
  1428                          * Special case: if there are 4 bytes or less
  1429                          * in this buffer, the mbuf can be discarded:
  1430                          * the last 4 bytes are the CRC, which we don't
 1431                          * care about anyway.
 1432                          */
 1433                         if (m->m_len <= ETHER_CRC_LEN) {
 1434                                 sc->vge_tail->m_len -=
 1435                                     (ETHER_CRC_LEN - m->m_len);
 1436                                 m_freem(m);
 1437                         } else {
 1438                                 m->m_len -= ETHER_CRC_LEN;
 1439                                 m->m_flags &= ~M_PKTHDR;
 1440                                 sc->vge_tail->m_next = m;
 1441                         }
 1442                         m = sc->vge_head;
 1443                         sc->vge_head = sc->vge_tail = NULL;
 1444                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
 1445                 } else
 1446                         m->m_pkthdr.len = m->m_len =
 1447                             (total_len - ETHER_CRC_LEN);
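                      /*
                       * A worked example of the CRC trimming above,
                       * assuming VGE_ETHER_ALIGN is 2 so that each
                       * cluster holds MCLBYTES - 2 = 2046 bytes: a
                       * 2048-byte frame (CRC included) arrives as
                       * fragments of 2046 and 2 bytes.  The 2-byte tail
                       * is pure CRC and is freed; the other 2 CRC bytes
                       * are trimmed from the previous mbuf, leaving a
                       * 2044-byte packet.
                       */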
 1448 
 1449 #ifdef VGE_FIXUP_RX
 1450                 vge_fixup_rx(m);
 1451 #endif
 1452                 ifp->if_ipackets++;
 1453                 m->m_pkthdr.rcvif = ifp;
 1454 
 1455                 /* Do RX checksumming if enabled */
 1456                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 1457 
 1458                         /* Check IP header checksum */
 1459                         if (rxctl & VGE_RDCTL_IPPKT)
 1460                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1461                         if (rxctl & VGE_RDCTL_IPCSUMOK)
 1462                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1463 
 1464                         /* Check TCP/UDP checksum */
 1465                         if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
 1466                             rxctl & VGE_RDCTL_PROTOCSUMOK) {
 1467                                 m->m_pkthdr.csum_flags |=
 1468                                     CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 1469                                 m->m_pkthdr.csum_data = 0xffff;
 1470                         }
 1471                 }
 1472 
 1473                 if (rxstat & VGE_RDSTS_VTAG)
 1474                         VLAN_INPUT_TAG(ifp, m,
 1475                             ntohs((rxctl & VGE_RDCTL_VLANID)), continue);
 1476 
 1477                 VGE_UNLOCK(sc);
 1478                 (*ifp->if_input)(ifp, m);
 1479                 VGE_LOCK(sc);
 1480 
 1481                 lim++;
 1482                 if (lim == VGE_RX_DESC_CNT)
 1483                         break;
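                      /*
                       * The lim counter above caps a single call at one
                       * full ring's worth of descriptors.
                       */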
 1484 
 1485         }
 1486 
 1487         /* Flush the RX DMA ring */
 1488 
 1489         bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
 1490             sc->vge_ldata.vge_rx_list_map,
 1491             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1492 
 1493         sc->vge_ldata.vge_rx_prodidx = i;
 1494         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
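              /*
               * The residue count write above appears to credit the chip
               * with 'lim' more usable RX descriptors, returning the
               * slots we just recycled to the receiver.
               */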
 1495 
 1496 
 1497         return;
 1498 }
 1499 
 1500 static void
 1501 vge_txeof(sc)
 1502         struct vge_softc                *sc;
 1503 {
 1504         struct ifnet            *ifp;
 1505         u_int32_t               txstat;
 1506         int                     idx;
 1507 
 1508         ifp = &sc->arpcom.ac_if;
 1509         idx = sc->vge_ldata.vge_tx_considx;
 1510 
 1511         /* Invalidate the TX descriptor list */
 1512 
 1513         bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
 1514             sc->vge_ldata.vge_tx_list_map,
 1515             BUS_DMASYNC_POSTREAD);
 1516 
 1517         while (idx != sc->vge_ldata.vge_tx_prodidx) {
 1518 
 1519                 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
 1520                 if (txstat & VGE_TDSTS_OWN)
 1521                         break;
 1522 
 1523                 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
 1524                 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
 1525                 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
 1526                     sc->vge_ldata.vge_tx_dmamap[idx]);
 1527                 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
 1528                         ifp->if_collisions++;
 1529                 if (txstat & VGE_TDSTS_TXERR)
 1530                         ifp->if_oerrors++;
 1531                 else
 1532                         ifp->if_opackets++;
 1533 
 1534                 sc->vge_ldata.vge_tx_free++;
 1535                 VGE_TX_DESC_INC(idx);
 1536         }
 1537 
 1538         /* No changes made to the TX ring, so no flush needed */
 1539 
 1540         if (idx != sc->vge_ldata.vge_tx_considx) {
 1541                 sc->vge_ldata.vge_tx_considx = idx;
 1542                 ifp->if_flags &= ~IFF_OACTIVE;
 1543                 ifp->if_timer = 0;
 1544         }
 1545 
 1546         /*
 1547          * If not all descriptors have been reaped yet,
 1548          * reload the timer so that we will eventually get another
 1549          * interrupt that will cause us to re-enter this routine.
 1550          * This is done in case the transmitter has gone idle.
 1551          */
 1552         if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
 1553                 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
 1554                 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
 1555         }
 1556 
 1557         return;
 1558 }
 1559 
 1560 static void
 1561 vge_tick(xsc)
 1562         void                    *xsc;
 1563 {
 1564         struct vge_softc        *sc;
 1565         struct ifnet            *ifp;
 1566         struct mii_data         *mii;
 1567 
 1568         sc = xsc;
 1569         ifp = &sc->arpcom.ac_if;
 1570         VGE_LOCK(sc);
 1571         mii = device_get_softc(sc->vge_miibus);
 1572 
 1573         mii_tick(mii);
 1574         if (sc->vge_link) {
 1575                 if (!(mii->mii_media_status & IFM_ACTIVE)) {
 1576                         sc->vge_link = 0;
 1577 #ifdef LINK_STATE_DOWN
 1578                         sc->arpcom.ac_if.if_link_state = LINK_STATE_DOWN;
 1579                         rt_ifmsg(&(sc->arpcom.ac_if));
 1580 #endif /* LINK_STATE_DOWN */
 1581                 }
 1582         } else {
 1583                 if (mii->mii_media_status & IFM_ACTIVE &&
 1584                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
 1585                         sc->vge_link = 1;
 1586 #ifdef LINK_STATE_UP
 1587                         sc->arpcom.ac_if.if_link_state = LINK_STATE_UP;
 1588                         rt_ifmsg(&(sc->arpcom.ac_if));
 1589 #endif /* LINK_STATE_UP */
 1590 #if __FreeBSD_version < 502114
 1591                         if (ifp->if_snd.ifq_head != NULL)
 1592 #else
 1593                         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1594 #endif
 1595                                 taskqueue_enqueue(taskqueue_swi,
 1596                                     &sc->vge_txtask);
 1597                 }
 1598         }
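              /*
               * The __FreeBSD_version conditional above (an idiom that
               * recurs at the other transmit kicks in this file) selects
               * between the old if_snd queue layout and the IFQ_DRV_*
               * macros introduced alongside ALTQ support.
               */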
 1599 
 1600         VGE_UNLOCK(sc);
 1601 
 1602         return;
 1603 }
 1604 
 1605 #ifdef DEVICE_POLLING
 1606 static void
 1607 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
 1608 {
 1609         struct vge_softc *sc = ifp->if_softc;
 1610 
 1611         VGE_LOCK(sc);
 1612 #ifdef IFCAP_POLLING
 1613         if (!(ifp->if_capenable & IFCAP_POLLING)) {
 1614                 ether_poll_deregister(ifp);
 1615                 cmd = POLL_DEREGISTER;
 1616         }
 1617 #endif
 1618         if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
 1619                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
 1620                 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 1621                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 1622                 goto done;
 1623         }
 1624 
 1625         sc->rxcycles = count;
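              /*
               * rxcycles is the budget for this poll pass; vge_rxeof()
               * is expected to count it down per received frame when
               * built with DEVICE_POLLING and stop once it reaches zero.
               */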
 1626         vge_rxeof(sc);
 1627         vge_txeof(sc);
 1628 
 1629 #if __FreeBSD_version < 502114
 1630         if (ifp->if_snd.ifq_head != NULL)
 1631 #else
 1632         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1633 #endif
 1634                 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
 1635 
 1636         if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
 1637                 u_int32_t       status;
 1638                 status = CSR_READ_4(sc, VGE_ISR);
 1639                 if (status == 0xFFFFFFFF)
 1640                         goto done;
 1641                 if (status)
 1642                         CSR_WRITE_4(sc, VGE_ISR, status);
 1643 
 1644                 /*
 1645                  * XXX check behaviour on receiver stalls.
 1646                  */
 1647 
 1648                 if (status & VGE_ISR_TXDMA_STALL ||
 1649                     status & VGE_ISR_RXDMA_STALL)
 1650                         vge_init(sc);
 1651 
 1652                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
 1653                         vge_rxeof(sc);
 1654                         ifp->if_ierrors++;
 1655                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1656                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1657                 }
 1658         }
 1659 done:
 1660         VGE_UNLOCK(sc);
 1661 }
 1662 #endif /* DEVICE_POLLING */
 1663 
 1664 static void
 1665 vge_intr(arg)
 1666         void                    *arg;
 1667 {
 1668         struct vge_softc        *sc;
 1669         struct ifnet            *ifp;
 1670         u_int32_t               status;
 1671 
 1672         sc = arg;
 1673 
 1674         if (sc->suspended) {
 1675                 return;
 1676         }
 1677 
 1678         VGE_LOCK(sc);
 1679         ifp = &sc->arpcom.ac_if;
 1680 
 1681         if (!(ifp->if_flags & IFF_UP)) {
 1682                 VGE_UNLOCK(sc);
 1683                 return;
 1684         }
 1685 
 1686 #ifdef DEVICE_POLLING
 1687         if (ifp->if_flags & IFF_POLLING)
 1688                 goto done;
 1689         if (
 1690 #ifdef IFCAP_POLLING
 1691             (ifp->if_capenable & IFCAP_POLLING) &&
 1692 #endif
 1693             ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
 1694                 CSR_WRITE_4(sc, VGE_IMR, 0);
 1695                 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 1696                 vge_poll(ifp, 0, 1);
 1697                 goto done;
 1698         }
 1699 
 1700 #endif /* DEVICE_POLLING */
 1701 
 1702         /* Disable interrupts */
 1703         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 1704 
 1705         for (;;) {
 1706 
 1707                 status = CSR_READ_4(sc, VGE_ISR);
 1708                 /* If the card has gone away the read returns 0xffffffff. */
 1709                 if (status == 0xFFFFFFFF)
 1710                         break;
 1711 
 1712                 if (status)
 1713                         CSR_WRITE_4(sc, VGE_ISR, status);
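                      /*
                       * The ISR write above acknowledges (clears) the
                       * sources we are about to service; events arriving
                       * while we work re-assert their bits and are caught
                       * on the next pass of this loop.
                       */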
 1714 
 1715                 if ((status & VGE_INTRS) == 0)
 1716                         break;
 1717 
 1718                 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
 1719                         vge_rxeof(sc);
 1720 
 1721                 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
 1722                         vge_rxeof(sc);
 1723                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 1724                         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1725                 }
 1726 
 1727                 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
 1728                         vge_txeof(sc);
 1729 
 1730                 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
 1731                         vge_init(sc);
 1732 
 1733                 if (status & VGE_ISR_LINKSTS)
 1734                         vge_tick(sc);
 1735         }
 1736 
 1737         /* Re-enable interrupts */
 1738         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 1739 
 1740 #ifdef DEVICE_POLLING
 1741 done:
 1742 #endif
 1743         VGE_UNLOCK(sc);
 1744 
 1745 #if __FreeBSD_version < 502114
 1746         if (ifp->if_snd.ifq_head != NULL)
 1747 #else
 1748         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1749 #endif
 1750                 taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);
 1751 
 1752         return;
 1753 }
 1754 
 1755 static int
 1756 vge_encap(sc, m_head, idx)
 1757         struct vge_softc        *sc;
 1758         struct mbuf             *m_head;
 1759         int                     idx;
 1760 {
 1761         struct mbuf             *m_new = NULL;
 1762         struct vge_dmaload_arg  arg;
 1763         bus_dmamap_t            map;
 1764         int                     error;
 1765         struct m_tag            *mtag;
 1766 
 1767         if (sc->vge_ldata.vge_tx_free <= 2)
 1768                 return (EFBIG);
 1769 
 1770         arg.vge_flags = 0;
 1771 
 1772         if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 1773                 arg.vge_flags |= VGE_TDCTL_IPCSUM;
 1774         if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
 1775                 arg.vge_flags |= VGE_TDCTL_TCPCSUM;
 1776         if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
 1777                 arg.vge_flags |= VGE_TDCTL_UDPCSUM;
 1778 
 1779         arg.sc = sc;
 1780         arg.vge_idx = idx;
 1781         arg.vge_m0 = m_head;
 1782         arg.vge_maxsegs = VGE_TX_FRAGS;
 1783 
 1784         map = sc->vge_ldata.vge_tx_dmamap[idx];
 1785         error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
 1786             m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
 1787 
 1788         if (error && error != EFBIG) {
 1789                 printf("vge%d: can't map mbuf (error %d)\n",
 1790                     sc->vge_unit, error);
 1791                 return (ENOBUFS);
 1792         }
 1793 
 1794         /* Too many segments to map, coalesce into a single mbuf */
 1795 
 1796         if (error || arg.vge_maxsegs == 0) {
 1797                 m_new = m_defrag(m_head, M_DONTWAIT);
 1798                 if (m_new == NULL)
 1799                         return (1);
 1800                 else
 1801                         m_head = m_new;
 1802 
 1803                 arg.sc = sc;
 1804                 arg.vge_m0 = m_head;
 1805                 arg.vge_idx = idx;
 1806                 arg.vge_maxsegs = 1;
 1807 
 1808                 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
 1809                     m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
 1810                 if (error) {
 1811                         printf("vge%d: can't map mbuf (error %d)\n",
 1812                             sc->vge_unit, error);
 1813                         return (EFBIG);
 1814                 }
 1815         }
 1816 
 1817         sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
 1818         sc->vge_ldata.vge_tx_free--;
 1819 
 1820         /*
 1821          * Set up hardware VLAN tagging.
 1822          */
 1823 
 1824         mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
 1825         if (mtag != NULL)
 1826                 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
 1827                     htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);
 1828 
 1829         sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
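              /*
               * Setting the OWN bit hands this descriptor to the chip,
               * so it must be the last field written; the update only
               * becomes visible to the hardware once vge_start() syncs
               * the ring and wakes the transmit queue.
               */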
 1830 
 1831         return (0);
 1832 }
 1833 
 1834 static void
 1835 vge_tx_task(arg, npending)
 1836         void                    *arg;
 1837         int                     npending;
 1838 {
 1839         struct ifnet            *ifp;
 1840 
 1841         ifp = arg;
 1842         vge_start(ifp);
 1843 
 1844         return;
 1845 }
 1846 
 1847 /*
 1848  * Main transmit routine.
 1849  */
 1850 
 1851 static void
 1852 vge_start(ifp)
 1853         struct ifnet            *ifp;
 1854 {
 1855         struct vge_softc        *sc;
 1856         struct mbuf             *m_head = NULL;
 1857         int                     idx, pidx = 0;
 1858 
 1859         sc = ifp->if_softc;
 1860         VGE_LOCK(sc);
 1861 
 1862         if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE) {
 1863                 VGE_UNLOCK(sc);
 1864                 return;
 1865         }
 1866 
 1867 #if __FreeBSD_version < 502114
 1868         if (ifp->if_snd.ifq_head == NULL) {
 1869 #else
 1870         if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 1871 #endif
 1872                 VGE_UNLOCK(sc);
 1873                 return;
 1874         }
 1875 
 1876         idx = sc->vge_ldata.vge_tx_prodidx;
 1877 
 1878         pidx = idx - 1;
 1879         if (pidx < 0)
 1880                 pidx = VGE_TX_DESC_CNT - 1;
 1881 
 1882 
 1883         while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
 1884 #if __FreeBSD_version < 502114
 1885                 IF_DEQUEUE(&ifp->if_snd, m_head);
 1886 #else
 1887                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1888 #endif
 1889                 if (m_head == NULL)
 1890                         break;
 1891 
 1892                 if (vge_encap(sc, m_head, idx)) {
 1893 #if __FreeBSD_version >= 502114
 1894                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1895 #else
 1896                         IF_PREPEND(&ifp->if_snd, m_head);
 1897 #endif
 1898                         ifp->if_flags |= IFF_OACTIVE;
 1899                         break;
 1900                 }
 1901 
 1902                 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
 1903                     htole16(VGE_TXDESC_Q);
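                      /*
                       * Setting the Q bit in the previous descriptor
                       * appears to link it to the one just filled, so the
                       * chip keeps walking the chain instead of stopping
                       * after the earlier frame.
                       */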
 1904 
 1905                 pidx = idx;
 1906                 VGE_TX_DESC_INC(idx);
 1907 
 1908                 /*
 1909                  * If there's a BPF listener, bounce a copy of this frame
 1910                  * to him.
 1911                  */
 1912                 BPF_MTAP(ifp, m_head);
 1913         }
 1914 
 1915         if (idx == sc->vge_ldata.vge_tx_prodidx) {
 1916                 VGE_UNLOCK(sc);
 1917                 return;
 1918         }
 1919 
 1920         /* Flush the TX descriptors */
 1921 
 1922         bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
 1923             sc->vge_ldata.vge_tx_list_map,
 1924             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1925 
 1926         /* Issue a transmit command. */
 1927         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
 1928 
 1929         sc->vge_ldata.vge_tx_prodidx = idx;
 1930 
 1931         /*
 1932          * Use the countdown timer for interrupt moderation.
 1933          * 'TX done' interrupts are disabled. Instead, we reset the
 1934          * countdown timer, which will begin counting until it hits
 1935          * the value in the SSTIMER register, and then trigger an
 1936          * interrupt. Each time we set the TIMER0_ENABLE bit, the
 1937          * timer count is reloaded. Only when the transmitter
 1938          * is idle will the timer hit 0 and an interrupt fire.
 1939          */
 1940         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
 1941 
 1942         VGE_UNLOCK(sc);
 1943 
 1944         /*
 1945          * Set a timeout in case the chip goes out to lunch.
 1946          */
 1947         ifp->if_timer = 5;
 1948 
 1949         return;
 1950 }
 1951 
 1952 static void
 1953 vge_init(xsc)
 1954         void                    *xsc;
 1955 {
 1956         struct vge_softc        *sc = xsc;
 1957         struct ifnet            *ifp = &sc->arpcom.ac_if;
 1958         struct mii_data         *mii;
 1959         int                     i;
 1960 
 1961         VGE_LOCK(sc);
 1962         mii = device_get_softc(sc->vge_miibus);
 1963 
 1964         /*
 1965          * Cancel pending I/O and free all RX/TX buffers.
 1966          */
 1967         vge_stop(sc);
 1968         vge_reset(sc);
 1969 
 1970         /*
 1971          * Initialize the RX and TX descriptors and mbufs.
 1972          */
 1973 
 1974         vge_rx_list_init(sc);
 1975         vge_tx_list_init(sc);
 1976 
 1977         /* Set our station address */
 1978         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1979                 CSR_WRITE_1(sc, VGE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
 1980 
 1981         /*
 1982          * Set receive FIFO threshold. Also allow transmission and
 1983          * reception of VLAN tagged frames.
 1984          */
 1985         CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
 1986         CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
 1987 
 1988         /* Set DMA burst length */
 1989         CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
 1990         CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
 1991 
 1992         CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
 1993 
 1994         /* Set collision backoff algorithm */
 1995         CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
 1996             VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
 1997         CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
 1998 
 1999         /* Disable LPSEL field in priority resolution */
 2000         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
 2001 
 2002         /*
 2003          * Load the addresses of the DMA queues into the chip.
 2004          * Note that we only use one transmit queue.
 2005          */
 2006 
 2007         CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
 2008             VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
 2009         CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
 2010 
 2011         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
 2012             VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
 2013         CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
 2014         CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
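              /*
               * Note that the descriptor count registers take a count
               * minus one, while the residue count is loaded with the
               * full ring size: after vge_rx_list_init() every RX
               * descriptor holds a fresh buffer for the chip to fill.
               */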
 2015 
 2016         /* Enable and wake up the RX descriptor queue */
 2017         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
 2018         CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 2019 
 2020         /* Enable the TX descriptor queue */
 2021         CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
 2022 
 2023         /* Set up the receive filter -- allow large frames for VLANs. */
 2024         CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
 2025 
 2026         /* If we want promiscuous mode, set the allframes bit. */
 2027         if (ifp->if_flags & IFF_PROMISC) {
 2028                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
 2029         }
 2030 
 2031         /* Set capture broadcast bit to capture broadcast frames. */
 2032         if (ifp->if_flags & IFF_BROADCAST) {
 2033                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
 2034         }
 2035 
 2036         /* Set multicast bit to capture multicast frames. */
 2037         if (ifp->if_flags & IFF_MULTICAST) {
 2038                 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
 2039         }
 2040 
 2041         /* Init the cam filter. */
 2042         vge_cam_clear(sc);
 2043 
 2044         /* Init the multicast filter. */
 2045         vge_setmulti(sc);
 2046 
 2047         /* Enable flow control */
 2048 
 2049         CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
 2050 
 2051         /* Enable jumbo frame reception (if desired) */
 2052 
 2053         /* Start the MAC. */
 2054         CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
 2055         CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
 2056         CSR_WRITE_1(sc, VGE_CRS0,
 2057             VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
 2058 
 2059         /*
 2060          * Configure the one-shot timer for microsecond
 2061          * resolution and load it for 400 usecs.
 2062          */
 2063         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
 2064         CSR_WRITE_2(sc, VGE_SSTIMER, 400);
 2065 
 2066         /*
 2067          * Configure interrupt moderation for receive. Enable
 2068          * the holdoff counter and load it, and set the RX
 2069          * suppression count to the number of descriptors we
 2070          * want to allow before triggering an interrupt.
 2071          * The holdoff timer is in units of 20 usecs.
 2072          */
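              /*
               * With the 20-usec units noted above, the holdoff value of
               * 10 programmed in the (currently disabled) code below
               * works out to 10 * 20 = roughly 200 usecs.
               */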
 2073 
 2074 #ifdef notyet
 2075         CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
 2076         /* Select the interrupt holdoff timer page. */
 2077         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 2078         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
 2079         CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
 2080 
 2081         /* Enable use of the holdoff timer. */
 2082         CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
 2083         CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
 2084 
 2085         /* Select the RX suppression threshold page. */
 2086         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 2087         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
 2088         CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
 2089 
 2090         /* Restore the page select bits. */
 2091         CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
 2092         CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
 2093 #endif
 2094 
 2095 #ifdef DEVICE_POLLING
 2096         /*
 2097          * Disable interrupts if we are polling.
 2098          */
 2099         if (ifp->if_flags & IFF_POLLING) {
 2100                 CSR_WRITE_4(sc, VGE_IMR, 0);
 2101                 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 2102         } else  /* otherwise ... */
 2103 #endif /* DEVICE_POLLING */
 2104         {
 2105                 /*
 2106                  * Enable interrupts.
 2107                  */
 2108                 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
 2109                 CSR_WRITE_4(sc, VGE_ISR, 0);
 2110                 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
 2111         }
 2112 
 2113         mii_mediachg(mii);
 2114 
 2115         ifp->if_flags |= IFF_RUNNING;
 2116         ifp->if_flags &= ~IFF_OACTIVE;
 2117 
 2118         sc->vge_if_flags = 0;
 2119         sc->vge_link = 0;
 2120 
 2121         VGE_UNLOCK(sc);
 2122 
 2123         return;
 2124 }
 2125 
 2126 /*
 2127  * Set media options.
 2128  */
 2129 static int
 2130 vge_ifmedia_upd(ifp)
 2131         struct ifnet            *ifp;
 2132 {
 2133         struct vge_softc        *sc;
 2134         struct mii_data         *mii;
 2135 
 2136         sc = ifp->if_softc;
 2137         mii = device_get_softc(sc->vge_miibus);
 2138         mii_mediachg(mii);
 2139 
 2140         return (0);
 2141 }
 2142 
 2143 /*
 2144  * Report current media status.
 2145  */
 2146 static void
 2147 vge_ifmedia_sts(ifp, ifmr)
 2148         struct ifnet            *ifp;
 2149         struct ifmediareq       *ifmr;
 2150 {
 2151         struct vge_softc        *sc;
 2152         struct mii_data         *mii;
 2153 
 2154         sc = ifp->if_softc;
 2155         mii = device_get_softc(sc->vge_miibus);
 2156 
 2157         mii_pollstat(mii);
 2158         ifmr->ifm_active = mii->mii_media_active;
 2159         ifmr->ifm_status = mii->mii_media_status;
 2160 
 2161         return;
 2162 }
 2163 
 2164 static void
 2165 vge_miibus_statchg(dev)
 2166         device_t                dev;
 2167 {
 2168         struct vge_softc        *sc;
 2169         struct mii_data         *mii;
 2170         struct ifmedia_entry    *ife;
 2171 
 2172         sc = device_get_softc(dev);
 2173         mii = device_get_softc(sc->vge_miibus);
 2174         ife = mii->mii_media.ifm_cur;
 2175 
 2176         /*
 2177          * If the user manually selects a media mode, we need to turn
 2178          * on the forced MAC mode bit in the DIAGCTL register. If the
 2179          * user happens to choose a full duplex mode, we also need to
 2180          * set the 'force full duplex' bit. This applies only to
 2181          * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
 2182          * mode is disabled, and in 1000baseT mode, full duplex is
 2183          * always implied, so we turn on the forced mode bit but leave
 2184          * the FDX bit cleared.
 2185          */
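              /*
               * Summarized, as implemented by the switch below:
               *
               *      media selected          MACFORCE        FDXFORCE
               *      autoselect              clear           clear
               *      1000baseT               set             clear
               *      10/100, half duplex     set             clear
               *      10/100, full duplex     set             set
               */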
 2186 
 2187         switch (IFM_SUBTYPE(ife->ifm_media)) {
 2188         case IFM_AUTO:
 2189                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2190                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2191                 break;
 2192         case IFM_1000_T:
 2193                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2194                 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2195                 break;
 2196         case IFM_100_TX:
 2197         case IFM_10_T:
 2198                 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
 2199                 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
 2200                         CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2201                 } else {
 2202                         CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
 2203                 }
 2204                 break;
 2205         default:
 2206                 device_printf(dev, "unknown media type: %x\n",
 2207                     IFM_SUBTYPE(ife->ifm_media));
 2208                 break;
 2209         }
 2210 
 2211         return;
 2212 }
 2213 
 2214 static int
 2215 vge_ioctl(ifp, command, data)
 2216         struct ifnet            *ifp;
 2217         u_long                  command;
 2218         caddr_t                 data;
 2219 {
 2220         struct vge_softc        *sc = ifp->if_softc;
 2221         struct ifreq            *ifr = (struct ifreq *) data;
 2222         struct mii_data         *mii;
 2223         int                     error = 0;
 2224 
 2225         switch (command) {
 2226         case SIOCSIFMTU:
 2227                 if (ifr->ifr_mtu > VGE_JUMBO_MTU)
 2228                         error = EINVAL;
                      else
 2229                         ifp->if_mtu = ifr->ifr_mtu;
 2230                 break;
 2231         case SIOCSIFFLAGS:
 2232                 if (ifp->if_flags & IFF_UP) {
 2233                         if (ifp->if_flags & IFF_RUNNING &&
 2234                             ifp->if_flags & IFF_PROMISC &&
 2235                             !(sc->vge_if_flags & IFF_PROMISC)) {
 2236                                 CSR_SETBIT_1(sc, VGE_RXCTL,
 2237                                     VGE_RXCTL_RX_PROMISC);
 2238                                 vge_setmulti(sc);
 2239                         } else if (ifp->if_flags & IFF_RUNNING &&
 2240                             !(ifp->if_flags & IFF_PROMISC) &&
 2241                             sc->vge_if_flags & IFF_PROMISC) {
 2242                                 CSR_CLRBIT_1(sc, VGE_RXCTL,
 2243                                     VGE_RXCTL_RX_PROMISC);
 2244                                 vge_setmulti(sc);
 2245                         } else
 2246                                 vge_init(sc);
 2247                 } else {
 2248                         if (ifp->if_flags & IFF_RUNNING)
 2249                                 vge_stop(sc);
 2250                 }
 2251                 sc->vge_if_flags = ifp->if_flags;
 2252                 break;
 2253         case SIOCADDMULTI:
 2254         case SIOCDELMULTI:
 2255                 vge_setmulti(sc);
 2256                 break;
 2257         case SIOCGIFMEDIA:
 2258         case SIOCSIFMEDIA:
 2259                 mii = device_get_softc(sc->vge_miibus);
 2260                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 2261                 break;
 2262         case SIOCSIFCAP:
 2263 #ifdef IFCAP_POLLING
 2264                 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING);
 2265 #else
 2266                 ifp->if_capenable &= ~(IFCAP_HWCSUM);
 2267 #endif
 2268                 ifp->if_capenable |=
 2269 #ifdef IFCAP_POLLING
 2270                     ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING);
 2271 #else
 2272                     ifr->ifr_reqcap & (IFCAP_HWCSUM);
 2273 #endif
 2274                 if (ifp->if_capenable & IFCAP_TXCSUM)
 2275                         ifp->if_hwassist = VGE_CSUM_FEATURES;
 2276                 else
 2277                         ifp->if_hwassist = 0;
 2278                 if (ifp->if_flags & IFF_RUNNING)
 2279                         vge_init(sc);
 2280                 break;
 2281         default:
 2282                 error = ether_ioctl(ifp, command, data);
 2283                 break;
 2284         }
 2285 
 2286         return (error);
 2287 }
 2288 
 2289 static void
 2290 vge_watchdog(ifp)
 2291         struct ifnet            *ifp;
 2292 {
 2293         struct vge_softc                *sc;
 2294 
 2295         sc = ifp->if_softc;
 2296         VGE_LOCK(sc);
 2297         printf("vge%d: watchdog timeout\n", sc->vge_unit);
 2298         ifp->if_oerrors++;
 2299 
 2300         vge_txeof(sc);
 2301         vge_rxeof(sc);
 2302 
 2303         vge_init(sc);
 2304 
 2305         VGE_UNLOCK(sc);
 2306 
 2307         return;
 2308 }
 2309 
 2310 /*
 2311  * Stop the adapter and free any mbufs allocated to the
 2312  * RX and TX lists.
 2313  */
 2314 static void
 2315 vge_stop(sc)
 2316         struct vge_softc                *sc;
 2317 {
 2318         register int            i;
 2319         struct ifnet            *ifp;
 2320 
 2321         VGE_LOCK(sc);
 2322         ifp = &sc->arpcom.ac_if;
 2323         ifp->if_timer = 0;
 2324 
 2325         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 2326 #ifdef DEVICE_POLLING
 2327         ether_poll_deregister(ifp);
 2328 #endif /* DEVICE_POLLING */
 2329 
 2330         CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 2331         CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
 2332         CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
 2333         CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
 2334         CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
 2335         CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
 2336 
 2337         if (sc->vge_head != NULL) {
 2338                 m_freem(sc->vge_head);
 2339                 sc->vge_head = sc->vge_tail = NULL;
 2340         }
 2341 
 2342         /* Free the TX list buffers. */
 2343 
 2344         for (i = 0; i < VGE_TX_DESC_CNT; i++) {
 2345                 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
 2346                         bus_dmamap_unload(sc->vge_ldata.vge_mtag,
 2347                             sc->vge_ldata.vge_tx_dmamap[i]);
 2348                         m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
 2349                         sc->vge_ldata.vge_tx_mbuf[i] = NULL;
 2350                 }
 2351         }
 2352 
 2353         /* Free the RX list buffers. */
 2354 
 2355         for (i = 0; i < VGE_RX_DESC_CNT; i++) {
 2356                 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
 2357                         bus_dmamap_unload(sc->vge_ldata.vge_mtag,
 2358                             sc->vge_ldata.vge_rx_dmamap[i]);
 2359                         m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
 2360                         sc->vge_ldata.vge_rx_mbuf[i] = NULL;
 2361                 }
 2362         }
 2363 
 2364         VGE_UNLOCK(sc);
 2365 
 2366         return;
 2367 }
 2368 
 2369 /*
 2370  * Device suspend routine.  Stop the interface and save some PCI
 2371  * settings in case the BIOS doesn't restore them properly on
 2372  * resume.
 2373  */
 2374 static int
 2375 vge_suspend(dev)
 2376         device_t                dev;
 2377 {
 2378         struct vge_softc        *sc;
 2379         int                     i;
 2380 
 2381         sc = device_get_softc(dev);
 2382 
 2383         vge_stop(sc);
 2384 
 2385         for (i = 0; i < 5; i++)
 2386                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
 2387         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
 2388         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
 2389         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
 2390         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
 2391 
 2392         sc->suspended = 1;
 2393 
 2394         return (0);
 2395 }
 2396 
 2397 /*
 2398  * Device resume routine.  Restore some PCI settings in case the BIOS
 2399  * doesn't, re-enable busmastering, and restart the interface if
 2400  * appropriate.
 2401  */
 2402 static int
 2403 vge_resume(dev)
 2404         device_t                dev;
 2405 {
 2406         struct vge_softc        *sc;
 2407         struct ifnet            *ifp;
 2408         int                     i;
 2409 
 2410         sc = device_get_softc(dev);
 2411         ifp = &sc->arpcom.ac_if;
 2412 
 2413         /* better way to do this? */
 2414         for (i = 0; i < 5; i++)
 2415                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
 2416         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
 2417         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
 2418         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
 2419         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
 2420 
 2421         /* reenable busmastering */
 2422         pci_enable_busmaster(dev);
 2423         pci_enable_io(dev, SYS_RES_MEMORY);
 2424 
 2425         /* reinitialize interface if necessary */
 2426         if (ifp->if_flags & IFF_UP)
 2427                 vge_init(sc);
 2428 
 2429         sc->suspended = 0;
 2430 
 2431         return (0);
 2432 }
 2433 
 2434 /*
 2435  * Stop all chip I/O so that the kernel's probe routines don't
 2436  * get confused by errant DMAs when rebooting.
 2437  */
 2438 static void
 2439 vge_shutdown(dev)
 2440         device_t                dev;
 2441 {
 2442         struct vge_softc                *sc;
 2443 
 2444         sc = device_get_softc(dev);
 2445 
 2446         vge_stop(sc);
 2447 }
