FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_re.c


    1 /*      $NetBSD: if_re.c,v 1.4.2.5 2004/06/21 17:20:08 tron Exp $       */
    2 /*
    3  * Copyright (c) 1997, 1998-2003
    4  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. All advertising materials mentioning features or use of this software
   15  *    must display the following acknowledgement:
   16  *      This product includes software developed by Bill Paul.
   17  * 4. Neither the name of the author nor the names of any co-contributors
   18  *    may be used to endorse or promote products derived from this software
   19  *    without specific prior written permission.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   31  * THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
   36 
   37 /*
   38  * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
   39  *
   40  * Written by Bill Paul <wpaul@windriver.com>
   41  * Senior Networking Software Engineer
   42  * Wind River Systems
   43  */
   44 
   45 /*
   46  * This driver is designed to support RealTek's next generation of
   47  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
   48  * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
   49  * and the RTL8110S.
   50  *
   51  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
    52  * with the older 8139 family; however, it also supports a special
   53  * C+ mode of operation that provides several new performance enhancing
   54  * features. These include:
   55  *
   56  *      o Descriptor based DMA mechanism. Each descriptor represents
   57  *        a single packet fragment. Data buffers may be aligned on
   58  *        any byte boundary.
   59  *
   60  *      o 64-bit DMA
   61  *
   62  *      o TCP/IP checksum offload for both RX and TX
   63  *
   64  *      o High and normal priority transmit DMA rings
   65  *
   66  *      o VLAN tag insertion and extraction
   67  *
   68  *      o TCP large send (segmentation offload)
   69  *
   70  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
   71  * programming API is fairly straightforward. The RX filtering, EEPROM
    72  * access and PHY access are the same as they are on the older 8139 series
   73  * chips.
   74  *
   75  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
   76  * same programming API and feature set as the 8139C+ with the following
   77  * differences and additions:
   78  *
   79  *      o 1000Mbps mode
   80  *
   81  *      o Jumbo frames
   82  *
   83  *      o GMII and TBI ports/registers for interfacing with copper
   84  *        or fiber PHYs
   85  *
   86  *      o RX and TX DMA rings can have up to 1024 descriptors
   87  *        (the 8139C+ allows a maximum of 64)
   88  *
   89  *      o Slight differences in register layout from the 8139C+
   90  *
   91  * The TX start and timer interrupt registers are at different locations
   92  * on the 8169 than they are on the 8139C+. Also, the status word in the
   93  * RX descriptor has a slightly different bit layout. The 8169 does not
   94  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
   95  * copper gigE PHY.
   96  *
   97  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
   98  * (the 'S' stands for 'single-chip'). These devices have the same
   99  * programming API as the older 8169, but also have some vendor-specific
  100  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
  101  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
  102  * 
  103  * This driver takes advantage of the RX and TX checksum offload and
  104  * VLAN tag insertion/extraction features. It also implements TX
  105  * interrupt moderation using the timer interrupt registers, which
  106  * significantly reduces TX interrupt load. There is also support
   107  * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
  108  * jumbo frames larger than 7.5K, so the max MTU possible with this
  109  * driver is 7500 bytes.
  110  */
  111 
  112 #include "bpfilter.h"
  113 #include "vlan.h"
  114 
  115 #include <sys/param.h>
  116 #include <sys/endian.h>
  117 #include <sys/systm.h>
  118 #include <sys/sockio.h>
  119 #include <sys/mbuf.h>
  120 #include <sys/malloc.h>
  121 #include <sys/kernel.h>
  122 #include <sys/socket.h>
  123 #include <sys/device.h>
  124 
  125 #include <net/if.h>
  126 #include <net/if_arp.h>
  127 #include <net/if_dl.h>
  128 #include <net/if_ether.h>
  129 #include <net/if_media.h>
  130 #include <net/if_vlanvar.h>
  131 
  132 #if NBPFILTER > 0
  133 #include <net/bpf.h>
  134 #endif
  135 
  136 #include <machine/bus.h>
  137 
  138 #include <dev/mii/mii.h>
  139 #include <dev/mii/miivar.h>
  140 
  141 #include <dev/pci/pcireg.h>
  142 #include <dev/pci/pcivar.h>
  143 #include <dev/pci/pcidevs.h>
  144 
  145 /*
  146  * Default to using PIO access for this driver.
  147  */
  148 #define RE_USEIOSPACE
  149 
  150 #include <dev/ic/rtl81x9reg.h>
  151 #include <dev/ic/rtl81x9var.h>
  152 
  153 struct re_pci_softc {
  154         struct rtk_softc sc_rtk;
  155 
  156         void *sc_ih;
  157         pci_chipset_tag_t sc_pc;
  158         pcitag_t sc_pcitag;
  159 };
  160 
  161 /*
  162  * Various supported device vendors/types and their names.
  163  */
  164 static struct rtk_type re_devs[] = {
  165         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8139, RTK_HWREV_8139CPLUS,
  166                 "RealTek 8139C+ 10/100BaseTX" },
  167         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RTK_HWREV_8169,
  168                 "RealTek 8169 Gigabit Ethernet" },
  169         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RTK_HWREV_8169S,
  170                 "RealTek 8169S Single-chip Gigabit Ethernet" },
  171         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RTK_HWREV_8110S,
  172                 "RealTek 8110S Single-chip Gigabit Ethernet" },
  173         { 0, 0, 0, NULL }
  174 };
  175 
  176 static struct rtk_hwrev re_hwrevs[] = {
  177         { RTK_HWREV_8139, RTK_8139,  "" },
  178         { RTK_HWREV_8139A, RTK_8139, "A" },
  179         { RTK_HWREV_8139AG, RTK_8139, "A-G" },
  180         { RTK_HWREV_8139B, RTK_8139, "B" },
  181         { RTK_HWREV_8130, RTK_8139, "8130" },
  182         { RTK_HWREV_8139C, RTK_8139, "C" },
  183         { RTK_HWREV_8139D, RTK_8139, "8139D/8100B/8100C" },
  184         { RTK_HWREV_8139CPLUS, RTK_8139CPLUS, "C+"},
  185         { RTK_HWREV_8169, RTK_8169, "8169"},
  186         { RTK_HWREV_8169S, RTK_8169, "8169S"},
  187         { RTK_HWREV_8110S, RTK_8169, "8110S"},
  188         { RTK_HWREV_8100, RTK_8139, "8100"},
  189         { RTK_HWREV_8101, RTK_8139, "8101"},
  190         { 0, 0, NULL }
  191 };
  192 
  193 int re_probe(struct device *, struct cfdata *, void *);
  194 void re_attach(struct device *, struct device *, void *);
  195 #if 0
  196 int re_detach(struct device *, int);
  197 #endif
  198 
  199 static int re_encap             (struct rtk_softc *, struct mbuf *, int *);
  200 
  201 static int re_allocmem          (struct rtk_softc *);
  202 static int re_newbuf            (struct rtk_softc *, int, struct mbuf *);
  203 static int re_rx_list_init      (struct rtk_softc *);
  204 static int re_tx_list_init      (struct rtk_softc *);
  205 static void re_rxeof            (struct rtk_softc *);
  206 static void re_txeof            (struct rtk_softc *);
  207 static int re_intr              (void *);
  208 static void re_tick             (void *);
  209 static void re_start            (struct ifnet *);
  210 static int re_ioctl             (struct ifnet *, u_long, caddr_t);
  211 static int re_init              (struct ifnet *);
  212 static void re_stop             (struct rtk_softc *);
  213 static void re_watchdog         (struct ifnet *);
  214 #if 0
  215 static int re_suspend           (device_t);
  216 static int re_resume            (device_t);
  217 static void re_shutdown         (device_t);
  218 #endif
  219 static int re_ifmedia_upd       (struct ifnet *);
  220 static void re_ifmedia_sts      (struct ifnet *, struct ifmediareq *);
  221 
  222 static int re_gmii_readreg      (struct device *, int, int);
  223 static void re_gmii_writereg    (struct device *, int, int, int);
  224 
  225 static int re_miibus_readreg    (struct device *, int, int);
  226 static void re_miibus_writereg  (struct device *, int, int, int);
  227 static void re_miibus_statchg   (struct device *);
  228 
  229 static void re_reset            (struct rtk_softc *);
  230 
  231 static int re_diag              (struct rtk_softc *);
  232 
  233 #ifdef RE_USEIOSPACE
  234 #define RTK_RES                 SYS_RES_IOPORT
  235 #define RTK_RID                 RTK_PCI_LOIO
  236 #else
  237 #define RTK_RES                 SYS_RES_MEMORY
  238 #define RTK_RID                 RTK_PCI_LOMEM
  239 #endif
  240 
  241 CFATTACH_DECL(re, sizeof(struct re_pci_softc), re_probe, re_attach, NULL,
  242     NULL);
  243 
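       /*
        * Convenience helpers for setting and clearing bits in the
        * EEPROM command register (RTK_EECMD).
        */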
  244 #define EE_SET(x)                                       \
  245         CSR_WRITE_1(sc, RTK_EECMD,                      \
  246                 CSR_READ_1(sc, RTK_EECMD) | x)
  247 
  248 #define EE_CLR(x)                                       \
  249         CSR_WRITE_1(sc, RTK_EECMD,                      \
  250                 CSR_READ_1(sc, RTK_EECMD) & ~x)
  251 
  252 
  253 static int
  254 re_gmii_readreg(struct device *self, int phy, int reg)
  255 {
  256         struct rtk_softc        *sc = (void *)self;
  257         u_int32_t               rval;
  258         int                     i;
  259 
  260         if (phy != 7)
  261                 return(0);
  262 
  263         /* Let the rgephy driver read the GMEDIASTAT register */
  264 
  265         if (reg == RTK_GMEDIASTAT) {
  266                 rval = CSR_READ_1(sc, RTK_GMEDIASTAT);
  267                 return(rval);
  268         }
  269 
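               /*
                * Start the read: write the register address into RTK_PHYAR,
                * then poll until the chip sets RTK_PHYAR_BUSY to indicate
                * that the read data in the low 16 bits is valid.
                */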
  270         CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
  271         DELAY(1000);
  272 
  273         for (i = 0; i < RTK_TIMEOUT; i++) {
  274                 rval = CSR_READ_4(sc, RTK_PHYAR);
  275                 if (rval & RTK_PHYAR_BUSY)
  276                         break;
  277                 DELAY(100);
  278         }
  279 
  280         if (i == RTK_TIMEOUT) {
  281                 printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname);
  282                 return (0);
  283         }
  284 
  285         return (rval & RTK_PHYAR_PHYDATA);
  286 }
  287 
  288 static void
  289 re_gmii_writereg(struct device *dev, int phy, int reg, int data)
  290 {
  291         struct rtk_softc        *sc = (void *)dev;
  292         u_int32_t               rval;
  293         int                     i;
  294 
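               /*
                * Load the data and register address into RTK_PHYAR with
                * RTK_PHYAR_BUSY set, then wait for the chip to clear the
                * bit once the write has completed.
                */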
  295         CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
  296             (data & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
  297         DELAY(1000);
  298 
  299         for (i = 0; i < RTK_TIMEOUT; i++) {
  300                 rval = CSR_READ_4(sc, RTK_PHYAR);
  301                 if (!(rval & RTK_PHYAR_BUSY))
  302                         break;
  303                 DELAY(100);
  304         }
  305 
  306         if (i == RTK_TIMEOUT) {
  307                 printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname);
  308                 return;
  309         }
  310 
  311         return;
  312 }
  313 
  314 static int
  315 re_miibus_readreg(struct device *dev, int phy, int reg)
  316 {
  317         struct rtk_softc        *sc = (void *)dev;
  318         u_int16_t               rval = 0;
  319         u_int16_t               re8139_reg = 0;
  320         int                     s;
  321 
  322         s = splnet();
  323 
  324         if (sc->rtk_type == RTK_8169) {
  325                 rval = re_gmii_readreg(dev, phy, reg);
  326                 splx(s);
  327                 return (rval);
  328         }
  329 
  330         /* Pretend the internal PHY is only at address 0 */
  331         if (phy) {
  332                 splx(s);
  333                 return(0);
  334         }
  335         switch(reg) {
  336         case MII_BMCR:
  337                 re8139_reg = RTK_BMCR;
  338                 break;
  339         case MII_BMSR:
  340                 re8139_reg = RTK_BMSR;
  341                 break;
  342         case MII_ANAR:
  343                 re8139_reg = RTK_ANAR;
  344                 break;
  345         case MII_ANER:
  346                 re8139_reg = RTK_ANER;
  347                 break;
  348         case MII_ANLPAR:
  349                 re8139_reg = RTK_LPAR;
  350                 break;
  351         case MII_PHYIDR1:
  352         case MII_PHYIDR2:
  353                 splx(s);
  354                 return(0);
  355         /*
  356          * Allow the rlphy driver to read the media status
  357          * register. If we have a link partner which does not
  358          * support NWAY, this is the register which will tell
  359          * us the results of parallel detection.
  360          */
  361         case RTK_MEDIASTAT:
  362                 rval = CSR_READ_1(sc, RTK_MEDIASTAT);
  363                 splx(s);
  364                 return(rval);
  365         default:
  366                 printf("%s: bad phy register\n", sc->sc_dev.dv_xname);
  367                 splx(s);
  368                 return(0);
  369         }
  370         rval = CSR_READ_2(sc, re8139_reg);
  371         splx(s);
  372         return(rval);
  373 }
  374 
  375 static void
  376 re_miibus_writereg(struct device *dev, int phy, int reg, int data)
  377 {
  378         struct rtk_softc        *sc = (void *)dev;
  379         u_int16_t               re8139_reg = 0;
  380         int                     s;
  381 
  382         s = splnet();
  383 
  384         if (sc->rtk_type == RTK_8169) {
  385                 re_gmii_writereg(dev, phy, reg, data);
  386                 splx(s);
  387                 return;
  388         }
  389 
  390         /* Pretend the internal PHY is only at address 0 */
  391         if (phy) {
  392                 splx(s);
  393                 return;
  394         }
  395         switch(reg) {
  396         case MII_BMCR:
  397                 re8139_reg = RTK_BMCR;
  398                 break;
  399         case MII_BMSR:
  400                 re8139_reg = RTK_BMSR;
  401                 break;
  402         case MII_ANAR:
  403                 re8139_reg = RTK_ANAR;
  404                 break;
  405         case MII_ANER:
  406                 re8139_reg = RTK_ANER;
  407                 break;
  408         case MII_ANLPAR:
  409                 re8139_reg = RTK_LPAR;
  410                 break;
  411         case MII_PHYIDR1:
  412         case MII_PHYIDR2:
  413                 splx(s);
  414                 return;
  415                 break;
  416         default:
  417                 printf("%s: bad phy register\n", sc->sc_dev.dv_xname);
  418                 splx(s);
  419                 return;
  420         }
  421         CSR_WRITE_2(sc, re8139_reg, data);
  422         splx(s);
  423         return;
  424 }
  425 
  426 static void
  427 re_miibus_statchg(struct device *dev)
  428 {
  429 
  430         return;
  431 }
  432 
  433 static void
  434 re_reset(sc)
  435         struct rtk_softc        *sc;
  436 {
  437         register int            i;
  438 
  439         CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
  440 
  441         for (i = 0; i < RTK_TIMEOUT; i++) {
  442                 DELAY(10);
  443                 if (!(CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET))
  444                         break;
  445         }
  446         if (i == RTK_TIMEOUT)
  447                 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
  448 
  449         CSR_WRITE_1(sc, 0x82, 1);
  450 
  451         return;
  452 }
  453 
  454 /*
  455  * The following routine is designed to test for a defect on some
  456  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
  457  * lines connected to the bus, however for a 32-bit only card, they
  458  * should be pulled high. The result of this defect is that the
  459  * NIC will not work right if you plug it into a 64-bit slot: DMA
  460  * operations will be done with 64-bit transfers, which will fail
  461  * because the 64-bit data lines aren't connected.
  462  *
   463  * There's no way to work around this (short of taking a soldering
   464  * iron to the board); however, we can detect it. The method we use
  465  * here is to put the NIC into digital loopback mode, set the receiver
  466  * to promiscuous mode, and then try to send a frame. We then compare
  467  * the frame data we sent to what was received. If the data matches,
  468  * then the NIC is working correctly, otherwise we know the user has
  469  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
  470  * slot. In the latter case, there's no way the NIC can work correctly,
  471  * so we print out a message on the console and abort the device attach.
  472  */
  473 
  474 static int
  475 re_diag(sc)
  476         struct rtk_softc        *sc;
  477 {
  478         struct ifnet            *ifp = &sc->ethercom.ec_if;
  479         struct mbuf             *m0;
  480         struct ether_header     *eh;
  481         struct rtk_desc         *cur_rx;
  482         bus_dmamap_t            dmamap;
  483         u_int16_t               status;
  484         u_int32_t               rxstat;
  485         int                     total_len, i, s, error = 0;
  486         u_int8_t                dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
  487         u_int8_t                src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
  488 
  489         /* Allocate a single mbuf */
  490 
  491         MGETHDR(m0, M_DONTWAIT, MT_DATA);
  492         if (m0 == NULL)
  493                 return(ENOBUFS);
  494 
  495         /*
  496          * Initialize the NIC in test mode. This sets the chip up
  497          * so that it can send and receive frames, but performs the
  498          * following special functions:
  499          * - Puts receiver in promiscuous mode
  500          * - Enables digital loopback mode
  501          * - Leaves interrupts turned off
  502          */
  503 
  504         ifp->if_flags |= IFF_PROMISC;
  505         sc->rtk_testmode = 1;
  506         re_init(ifp);
  507         re_stop(sc);
  508         DELAY(100000);
  509         re_init(ifp);
  510 
  511         /* Put some data in the mbuf */
  512 
  513         eh = mtod(m0, struct ether_header *);
  514         bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
  515         bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
  516         eh->ether_type = htons(ETHERTYPE_IP);
  517         m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
  518 
  519         /*
  520          * Queue the packet, start transmission.
  521          */
  522 
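               /*
                * Clear any pending interrupt status before queueing the
                * test frame.
                */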
  523         CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
  524         s = splnet();
  525         IF_ENQUEUE(&ifp->if_snd, m0);
  526         re_start(ifp);
  527         splx(s);
  528         m0 = NULL;
  529 
  530         /* Wait for it to propagate through the chip */
  531 
  532         DELAY(100000);
  533         for (i = 0; i < RTK_TIMEOUT; i++) {
  534                 status = CSR_READ_2(sc, RTK_ISR);
  535                 if ((status & (RTK_ISR_TIMEOUT_EXPIRED|RTK_ISR_RX_OK)) ==
  536                     (RTK_ISR_TIMEOUT_EXPIRED|RTK_ISR_RX_OK))
  537                         break;
  538                 DELAY(10);
  539         }
  540         if (i == RTK_TIMEOUT) {
  541                 printf("%s: diagnostic failed, failed to receive packet "
  542                     "in loopback mode\n", sc->sc_dev.dv_xname);
  543                 error = EIO;
  544                 goto done;
  545         }
  546 
  547         /*
  548          * The packet should have been dumped into the first
  549          * entry in the RX DMA ring. Grab it from there.
  550          */
  551 
  552         dmamap = sc->rtk_ldata.rtk_rx_list_map;
  553         bus_dmamap_sync(sc->sc_dmat,
  554             dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
  555         dmamap = sc->rtk_ldata.rtk_rx_dmamap[0];
  556         bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
  557             BUS_DMASYNC_POSTWRITE);
  558         bus_dmamap_unload(sc->sc_dmat,
  559             sc->rtk_ldata.rtk_rx_dmamap[0]);
  560 
  561         m0 = sc->rtk_ldata.rtk_rx_mbuf[0];
  562         sc->rtk_ldata.rtk_rx_mbuf[0] = NULL;
  563         eh = mtod(m0, struct ether_header *);
  564 
  565         cur_rx = &sc->rtk_ldata.rtk_rx_list[0];
  566         total_len = RTK_RXBYTES(cur_rx);
  567         rxstat = le32toh(cur_rx->rtk_cmdstat);
  568 
  569         if (total_len != ETHER_MIN_LEN) {
  570                 printf("%s: diagnostic failed, received short packet\n",
  571                     sc->sc_dev.dv_xname);
  572                 error = EIO;
  573                 goto done;
  574         }
  575 
  576         /* Test that the received packet data matches what we sent. */
  577 
  578         if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
  579             bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
  580             ntohs(eh->ether_type) != ETHERTYPE_IP) {
  581                 printf("%s: WARNING, DMA FAILURE!\n", sc->sc_dev.dv_xname);
  582                 printf("%s: expected TX data: %s",
  583                     sc->sc_dev.dv_xname, ether_sprintf(dst));
  584                 printf("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
  585                 printf("%s: received RX data: %s",
  586                     sc->sc_dev.dv_xname,
  587                     ether_sprintf(eh->ether_dhost));
  588                 printf("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
  589                     ntohs(eh->ether_type));
  590                 printf("%s: You may have a defective 32-bit NIC plugged "
  591                     "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
  592                 printf("%s: Please re-install the NIC in a 32-bit slot "
  593                     "for proper operation.\n", sc->sc_dev.dv_xname);
  594                 printf("%s: Read the re(4) man page for more details.\n",
  595                     sc->sc_dev.dv_xname);
  596                 error = EIO;
  597         }
  598 
  599 done:
  600         /* Turn interface off, release resources */
  601 
  602         sc->rtk_testmode = 0;
  603         ifp->if_flags &= ~IFF_PROMISC;
  604         re_stop(sc);
  605         if (m0 != NULL)
  606                 m_freem(m0);
  607 
  608         return (error);
  609 }
  610 
  611 /*
  612  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
  613  * IDs against our list and return a device name if we find a match.
  614  */
  615 int
  616 re_probe(struct device *parent, struct cfdata *match, void *aux)
  617 {
  618         struct rtk_type         *t;
  619         struct pci_attach_args  *pa = aux;
  620         bus_space_tag_t         rtk_btag;
  621         bus_space_handle_t      rtk_bhandle;
  622         bus_size_t              bsize;
  623         u_int32_t               hwrev;
  624 
  625         t = re_devs;
  626 
  627         while(t->rtk_name != NULL) {
  628                 if ((PCI_VENDOR(pa->pa_id) == t->rtk_vid) &&
  629                     (PCI_PRODUCT(pa->pa_id) == t->rtk_did)) {
  630 
  631                         /*
  632                          * Temporarily map the I/O space
  633                          * so we can read the chip ID register.
  634                          */
  635                         if (pci_mapreg_map(pa, RTK_PCI_LOIO,
  636                             PCI_MAPREG_TYPE_IO, 0, &rtk_btag,
  637                             &rtk_bhandle, NULL, &bsize)) {
  638                                 printf("can't map i/o space\n");
  639                                 return 0;
  640                         }
  641                         hwrev = bus_space_read_4(rtk_btag, rtk_bhandle,
  642                             RTK_TXCFG) & RTK_TXCFG_HWREV;
  643                         bus_space_unmap(rtk_btag, rtk_bhandle, bsize);
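                               /*
                                * Return a higher match priority than
                                * rtk(4) so this driver claims the chip.
                                */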
  644                         if (t->rtk_basetype == hwrev)
  645                                 return 2;       /* defeat rtk(4) */
  646                 }
  647                 t++;
  648         }
  649 
  650         return 0;
  651 }
  652 
  653 static int
  654 re_allocmem(struct rtk_softc *sc)
  655 {
  656         int                     error;
  657         int                     nseg, rseg;
  658         int                     i;
  659 
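               /* Maximum number of DMA segments allowed per TX/RX buffer map. */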
  660         nseg = 32;
  661 
  662         /* Allocate DMA'able memory for the TX ring */
  663 
  664         error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ, 1,
  665             RTK_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
  666             &sc->rtk_ldata.rtk_tx_list_map);
  667         error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ,
  668             RTK_ETHER_ALIGN, 0, 
  669             &sc->rtk_ldata.rtk_tx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
  670         if (error)
  671                 return (ENOMEM);
  672 
  673         /* Load the map for the TX ring. */
  674         error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_tx_listseg,
  675             1, RTK_TX_LIST_SZ,
  676             (caddr_t *)&sc->rtk_ldata.rtk_tx_list, BUS_DMA_NOWAIT);
  677         memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
  678 
  679         error = bus_dmamap_load(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map,
  680             sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
  681 
  682         /* Create DMA maps for TX buffers */
  683 
  684         for (i = 0; i < RTK_TX_DESC_CNT; i++) {
  685                 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
  686                     MCLBYTES, 0, BUS_DMA_ALLOCNOW,
  687                     &sc->rtk_ldata.rtk_tx_dmamap[i]);
  688                 if (error) {
  689                         printf("%s: can't create DMA map for TX\n",
  690                             sc->sc_dev.dv_xname);
  691                         return(ENOMEM);
  692                 }
  693         }
  694 
  695         /* Allocate DMA'able memory for the RX ring */
  696 
  697         error = bus_dmamap_create(sc->sc_dmat, RTK_RX_LIST_SZ, 1,
  698             RTK_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
  699             &sc->rtk_ldata.rtk_rx_list_map);
  700         error = bus_dmamem_alloc(sc->sc_dmat, RTK_RX_LIST_SZ, RTK_RING_ALIGN,
  701             0, &sc->rtk_ldata.rtk_rx_listseg, 1, &rseg, BUS_DMA_NOWAIT);
  702         if (error)
  703                 return (ENOMEM);
  704 
  705         /* Load the map for the RX ring. */
  706         error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_rx_listseg,
  707             1, RTK_RX_LIST_SZ,
  708             (caddr_t *)&sc->rtk_ldata.rtk_rx_list, BUS_DMA_NOWAIT);
   709         memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);
  710 
  711         error = bus_dmamap_load(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map,
  712              sc->rtk_ldata.rtk_rx_list, RTK_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
  713 
  714         /* Create DMA maps for RX buffers */
  715 
  716         for (i = 0; i < RTK_RX_DESC_CNT; i++) {
  717                 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * nseg, nseg,
  718                     MCLBYTES, 0, BUS_DMA_ALLOCNOW,
  719                     &sc->rtk_ldata.rtk_rx_dmamap[i]);
  720                 if (error) {
  721                         printf("%s: can't create DMA map for RX\n",
  722                             sc->sc_dev.dv_xname);
  723                         return(ENOMEM);
  724                 }
  725         }
  726 
  727         return(0);
  728 }
  729 
  730 /*
  731  * Attach the interface. Allocate softc structures, do ifmedia
  732  * setup and ethernet/BPF attach.
  733  */
  734 void
  735 re_attach(struct device *parent, struct device *self, void *aux)
  736 {
  737         u_char                  eaddr[ETHER_ADDR_LEN];
  738         u_int16_t               val;
  739         struct re_pci_softc     *psc = (void *)self;
  740         struct rtk_softc        *sc = &psc->sc_rtk;
  741         struct pci_attach_args  *pa = aux;
  742         pci_chipset_tag_t pc = pa->pa_pc;
  743         pci_intr_handle_t ih;
  744         const char *intrstr = NULL;
  745         struct ifnet            *ifp;
  746         struct rtk_hwrev        *hw_rev;
  747         struct rtk_type         *t;
  748         int                     hwrev;
  749         int                     error = 0, i, addr_len;
  750         pcireg_t                command;
  751 
  752 #if 0 /*ndef BURN_BRIDGES*/
  753         /*
  754          * Handle power management nonsense.
  755          */
  756 
  757         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
  758                 u_int32_t               iobase, membase, irq;
  759 
  760                 /* Save important PCI config data. */
  761                 iobase = pci_read_config(dev, RTK_PCI_LOIO, 4);
  762                 membase = pci_read_config(dev, RTK_PCI_LOMEM, 4);
  763                 irq = pci_read_config(dev, RTK_PCI_INTLINE, 4);
  764 
  765                 /* Reset the power state. */
   766                 printf("%s: chip is in D%d power mode "
  767                     "-- setting to D0\n", unit,
  768                     pci_get_powerstate(dev));
  769 
  770                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
  771 
  772                 /* Restore PCI config data. */
  773                 pci_write_config(dev, RTK_PCI_LOIO, iobase, 4);
  774                 pci_write_config(dev, RTK_PCI_LOMEM, membase, 4);
  775                 pci_write_config(dev, RTK_PCI_INTLINE, irq, 4);
  776         }
  777 #endif
  778         /*
  779          * Map control/status registers.
  780          */
  781         command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
  782         command |= PCI_COMMAND_MASTER_ENABLE;
  783         pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
  784 
  785 #ifdef RE_USEIOSPACE
  786         if (pci_mapreg_map(pa, RTK_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
  787             &sc->rtk_btag, &sc->rtk_bhandle, NULL, NULL)) {
  788                 printf("%s: can't map i/o space\n", sc->sc_dev.dv_xname);
  789                 error = ENXIO;
  790                 goto fail;
  791         }
  792 #else
  793         if (pci_mapreg_map(pa, RTK_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
  794             &sc->rtk_btag, &sc->rtk_bhandle, NULL, NULL)) {
  795                 printf("%s: can't map mem space\n", sc->sc_dev.dv_xname);
  796                 error = ENXIO;
  797                 goto fail;
  798         }
  799 #endif
  800         t = re_devs;
  801         hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;
  802 
  803         while(t->rtk_name != NULL) {
  804                 if ((PCI_VENDOR(pa->pa_id) == t->rtk_vid) &&
  805                     (PCI_PRODUCT(pa->pa_id) == t->rtk_did)) {
  806 
  807                         if (t->rtk_basetype == hwrev)
  808                                 break;
  809                 }
  810                 t++;
  811         }
  812         printf(": %s\n", t->rtk_name);
  813 
  814         sc->sc_dmat = pa->pa_dmat;
  815         sc->sc_flags |= RTK_ENABLED;
  816 
  817         /* Reset the adapter. */
  818         re_reset(sc);
  819 
  820         hw_rev = re_hwrevs;
  821         hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;
  822         while (hw_rev->rtk_desc != NULL) {
  823                 if (hw_rev->rtk_rev == hwrev) {
  824                         sc->rtk_type = hw_rev->rtk_type;
  825                         break;
  826                 }
  827                 hw_rev++;
  828         }
  829 
  830         if (sc->rtk_type == RTK_8169) {
  831 
  832                 /* Set RX length mask */
  833 
  834                 sc->rtk_rxlenmask = RTK_RDESC_STAT_GFRAGLEN;
  835 
  836                 /* Force station address autoload from the EEPROM */
  837 
  838                 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_AUTOLOAD);
  839                 for (i = 0; i < RTK_TIMEOUT; i++) {
  840                         if (!(CSR_READ_1(sc, RTK_EECMD) & RTK_EEMODE_AUTOLOAD))
  841                                 break;
  842                         DELAY(100);
  843                 }
  844                 if (i == RTK_TIMEOUT)
  845                         printf ("%s: eeprom autoload timed out\n", sc->sc_dev.dv_xname);
  846 
   847                 for (i = 0; i < ETHER_ADDR_LEN; i++)
   848                         eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
  849         } else {
  850 
  851                 /* Set RX length mask */
  852 
  853                 sc->rtk_rxlenmask = RTK_RDESC_STAT_FRAGLEN;
  854 
  855                 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
  856                         addr_len = RTK_EEADDR_LEN1;
  857                 else
  858                         addr_len = RTK_EEADDR_LEN0;
  859 
  860                 /*
  861                  * Get station address from the EEPROM.
  862                  */
  863                 for (i = 0; i < 3; i++) {
  864                         val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
  865                         eaddr[(i * 2) + 0] = val & 0xff;
  866                         eaddr[(i * 2) + 1] = val >> 8;
  867                 }
  868         }
  869 
  870         error = re_allocmem(sc);
  871 
  872         if (error)
  873                 goto fail;
  874 
  875         ifp = &sc->ethercom.ec_if;
  876         ifp->if_softc = sc;
  877         strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
  878         ifp->if_mtu = ETHERMTU;
  879         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  880         ifp->if_ioctl = re_ioctl;
  881         sc->ethercom.ec_capabilities |=
  882             ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
  883         ifp->if_start = re_start;
  884         ifp->if_capabilities |=
  885             IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
  886         ifp->if_watchdog = re_watchdog;
  887         ifp->if_init = re_init;
  888         if (sc->rtk_type == RTK_8169)
  889                 ifp->if_baudrate = 1000000000;
  890         else
  891                 ifp->if_baudrate = 100000000;
  892         ifp->if_snd.ifq_maxlen = RTK_IFQ_MAXLEN;
  893         ifp->if_capenable = ifp->if_capabilities;
  894         IFQ_SET_READY(&ifp->if_snd);
  895 
  896         callout_init(&sc->rtk_tick_ch);
  897 
  898         /* Do MII setup */
  899         sc->mii.mii_ifp = ifp;
  900         sc->mii.mii_readreg = re_miibus_readreg;
  901         sc->mii.mii_writereg = re_miibus_writereg;
  902         sc->mii.mii_statchg = re_miibus_statchg;
  903         ifmedia_init(&sc->mii.mii_media, IFM_IMASK, re_ifmedia_upd,
  904             re_ifmedia_sts);
  905         mii_attach(&sc->sc_dev, &sc->mii, 0xffffffff, MII_PHY_ANY,
  906             MII_OFFSET_ANY, 0);
  907         ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
  908 
  909         /*
  910          * Call MI attach routine.
  911          */
  912         if_attach(ifp);
  913         ether_ifattach(ifp, eaddr);
  914 
  915         /* Perform hardware diagnostic. */
  916         error = re_diag(sc);
  917 
  918         if (error) {
  919                 printf("%s: attach aborted due to hardware diag failure\n",
  920                     sc->sc_dev.dv_xname);
  921                 ether_ifdetach(ifp);
  922                 if_detach(ifp);
  923                 goto fail;
  924         }
  925 
  926         /* Hook interrupt last to avoid having to lock softc */
  927         /* Allocate interrupt */
  928         if (pci_intr_map(pa, &ih)) {
  929                 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
  930                 error = ENXIO;
  931                 ether_ifdetach(ifp);
  932                 if_detach(ifp);
  933                 goto fail;
  934         }
  935         intrstr = pci_intr_string(pc, ih);
  936         psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, re_intr, sc);
  937         if (psc->sc_ih == NULL) {
  938                 printf("%s: couldn't establish interrupt",
  939                     sc->sc_dev.dv_xname);
  940                 if (intrstr != NULL)
  941                         printf(" at %s", intrstr);
  942                 printf("\n");
  943                 ether_ifdetach(ifp);
  944                 if_detach(ifp);
  945                 return;
  946         }
  947         aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
  948 
  949 fail:
  950 #if 0
  951         if (error)
  952                 re_detach(sc);
  953 #endif
  954 
  955         return;
  956 }
  957 
  958 #if 0
  959 /*
  960  * Shutdown hardware and free up resources. This can be called any
  961  * time after the mutex has been initialized. It is called in both
  962  * the error case in attach and the normal detach case so it needs
  963  * to be careful about only freeing resources that have actually been
  964  * allocated.
  965  */
  966 int
  967 re_detach(struct device *self, int flags)
  968 {
  969         struct rtk_softc        *sc;
  970         struct ifnet            *ifp;
  971         int                     i;
  972 
  973         sc = device_get_softc(dev);
  974         KASSERT(mtx_initialized(&sc->rtk_mtx), ("rl mutex not initialized"));
  975         RTK_LOCK(sc);
  976         ifp = &sc->ethercom.ec_if;
  977 
  978         /* These should only be active if attach succeeded */
  979         if (device_is_attached(dev)) {
  980                 re_stop(sc);
  981                 /*
  982                  * Force off the IFF_UP flag here, in case someone
  983                  * still had a BPF descriptor attached to this
  984                  * interface. If they do, ether_ifattach() will cause
  985                  * the BPF code to try and clear the promisc mode
  986                  * flag, which will bubble down to re_ioctl(),
  987                  * which will try to call re_init() again. This will
  988                  * turn the NIC back on and restart the MII ticker,
  989                  * which will panic the system when the kernel tries
  990                  * to invoke the re_tick() function that isn't there
  991                  * anymore.
  992                  */
  993                 ifp->if_flags &= ~IFF_UP;
  994                 ether_ifdetach(ifp);
  995         }
  996         if (sc->rtk_miibus)
  997                 device_delete_child(dev, sc->rtk_miibus);
  998         bus_generic_detach(dev);
  999 
 1000         if (sc->rtk_intrhand)
 1001                 bus_teardown_intr(dev, sc->rtk_irq, sc->rtk_intrhand);
 1002         if (sc->rtk_irq)
 1003                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rtk_irq);
 1004         if (sc->rtk_res)
 1005                 bus_release_resource(dev, RTK_RES, RTK_RID, sc->rtk_res);
 1006 
 1007 
 1008         /* Unload and free the RX DMA ring memory and map */
 1009 
 1010         bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
 1011         bus_dmamem_free(sc->sc_dmat,
 1012             sc->rtk_ldata.rtk_rx_list,
 1013             sc->rtk_ldata.rtk_rx_list_map);
 1014 
 1015         /* Unload and free the TX DMA ring memory and map */
 1016 
 1017         bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
 1018         bus_dmamem_free(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list,
 1019             sc->rtk_ldata.rtk_tx_list_map);
 1020 
 1021         /* Destroy all the RX and TX buffer maps */
 1022 
 1023         for (i = 0; i < RTK_TX_DESC_CNT; i++)
 1024                 bus_dmamap_destroy(sc->sc_dmat,
 1025                     sc->rtk_ldata.rtk_tx_dmamap[i]);
 1026         for (i = 0; i < RTK_RX_DESC_CNT; i++)
 1027                 bus_dmamap_destroy(sc->sc_dmat,
 1028                     sc->rtk_ldata.rtk_rx_dmamap[i]);
 1029 
 1030         /* Unload and free the stats buffer and map */
 1031 
 1032         if (sc->rtk_ldata.rtk_stag) {
 1033                 bus_dmamap_unload(sc->rtk_ldata.rtk_stag,
 1034                     sc->rtk_ldata.rtk_rx_list_map);
 1035                 bus_dmamem_free(sc->rtk_ldata.rtk_stag,
 1036                     sc->rtk_ldata.rtk_stats,
 1037                     sc->rtk_ldata.rtk_smap);
 1038                 bus_dma_tag_destroy(sc->rtk_ldata.rtk_stag);
 1039         }
 1040 
 1041         if (sc->rtk_parent_tag)
 1042                 bus_dma_tag_destroy(sc->rtk_parent_tag);
 1043 
 1044         RTK_UNLOCK(sc);
 1045         mtx_destroy(&sc->rtk_mtx);
 1046 
 1047         return(0);
 1048 }
 1049 #endif
 1050 
 1051 static int
 1052 re_newbuf(sc, idx, m)
 1053         struct rtk_softc        *sc;
 1054         int                     idx;
 1055         struct mbuf             *m;
 1056 {
 1057         struct mbuf             *n = NULL;
 1058         bus_dmamap_t            map;
 1059         struct rtk_desc         *d;
 1060         u_int32_t               cmdstat;
 1061         int                     error;
 1062 
 1063         if (m == NULL) {
 1064                 MGETHDR(n, M_DONTWAIT, MT_DATA);
 1065                 if (n == NULL)
 1066                         return(ENOBUFS);
 1067                 m = n;
 1068 
 1069                 MCLGET(m, M_DONTWAIT);
 1070                 if (! (m->m_flags & M_EXT)) {
 1071                         m_freem(m);
 1072                         return(ENOBUFS);
 1073                 }
 1074         } else
 1075                 m->m_data = m->m_ext.ext_buf;
 1076 
 1077         /*
 1078          * Initialize mbuf length fields and fixup
 1079          * alignment so that the frame payload is
 1080          * longword aligned.
 1081          */
 1082         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1083         m_adj(m, RTK_ETHER_ALIGN);
 1084 
 1085         map = sc->rtk_ldata.rtk_rx_dmamap[idx];
 1086         error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
 1087 
 1088         if (map->dm_nsegs > 1)
 1089                 goto out;
 1090         if (error)
 1091                 goto out;
 1092 
 1093         d = &sc->rtk_ldata.rtk_rx_list[idx];
 1094         if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
 1095                 goto out;
 1096 
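               /*
                * Set up the descriptor: the buffer length goes in the low
                * bits of the command/status word, SOF/EOF mark a complete
                * single-fragment buffer, and EOR marks the last descriptor
                * so the chip wraps back to the start of the ring. The OWN
                * bit is set last to hand the descriptor to the chip.
                */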
 1097         cmdstat = map->dm_segs[0].ds_len;
 1098         d->rtk_bufaddr_lo = htole32(RTK_ADDR_LO(map->dm_segs[0].ds_addr));
 1099         d->rtk_bufaddr_hi = htole32(RTK_ADDR_HI(map->dm_segs[0].ds_addr));
 1100         cmdstat |= RTK_TDESC_CMD_SOF;
 1101         if (idx == (RTK_RX_DESC_CNT - 1))
 1102                 cmdstat |= RTK_TDESC_CMD_EOR;
 1103         d->rtk_cmdstat = htole32(cmdstat);
 1104 
 1105         d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);
 1106 
 1107 
 1108         sc->rtk_ldata.rtk_rx_list[idx].rtk_cmdstat |= htole32(RTK_RDESC_CMD_OWN);
 1109         sc->rtk_ldata.rtk_rx_mbuf[idx] = m;
 1110 
 1111         bus_dmamap_sync(sc->sc_dmat, sc->rtk_ldata.rtk_rx_dmamap[idx], 0,
 1112             sc->rtk_ldata.rtk_rx_dmamap[idx]->dm_mapsize,
 1113             BUS_DMASYNC_PREREAD);
 1114 
 1115         return 0;
 1116 out:
 1117         if (n != NULL)
 1118                 m_freem(n);
 1119         return ENOMEM;
 1120 }
 1121 
 1122 static int
 1123 re_tx_list_init(sc)
 1124         struct rtk_softc        *sc;
 1125 {
 1126         memset((char *)sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
 1127         memset((char *)&sc->rtk_ldata.rtk_tx_mbuf, 0,
 1128             (RTK_TX_DESC_CNT * sizeof(struct mbuf *)));
 1129 
 1130         bus_dmamap_sync(sc->sc_dmat,
 1131             sc->rtk_ldata.rtk_tx_list_map, 0,
 1132             sc->rtk_ldata.rtk_tx_list_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
 1133         sc->rtk_ldata.rtk_tx_prodidx = 0;
 1134         sc->rtk_ldata.rtk_tx_considx = 0;
 1135         sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT;
 1136 
 1137         return(0);
 1138 }
 1139 
 1140 static int
 1141 re_rx_list_init(sc)
 1142         struct rtk_softc        *sc;
 1143 {
 1144         int                     i;
 1145 
 1146         memset((char *)sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);
 1147         memset((char *)&sc->rtk_ldata.rtk_rx_mbuf, 0,
 1148             (RTK_RX_DESC_CNT * sizeof(struct mbuf *)));
 1149 
 1150         for (i = 0; i < RTK_RX_DESC_CNT; i++) {
 1151                 if (re_newbuf(sc, i, NULL) == ENOBUFS)
 1152                         return(ENOBUFS);
 1153         }
 1154 
 1155         /* Flush the RX descriptors */
 1156 
 1157         bus_dmamap_sync(sc->sc_dmat,
 1158             sc->rtk_ldata.rtk_rx_list_map,
 1159             0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
 1160             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1161 
 1162         sc->rtk_ldata.rtk_rx_prodidx = 0;
 1163         sc->rtk_head = sc->rtk_tail = NULL;
 1164 
 1165         return(0);
 1166 }
 1167 
 1168 /*
 1169  * RX handler for C+ and 8169. For the gigE chips, we support
 1170  * the reception of jumbo frames that have been fragmented
 1171  * across multiple 2K mbuf cluster buffers.
 1172  */
 1173 static void
 1174 re_rxeof(sc)
 1175         struct rtk_softc        *sc;
 1176 {
 1177         struct mbuf             *m;
 1178         struct ifnet            *ifp;
 1179         int                     i, total_len;
 1180         struct rtk_desc         *cur_rx;
 1181         struct m_tag            *mtag;
 1182         u_int32_t               rxstat, rxvlan;
 1183 
 1184         ifp = &sc->ethercom.ec_if;
 1185         i = sc->rtk_ldata.rtk_rx_prodidx;
 1186 
 1187         /* Invalidate the descriptor memory */
 1188 
 1189         bus_dmamap_sync(sc->sc_dmat,
 1190             sc->rtk_ldata.rtk_rx_list_map,
 1191             0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
 1192             BUS_DMASYNC_POSTREAD);
 1193 
 1194         while (!RTK_OWN(&sc->rtk_ldata.rtk_rx_list[i])) {
 1195 
 1196                 cur_rx = &sc->rtk_ldata.rtk_rx_list[i];
 1197                 m = sc->rtk_ldata.rtk_rx_mbuf[i];
 1198                 total_len = RTK_RXBYTES(cur_rx);
 1199                 rxstat = le32toh(cur_rx->rtk_cmdstat);
 1200                 rxvlan = le32toh(cur_rx->rtk_vlanctl);
 1201 
 1202                 /* Invalidate the RX mbuf and unload its map */
 1203 
 1204                 bus_dmamap_sync(sc->sc_dmat,
 1205                     sc->rtk_ldata.rtk_rx_dmamap[i],
 1206                     0, sc->rtk_ldata.rtk_rx_dmamap[i]->dm_mapsize,
 1207                     BUS_DMASYNC_POSTWRITE);
 1208                 bus_dmamap_unload(sc->sc_dmat,
 1209                     sc->rtk_ldata.rtk_rx_dmamap[i]);
 1210 
 1211                 if (!(rxstat & RTK_RDESC_STAT_EOF)) {
 1212                         m->m_len = MCLBYTES - RTK_ETHER_ALIGN;
 1213                         if (sc->rtk_head == NULL)
 1214                                 sc->rtk_head = sc->rtk_tail = m;
 1215                         else {
 1216                                 m->m_flags &= ~M_PKTHDR;
 1217                                 sc->rtk_tail->m_next = m;
 1218                                 sc->rtk_tail = m;
 1219                         }
 1220                         re_newbuf(sc, i, NULL);
 1221                         RTK_DESC_INC(i);
 1222                         continue;
 1223                 }
 1224 
 1225                 /*
 1226                  * NOTE: for the 8139C+, the frame length field
 1227                  * is always 12 bits in size, but for the gigE chips,
 1228                  * it is 13 bits (since the max RX frame length is 16K).
 1229                  * Unfortunately, all 32 bits in the status word
 1230                  * were already used, so to make room for the extra
 1231                  * length bit, RealTek took out the 'frame alignment
 1232                  * error' bit and shifted the other status bits
 1233                  * over one slot. The OWN, EOR, FS and LS bits are
 1234                  * still in the same places. We have already extracted
 1235                  * the frame length and checked the OWN bit, so rather
 1236                  * than using an alternate bit mapping, we shift the
 1237                  * status bits one space to the right so we can evaluate
 1238                  * them using the 8169 status as though it was in the
 1239                  * same format as that of the 8139C+.
 1240                  */
 1241                 if (sc->rtk_type == RTK_8169)
 1242                         rxstat >>= 1;
 1243 
 1244                 if (rxstat & RTK_RDESC_STAT_RXERRSUM) {
 1245                         ifp->if_ierrors++;
 1246                         /*
 1247                          * If this is part of a multi-fragment packet,
 1248                          * discard all the pieces.
 1249                          */
 1250                         if (sc->rtk_head != NULL) {
 1251                                 m_freem(sc->rtk_head);
 1252                                 sc->rtk_head = sc->rtk_tail = NULL;
 1253                         }
 1254                         re_newbuf(sc, i, m);
 1255                         RTK_DESC_INC(i);
 1256                         continue;
 1257                 }
 1258 
 1259                 /*
 1260                  * If allocating a replacement mbuf fails,
 1261                  * reload the current one.
 1262                  */
 1263 
 1264                 if (re_newbuf(sc, i, NULL)) {
 1265                         ifp->if_ierrors++;
 1266                         if (sc->rtk_head != NULL) {
 1267                                 m_freem(sc->rtk_head);
 1268                                 sc->rtk_head = sc->rtk_tail = NULL;
 1269                         }
 1270                         re_newbuf(sc, i, m);
 1271                         RTK_DESC_INC(i);
 1272                         continue;
 1273                 }
 1274 
 1275                 RTK_DESC_INC(i);
 1276 
 1277                 if (sc->rtk_head != NULL) {
 1278                         m->m_len = total_len % (MCLBYTES - RTK_ETHER_ALIGN);
 1279                         /* 
 1280                          * Special case: if there's 4 bytes or less
 1281                          * in this buffer, the mbuf can be discarded:
 1282                          * the last 4 bytes is the CRC, which we don't
 1283                          * care about anyway.
 1284                          */
 1285                         if (m->m_len <= ETHER_CRC_LEN) {
 1286                                 sc->rtk_tail->m_len -=
 1287                                     (ETHER_CRC_LEN - m->m_len);
 1288                                 m_freem(m);
 1289                         } else {
 1290                                 m->m_len -= ETHER_CRC_LEN;
 1291                                 m->m_flags &= ~M_PKTHDR;
 1292                                 sc->rtk_tail->m_next = m;
 1293                         }
 1294                         m = sc->rtk_head;
 1295                         sc->rtk_head = sc->rtk_tail = NULL;
 1296                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
 1297                 } else
 1298                         m->m_pkthdr.len = m->m_len =
 1299                             (total_len - ETHER_CRC_LEN);
 1300 
 1301                 ifp->if_ipackets++;
 1302                 m->m_pkthdr.rcvif = ifp;
 1303 
 1304                 /* Do RX checksumming if enabled */
 1305 
 1306                 if (ifp->if_capenable & IFCAP_CSUM_IPv4) {
 1307 
 1308                         /* Check IP header checksum */
 1309                         if (rxstat & RTK_RDESC_STAT_PROTOID)
  1310                                 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
 1311                         if (rxstat & RTK_RDESC_STAT_IPSUMBAD)
 1312                                 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
 1313                 }
 1314 
 1315                 /* Check TCP/UDP checksum */
 1316                 if (RTK_TCPPKT(rxstat) &&
 1317                     (ifp->if_capenable & IFCAP_CSUM_TCPv4)) {
 1318                         m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
 1319                         if (rxstat & RTK_RDESC_STAT_TCPSUMBAD)
 1320                                 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
 1321                 }
 1322                 if (RTK_UDPPKT(rxstat) &&
 1323                     (ifp->if_capenable & IFCAP_CSUM_UDPv4)) {
 1324                         m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
 1325                         if (rxstat & RTK_RDESC_STAT_UDPSUMBAD)
 1326                                 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
 1327                 }
 1328 
 1329                 if (rxvlan & RTK_RDESC_VLANCTL_TAG) {
 1330                         mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
 1331                             M_NOWAIT);
 1332                         if (mtag == NULL) {
 1333                                 ifp->if_ierrors++;
 1334                                 m_freem(m);
 1335                                 continue;
 1336                         }
 1337                         *(u_int *)(mtag + 1) = 
 1338                             be16toh(rxvlan & RTK_RDESC_VLANCTL_DATA);
 1339                         m_tag_prepend(m, mtag);
 1340                 }
 1341 #if NBPFILTER > 0
 1342                 if (ifp->if_bpf)
 1343                         bpf_mtap(ifp->if_bpf, m);
 1344 #endif
 1345                 (*ifp->if_input)(ifp, m);
 1346         }
 1347 
 1348         /* Flush the RX DMA ring */
 1349 
 1350         bus_dmamap_sync(sc->sc_dmat,
 1351             sc->rtk_ldata.rtk_rx_list_map,
 1352             0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
 1353             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1354 
 1355         sc->rtk_ldata.rtk_rx_prodidx = i;
 1356 
 1357         return;
 1358 }
 1359 
 1360 static void
 1361 re_txeof(sc)
 1362         struct rtk_softc        *sc;
 1363 {
 1364         struct ifnet            *ifp;
 1365         u_int32_t               txstat;
 1366         int                     idx;
 1367 
 1368         ifp = &sc->ethercom.ec_if;
 1369         idx = sc->rtk_ldata.rtk_tx_considx;
 1370 
 1371         /* Invalidate the TX descriptor list */
 1372 
 1373         bus_dmamap_sync(sc->sc_dmat,
 1374             sc->rtk_ldata.rtk_tx_list_map,
 1375             0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
 1376             BUS_DMASYNC_POSTREAD);
 1377 
 1378         while (idx != sc->rtk_ldata.rtk_tx_prodidx) {
 1379 
 1380                 txstat = le32toh(sc->rtk_ldata.rtk_tx_list[idx].rtk_cmdstat);
 1381                 if (txstat & RTK_TDESC_CMD_OWN)
 1382                         break;
 1383 
 1384                 /*
 1385                  * We only stash mbufs in the last descriptor
 1386                  * in a fragment chain, which also happens to
 1387                  * be the only place where the TX status bits
 1388                  * are valid.
 1389                  */
 1390 
 1391                 if (txstat & RTK_TDESC_CMD_EOF) {
 1392                         m_freem(sc->rtk_ldata.rtk_tx_mbuf[idx]);
 1393                         sc->rtk_ldata.rtk_tx_mbuf[idx] = NULL;
 1394                         bus_dmamap_unload(sc->sc_dmat,
 1395                             sc->rtk_ldata.rtk_tx_dmamap[idx]);
 1396                         if (txstat & (RTK_TDESC_STAT_EXCESSCOL|
 1397                             RTK_TDESC_STAT_COLCNT))
 1398                                 ifp->if_collisions++;
 1399                         if (txstat & RTK_TDESC_STAT_TXERRSUM)
 1400                                 ifp->if_oerrors++;
 1401                         else
 1402                                 ifp->if_opackets++;
 1403                 }
 1404                 sc->rtk_ldata.rtk_tx_free++;
 1405                 RTK_DESC_INC(idx);
 1406         }
 1407 
 1408         /* No changes made to the TX ring, so no flush needed */
 1409 
 1410         if (idx != sc->rtk_ldata.rtk_tx_considx) {
 1411                 sc->rtk_ldata.rtk_tx_considx = idx;
 1412                 ifp->if_flags &= ~IFF_OACTIVE;
 1413                 ifp->if_timer = 0;
 1414         }
 1415 
 1416         /*
 1417          * If not all descriptors have been reaped yet,
 1418          * reload the timer so that we will eventually get another
 1419          * interrupt that will cause us to re-enter this routine.
 1420          * This is done in case the transmitter has gone idle.
 1421          */
 1422         if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT)
 1423                 CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
 1424 
 1425         return;
 1426 }
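
      /*
       * Illustrative sketch (not part of the driver): the reap loop in
       * re_txeof() above follows the usual descriptor-ring convention,
       * where the chip clears the OWN bit once it has finished with a
       * descriptor and the driver walks forward from its consumer index
       * until it meets a descriptor the hardware still owns.  The
       * hypothetical helper below (ring_reap() is not part of this
       * driver) condenses that handshake.
       */
      #if 0
      static int
      ring_reap(u_int32_t *cmdstat, int cons, int prod, int ring_size)
      {
              int reaped = 0;

              while (cons != prod) {
                      /* Stop at the first descriptor the chip still owns. */
                      if (le32toh(cmdstat[cons]) & RTK_TDESC_CMD_OWN)
                              break;
                      reaped++;
                      cons = (cons + 1) % ring_size;
              }
              return reaped;
      }
      #endif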
 1427 
 1428 static void
 1429 re_tick(xsc)
 1430         void                    *xsc;
 1431 {
 1432         struct rtk_softc        *sc = xsc;
 1433         int s = splnet();
 1434 
 1435         mii_tick(&sc->mii);
 1436         splx(s);
 1437 
 1438         callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
 1439 }
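
      /*
       * Note: re_tick() reschedules itself every hz ticks (once a second)
       * so the MII layer can poll PHY and link state; re_intr() also
       * restarts it immediately when a link-change interrupt is seen.
       */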
 1440 
 1441 #ifdef DEVICE_POLLING
 1442 static void
 1443 re_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
 1444 {
 1445         struct rtk_softc *sc = ifp->if_softc;
 1446 
 1447         RTK_LOCK(sc);
 1448         if (!(ifp->if_capenable & IFCAP_POLLING)) {
 1449                 ether_poll_deregister(ifp);
 1450                 cmd = POLL_DEREGISTER;
 1451         }
 1452         if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
 1453                 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
 1454                 goto done;
 1455         }
 1456 
 1457         sc->rxcycles = count;
 1458         re_rxeof(sc);
 1459         re_txeof(sc);
 1460 
 1461         if (ifp->if_snd.ifq_head != NULL)
 1462                 (*ifp->if_start)(ifp);
 1463 
 1464         if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
 1465                 u_int16_t       status;
 1466 
 1467                 status = CSR_READ_2(sc, RTK_ISR);
 1468                 if (status == 0xffff)
 1469                         goto done;
 1470                 if (status)
 1471                         CSR_WRITE_2(sc, RTK_ISR, status);
 1472 
 1473                 /*
 1474                  * XXX check behaviour on receiver stalls.
 1475                  */
 1476 
 1477                 if (status & RTK_ISR_SYSTEM_ERR) {
 1478                         re_reset(sc);
 1479                         re_init(ifp);
 1480                 }
 1481         }
 1482 done:
 1483         RTK_UNLOCK(sc);
 1484 }
 1485 #endif /* DEVICE_POLLING */
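
      /*
       * With DEVICE_POLLING, the network stack calls re_poll() at a fixed
       * rate instead of relying on interrupts: re_intr() masks the chip's
       * interrupt sources when polling is registered, and the
       * POLL_DEREGISTER path above restores the RTK_INTRS_CPLUS mask once
       * polling is switched off.
       */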
 1486 
 1487 static int
 1488 re_intr(arg)
 1489         void                    *arg;
 1490 {
 1491         struct rtk_softc        *sc = arg;
 1492         struct ifnet            *ifp;
 1493         u_int16_t               status;
 1494         int                     handled = 0;
 1495 
 1496 #if 0
 1497         if (sc->suspended) {
 1498                 return 0;
 1499         }
 1500 #endif
 1501         ifp = &sc->ethercom.ec_if;
 1502 
 1503         if (!(ifp->if_flags & IFF_UP))
 1504                 return 0;
 1505 
 1506 #ifdef DEVICE_POLLING
 1507         if (ifp->if_flags & IFF_POLLING)
 1508                 goto done;
 1509         if ((ifp->if_capenable & IFCAP_POLLING) &&
 1510             ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
 1511                 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
 1512                 re_poll(ifp, 0, 1);
 1513                 goto done;
 1514         }
 1515 #endif /* DEVICE_POLLING */
 1516 
 1517         for (;;) {
 1518 
 1519                 status = CSR_READ_2(sc, RTK_ISR);
 1520                 /* If the card has gone away the read returns 0xffff. */
 1521                 if (status == 0xffff)
 1522                         break;
 1523                 if (status) {
 1524                         handled = 1;
 1525                         CSR_WRITE_2(sc, RTK_ISR, status);
 1526                 }
 1527 
 1528                 if ((status & RTK_INTRS_CPLUS) == 0)
 1529                         break;
 1530 
 1531                 if (status & RTK_ISR_RX_OK)
 1532                         re_rxeof(sc);
 1533 
 1534                 if (status & RTK_ISR_RX_ERR)
 1535                         re_rxeof(sc);
 1536 
 1537                 if ((status & RTK_ISR_TIMEOUT_EXPIRED) ||
 1538                     (status & RTK_ISR_TX_ERR) ||
 1539                     (status & RTK_ISR_TX_DESC_UNAVAIL))
 1540                         re_txeof(sc);
 1541 
 1542                 if (status & RTK_ISR_SYSTEM_ERR) {
 1543                         re_reset(sc);
 1544                         re_init(ifp);
 1545                 }
 1546 
 1547                 if (status & RTK_ISR_LINKCHG) {
 1548                         callout_stop(&sc->rtk_tick_ch);
 1549                         re_tick(sc);
 1550                 }
 1551         }
 1552 
 1553         if (ifp->if_snd.ifq_head != NULL)
 1554                 (*ifp->if_start)(ifp);
 1555 
 1556 #ifdef DEVICE_POLLING
 1557 done:
 1558 #endif
 1559 
 1560         return handled;
 1561 }
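
      /*
       * re_intr() returns nonzero only when it saw and acknowledged status
       * from this device, which lets the platform's interrupt dispatch
       * code cope with shared interrupt lines.
       */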
 1562 
 1563 static int
 1564 re_encap(sc, m_head, idx)
 1565         struct rtk_softc        *sc;
 1566         struct mbuf             *m_head;
 1567         int                     *idx;
 1568 {
 1569         bus_dmamap_t            map;
 1570         int                     error, i, curidx;
 1571         struct m_tag            *mtag;
 1572         struct rtk_desc         *d;
 1573         u_int32_t               cmdstat, rtk_flags;
 1574 
 1575         if (sc->rtk_ldata.rtk_tx_free <= 4)
 1576                 return(EFBIG);
 1577 
 1578         /*
 1579          * Set up checksum offload. Note: checksum offload bits must
 1580          * appear in all descriptors of a multi-descriptor transmit
 1581          * attempt. (This is according to testing done with an 8169
 1582          * chip. I'm not sure if this is a requirement or a bug.)
 1583          */
 1584 
 1585         rtk_flags = 0;
 1586 
 1587         if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
 1588                 rtk_flags |= RTK_TDESC_CMD_IPCSUM;
 1589         if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
 1590                 rtk_flags |= RTK_TDESC_CMD_TCPCSUM;
 1591         if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
 1592                 rtk_flags |= RTK_TDESC_CMD_UDPCSUM;
 1593 
 1594         map = sc->rtk_ldata.rtk_tx_dmamap[*idx];
 1595         error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
 1596             m_head, BUS_DMA_NOWAIT);
 1597 
 1598         if (error) {
 1599                 printf("%s: can't map mbuf (error %d)\n",
 1600                     sc->sc_dev.dv_xname, error);
 1601                 return ENOBUFS;
 1602         }
 1603 
 1604         if (map->dm_nsegs > sc->rtk_ldata.rtk_tx_free - 4)
 1605                 return ENOBUFS;
 1606         /*
 1607          * Map the segment array into descriptors. Note that we set the
 1608          * start-of-frame and end-of-frame markers for either TX or RX, but
 1609          * they really only have meaning in the TX case. (In the RX case,
 1610          * it's the chip that tells us where packets begin and end.)
 1611          * We also keep track of the end of the ring and set the
 1612          * end-of-ring bits as needed, and we set the ownership bits
 1613          * in all except the very first descriptor. (Ownership of the
 1614          * first descriptor is handed over last, once the rest of the
 1615          * chain has been set up.)
 1616          */
 1617         i = 0;
 1618         curidx = *idx;
 1619         while (1) {
 1620                 d = &sc->rtk_ldata.rtk_tx_list[curidx];
 1621                 if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
 1622                         return ENOBUFS;
 1623 
 1624                 cmdstat = map->dm_segs[i].ds_len;
 1625                 d->rtk_bufaddr_lo =
 1626                     htole32(RTK_ADDR_LO(map->dm_segs[i].ds_addr));
 1627                 d->rtk_bufaddr_hi =
 1628                     htole32(RTK_ADDR_HI(map->dm_segs[i].ds_addr));
 1629                 if (i == 0)
 1630                         cmdstat |= RTK_TDESC_CMD_SOF;
 1631                 else
 1632                         cmdstat |= RTK_TDESC_CMD_OWN;
 1633                 if (curidx == (RTK_TX_DESC_CNT - 1))
 1634                         cmdstat |= RTK_TDESC_CMD_EOR;
 1635                 d->rtk_cmdstat = htole32(cmdstat | rtk_flags);
 1636                 i++;
 1637                 if (i == map->dm_nsegs)
 1638                         break;
 1639                 RTK_DESC_INC(curidx);
 1640         }
 1641 
 1642         d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);
 1643 
 1644         /*
 1645          * Ensure that the map for this transmission
 1646          * is placed at the array index of the last descriptor
 1647          * in this chain.
 1648          */
 1649         sc->rtk_ldata.rtk_tx_dmamap[*idx] =
 1650             sc->rtk_ldata.rtk_tx_dmamap[curidx];
 1651         sc->rtk_ldata.rtk_tx_dmamap[curidx] = map;
 1652         sc->rtk_ldata.rtk_tx_mbuf[curidx] = m_head;
 1653         sc->rtk_ldata.rtk_tx_free -= map->dm_nsegs;
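              /*
               * (The dmamap swap above matters because re_txeof() unloads
               * the map from the slot in which it finds the EOF descriptor,
               * so the map must end up at the last index of the chain.)
               */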
 1654 
 1655         /*
 1656          * Set up hardware VLAN tagging. Note: vlan tag info must
 1657          * appear in the first descriptor of a multi-descriptor
 1658          * transmission attempt.
 1659          */
 1660 
 1661         if (sc->ethercom.ec_nvlans &&
 1662             (mtag = m_tag_find(m_head, PACKET_TAG_VLAN, NULL)) != NULL)
 1663                 sc->rtk_ldata.rtk_tx_list[*idx].rtk_vlanctl =
 1664                     htole32(htons(*(u_int *)(mtag + 1)) |
 1665                     RTK_TDESC_VLANCTL_TAG);
 1666 
 1667         /* Transfer ownership of packet to the chip. */
 1668 
 1669         sc->rtk_ldata.rtk_tx_list[curidx].rtk_cmdstat |=
 1670             htole32(RTK_TDESC_CMD_OWN);
 1671         if (*idx != curidx)
 1672                 sc->rtk_ldata.rtk_tx_list[*idx].rtk_cmdstat |=
 1673                     htole32(RTK_TDESC_CMD_OWN);
 1674 
 1675         RTK_DESC_INC(curidx);
 1676         *idx = curidx;
 1677 
 1678         return 0;
 1679 }
 1680 
 1681 /*
 1682  * Main transmit routine for C+ and gigE NICs.
 1683  */
 1684 
 1685 static void
 1686 re_start(ifp)
 1687         struct ifnet            *ifp;
 1688 {
 1689         struct rtk_softc        *sc;
 1690         struct mbuf             *m_head = NULL;
 1691         int                     idx;
 1692 
 1693         sc = ifp->if_softc;
 1694 
 1695         idx = sc->rtk_ldata.rtk_tx_prodidx;
 1696         while (sc->rtk_ldata.rtk_tx_mbuf[idx] == NULL) {
 1697                 IF_DEQUEUE(&ifp->if_snd, m_head);
 1698                 if (m_head == NULL)
 1699                         break;
 1700 
 1701                 if (re_encap(sc, m_head, &idx)) {
 1702                         IF_PREPEND(&ifp->if_snd, m_head);
 1703                         ifp->if_flags |= IFF_OACTIVE;
 1704                         break;
 1705                 }
 1706 #if NBPFILTER > 0
 1707                 /*
 1708                  * If there's a BPF listener, bounce a copy of this frame
 1709                  * to him.
 1710                  */
 1711                 if (ifp->if_bpf)
 1712                         bpf_mtap(ifp->if_bpf, m_head);
 1713 #endif
 1714         }
 1715 
 1716         /* Flush the TX descriptors */
 1717 
 1718         bus_dmamap_sync(sc->sc_dmat,
 1719             sc->rtk_ldata.rtk_tx_list_map,
 1720             0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
 1721             BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 1722 
 1723         sc->rtk_ldata.rtk_tx_prodidx = idx;
 1724 
 1725         /*
 1726          * RealTek put the TX poll request register in a different
 1727          * location on the 8169 gigE chip. I don't know why.
 1728          */
 1729 
 1730         if (sc->rtk_type == RTK_8169)
 1731                 CSR_WRITE_2(sc, RTK_GTXSTART, RTK_TXSTART_START);
 1732         else
 1733                 CSR_WRITE_2(sc, RTK_TXSTART, RTK_TXSTART_START);
 1734 
 1735         /*
 1736          * Use the countdown timer for interrupt moderation.
 1737          * 'TX done' interrupts are disabled. Instead, we reset the
 1738          * countdown timer, which will begin counting until it hits
 1739          * the value in the TIMERINT register, and then trigger an
 1740          * interrupt. Each time we write to the TIMERCNT register,
 1741          * the timer count is reset to 0.
 1742          */
 1743         CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
 1744 
 1745         /*
 1746          * Set a timeout in case the chip goes out to lunch.
 1747          */
 1748         ifp->if_timer = 5;
 1749 
 1750         return;
 1751 }
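
      /*
       * Illustrative sketch (not part of the driver): with per-packet
       * 'TX done' interrupts disabled, transmit completions are reaped
       * from the countdown-timer interrupt instead.  The hypothetical
       * helper below simply condenses the relevant pieces of re_intr()
       * and re_txeof() to show the moderation cycle.
       */
      #if 0
      static void
      re_tx_moderation_example(struct rtk_softc *sc, u_int16_t status)
      {
              /* The timer expiring stands in for a per-packet 'TX done'. */
              if (status & RTK_ISR_TIMEOUT_EXPIRED)
                      re_txeof(sc);
              /*
               * re_txeof() itself rewrites RTK_TIMERCNT while unreaped
               * descriptors remain, so the timer keeps firing until the
               * ring drains.
               */
      }
      #endif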
 1752 
 1753 static int
 1754 re_init(struct ifnet *ifp)
 1755 {
 1756         struct rtk_softc        *sc = ifp->if_softc;
 1757         u_int32_t               rxcfg = 0;
 1758         u_int32_t               reg;
 1759 
 1760         /*
 1761          * Cancel pending I/O and free all RX/TX buffers.
 1762          */
 1763         re_stop(sc);
 1764 
 1765         /*
 1766          * Enable C+ RX and TX mode, as well as VLAN stripping and
 1767          * RX checksum offload. We must configure the C+ register
 1768          * before all others.
 1769          */
 1770         CSR_WRITE_2(sc, RTK_CPLUS_CMD, RTK_CPLUSCMD_RXENB|
 1771             RTK_CPLUSCMD_TXENB|RTK_CPLUSCMD_PCI_MRW|
 1772             RTK_CPLUSCMD_VLANSTRIP|
 1773             (ifp->if_capenable &
 1774             (IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4) ?
 1775             RTK_CPLUSCMD_RXCSUM_ENB : 0));
 1776 
 1777         /*
 1778          * Init our MAC address.  Even though the chipset
 1779          * documentation doesn't mention it, we need to enter "Config
 1780          * register write enable" mode to modify the ID registers.
 1781          */
 1782         CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
 1783         memcpy(&reg, LLADDR(ifp->if_sadl), 4);
 1784         CSR_WRITE_STREAM_4(sc, RTK_IDR0, reg);
 1785         reg = 0;
 1786         memcpy(&reg, LLADDR(ifp->if_sadl) + 4, 4);
 1787         CSR_WRITE_STREAM_4(sc, RTK_IDR4, reg);
 1788         CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
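              /*
               * The 6-byte station address goes out as two 32-bit stream
               * writes: bytes 0-3 into IDR0 and bytes 4-5 into IDR4 (only
               * the low two bytes of the second write carry address data).
               */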
 1789 
 1790         /*
 1791          * For C+ mode, initialize the RX descriptors and mbufs.
 1792          */
 1793         re_rx_list_init(sc);
 1794         re_tx_list_init(sc);
 1795 
 1796         /*
 1797          * Enable transmit and receive.
 1798          */
 1799         CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
 1800 
 1801         /*
 1802          * Set the initial TX and RX configuration.
 1803          */
 1804         if (sc->rtk_testmode) {
 1805                 if (sc->rtk_type == RTK_8169)
 1806                         CSR_WRITE_4(sc, RTK_TXCFG,
 1807                             RTK_TXCFG_CONFIG|RTK_LOOPTEST_ON);
 1808                 else
 1809                         CSR_WRITE_4(sc, RTK_TXCFG,
 1810                             RTK_TXCFG_CONFIG|RTK_LOOPTEST_ON_CPLUS);
 1811         } else
 1812                 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
 1813         CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
 1814 
 1815         /* Set the individual bit to receive frames for this host only. */
 1816         rxcfg = CSR_READ_4(sc, RTK_RXCFG);
 1817         rxcfg |= RTK_RXCFG_RX_INDIV;
 1818 
 1819         /* If we want promiscuous mode, set the allframes bit. */
 1820         if (ifp->if_flags & IFF_PROMISC) {
 1821                 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
 1822                 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
 1823         } else {
 1824                 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
 1825                 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
 1826         }
 1827 
 1828         /*
 1829          * Set capture broadcast bit to capture broadcast frames.
 1830          */
 1831         if (ifp->if_flags & IFF_BROADCAST) {
 1832                 rxcfg |= RTK_RXCFG_RX_BROAD;
 1833                 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
 1834         } else {
 1835                 rxcfg &= ~RTK_RXCFG_RX_BROAD;
 1836                 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
 1837         }
 1838 
 1839         /*
 1840          * Program the multicast filter, if necessary.
 1841          */
 1842         rtk_setmulti(sc);
 1843 
 1844 #ifdef DEVICE_POLLING
 1845         /*
 1846          * Disable interrupts if we are polling.
 1847          */
 1848         if (ifp->if_flags & IFF_POLLING)
 1849                 CSR_WRITE_2(sc, RTK_IMR, 0);
 1850         else    /* otherwise ... */
 1851 #endif /* DEVICE_POLLING */
 1852         /*
 1853          * Enable interrupts.
 1854          */
 1855         if (sc->rtk_testmode)
 1856                 CSR_WRITE_2(sc, RTK_IMR, 0);
 1857         else
 1858                 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
 1859 
 1860         /* Start RX/TX process. */
 1861         CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
 1862 #ifdef notdef
 1863         /* Enable receiver and transmitter. */
 1864         CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
 1865 #endif
 1866         /*
 1867          * Load the addresses of the RX and TX lists into the chip.
 1868          */
 1869 
 1870         CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
 1871             RTK_ADDR_HI(sc->rtk_ldata.rtk_rx_listseg.ds_addr));
 1872         CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
 1873             RTK_ADDR_LO(sc->rtk_ldata.rtk_rx_listseg.ds_addr));
 1874 
 1875         CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
 1876             RTK_ADDR_HI(sc->rtk_ldata.rtk_tx_listseg.ds_addr));
 1877         CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
 1878             RTK_ADDR_LO(sc->rtk_ldata.rtk_tx_listseg.ds_addr));
 1879 
 1880         CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
 1881 
 1882         /*
 1883          * Initialize the timer interrupt register so that
 1884          * a timer interrupt will be generated once the timer
 1885          * reaches a certain number of ticks. The timer is
 1886          * reloaded on each transmit. This gives us TX interrupt
 1887          * moderation, which dramatically improves TX frame rate.
 1888          */
 1889 
 1890         if (sc->rtk_type == RTK_8169)
 1891                 CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0x800);
 1892         else
 1893                 CSR_WRITE_4(sc, RTK_TIMERINT, 0x400);
 1894 
 1895         /*
 1896          * For 8169 gigE NICs, set the max allowed RX packet
 1897          * size so we can receive jumbo frames.
 1898          */
 1899         if (sc->rtk_type == RTK_8169)
 1900                 CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
 1901 
 1902         if (sc->rtk_testmode)
 1903                 return 0;
 1904 
 1905         mii_mediachg(&sc->mii);
 1906 
 1907         CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
 1908 
 1909         ifp->if_flags |= IFF_RUNNING;
 1910         ifp->if_flags &= ~IFF_OACTIVE;
 1911 
 1912         callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
 1913 
 1914         return 0;
 1915 }
 1916 
 1917 /*
 1918  * Set media options.
 1919  */
 1920 static int
 1921 re_ifmedia_upd(ifp)
 1922         struct ifnet            *ifp;
 1923 {
 1924         struct rtk_softc        *sc;
 1925 
 1926         sc = ifp->if_softc;
 1927 
 1928         return (mii_mediachg(&sc->mii));
 1929 }
 1930 
 1931 /*
 1932  * Report current media status.
 1933  */
 1934 static void
 1935 re_ifmedia_sts(ifp, ifmr)
 1936         struct ifnet            *ifp;
 1937         struct ifmediareq       *ifmr;
 1938 {
 1939         struct rtk_softc        *sc;
 1940 
 1941         sc = ifp->if_softc;
 1942 
 1943         mii_pollstat(&sc->mii);
 1944         ifmr->ifm_active = sc->mii.mii_media_active;
 1945         ifmr->ifm_status = sc->mii.mii_media_status;
 1946 
 1947         return;
 1948 }
 1949 
 1950 static int
 1951 re_ioctl(ifp, command, data)
 1952         struct ifnet            *ifp;
 1953         u_long                  command;
 1954         caddr_t                 data;
 1955 {
 1956         struct rtk_softc        *sc = ifp->if_softc;
 1957         struct ifreq            *ifr = (struct ifreq *) data;
 1958         int                     s, error = 0;
 1959 
 1960         s = splnet();
 1961 
 1962         switch(command) {
 1963         case SIOCSIFMTU:
 1964                 if (ifr->ifr_mtu > RTK_JUMBO_MTU)
 1965                         error = EINVAL;
 1966                 else ifp->if_mtu = ifr->ifr_mtu;
 1967                 break;
 1968         case SIOCSIFFLAGS:
 1969                 if (ifp->if_flags & IFF_UP) {
 1970                         re_init(ifp);
 1971                 } else {
 1972                         if (ifp->if_flags & IFF_RUNNING)
 1973                                 re_stop(sc);
 1974                 }
 1975                 error = 0;
 1976                 break;
 1977         case SIOCGIFMEDIA:
 1978         case SIOCSIFMEDIA:
 1979                 error = ifmedia_ioctl(ifp, ifr, &sc->mii.mii_media, command);
 1980                 break;
 1981         default:
 1982                 error = ether_ioctl(ifp, command, data);
 1983                 if (error == ENETRESET) {
 1984                         if (RTK_IS_ENABLED(sc))
 1985                                 rtk_setmulti(sc);
 1986                         error = 0;
 1987                 }
 1988                 break;
 1989         }
 1990 
 1991         splx(s);
 1992 
 1993         return(error);
 1994 }
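
      /*
       * Illustrative sketch (not part of the driver): the handler above
       * is reached through the normal socket ioctl path, e.g. when
       * ifconfig changes the interface MTU.  A hypothetical userland
       * fragment doing the same thing directly (interface name assumed to
       * be re0; needs <sys/socket.h>, <sys/sockio.h>, <net/if.h>,
       * <string.h>, <unistd.h> and <err.h>):
       *
       *      struct ifreq ifr;
       *      int s = socket(AF_INET, SOCK_DGRAM, 0);
       *
       *      memset(&ifr, 0, sizeof(ifr));
       *      strlcpy(ifr.ifr_name, "re0", sizeof(ifr.ifr_name));
       *      ifr.ifr_mtu = 1500;
       *      if (ioctl(s, SIOCSIFMTU, &ifr) == -1)
       *              err(1, "SIOCSIFMTU");
       *      close(s);
       */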
 1995 
 1996 static void
 1997 re_watchdog(ifp)
 1998         struct ifnet            *ifp;
 1999 {
 2000         struct rtk_softc        *sc;
 2001         int                     s;
 2002 
 2003         sc = ifp->if_softc;
 2004         s = splnet();
 2005         printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
 2006         ifp->if_oerrors++;
 2007 
 2008         re_txeof(sc);
 2009         re_rxeof(sc);
 2010 
 2011         re_init(ifp);
 2012 
 2013         splx(s);
 2014 }
 2015 
 2016 /*
 2017  * Stop the adapter and free any mbufs allocated to the
 2018  * RX and TX lists.
 2019  */
 2020 static void
 2021 re_stop(sc)
 2022         struct rtk_softc        *sc;
 2023 {
 2024         register int            i;
 2025         struct ifnet            *ifp;
 2026 
 2027         ifp = &sc->ethercom.ec_if;
 2028         ifp->if_timer = 0;
 2029 
 2030         callout_stop(&sc->rtk_tick_ch);
 2031         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
 2032 #ifdef DEVICE_POLLING
 2033         ether_poll_deregister(ifp);
 2034 #endif /* DEVICE_POLLING */
 2035 
 2036         CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
 2037         CSR_WRITE_2(sc, RTK_IMR, 0x0000);
 2038 
 2039         if (sc->rtk_head != NULL) {
 2040                 m_freem(sc->rtk_head);
 2041                 sc->rtk_head = sc->rtk_tail = NULL;
 2042         }
 2043 
 2044         /* Free the TX list buffers. */
 2045 
 2046         for (i = 0; i < RTK_TX_DESC_CNT; i++) {
 2047                 if (sc->rtk_ldata.rtk_tx_mbuf[i] != NULL) {
 2048                         bus_dmamap_unload(sc->sc_dmat,
 2049                             sc->rtk_ldata.rtk_tx_dmamap[i]);
 2050                         m_freem(sc->rtk_ldata.rtk_tx_mbuf[i]);
 2051                         sc->rtk_ldata.rtk_tx_mbuf[i] = NULL;
 2052                 }
 2053         }
 2054 
 2055         /* Free the RX list buffers. */
 2056 
 2057         for (i = 0; i < RTK_RX_DESC_CNT; i++) {
 2058                 if (sc->rtk_ldata.rtk_rx_mbuf[i] != NULL) {
 2059                         bus_dmamap_unload(sc->sc_dmat,
 2060                             sc->rtk_ldata.rtk_rx_dmamap[i]);
 2061                         m_freem(sc->rtk_ldata.rtk_rx_mbuf[i]);
 2062                         sc->rtk_ldata.rtk_rx_mbuf[i] = NULL;
 2063                 }
 2064         }
 2065 
 2066         return;
 2067 }
 2068 #if 0
 2069 /*
 2070  * Device suspend routine.  Stop the interface and save some PCI
 2071  * settings in case the BIOS doesn't restore them properly on
 2072  * resume.
 2073  */
 2074 static int
 2075 re_suspend(dev)
 2076         device_t                dev;
 2077 {
 2078         register int            i;
 2079         struct rtk_softc        *sc;
 2080 
 2081         sc = device_get_softc(dev);
 2082 
 2083         re_stop(sc);
 2084 
 2085         for (i = 0; i < 5; i++)
 2086                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
 2087         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
 2088         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
 2089         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
 2090         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
 2091 
 2092         sc->suspended = 1;
 2093 
 2094         return (0);
 2095 }
 2096 
 2097 /*
 2098  * Device resume routine.  Restore some PCI settings in case the BIOS
 2099  * doesn't, re-enable busmastering, and restart the interface if
 2100  * appropriate.
 2101  */
 2102 static int
 2103 re_resume(dev)
 2104         device_t                dev;
 2105 {
 2106         register int            i;
 2107         struct rtk_softc        *sc;
 2108         struct ifnet            *ifp;
 2109 
 2110         sc = device_get_softc(dev);
 2111         ifp = &sc->ethercom.ec_if;
 2112 
 2113         /* better way to do this? */
 2114         for (i = 0; i < 5; i++)
 2115                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
 2116         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
 2117         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
 2118         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
 2119         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
 2120 
 2121         /* reenable busmastering */
 2122         pci_enable_busmaster(dev);
 2123         pci_enable_io(dev, RTK_RES);
 2124 
 2125         /* reinitialize interface if necessary */
 2126         if (ifp->if_flags & IFF_UP)
 2127                 re_init(ifp);
 2128 
 2129         sc->suspended = 0;
 2130 
 2131         return (0);
 2132 }
 2133 
 2134 /*
 2135  * Stop all chip I/O so that the kernel's probe routines don't
 2136  * get confused by errant DMAs when rebooting.
 2137  */
 2138 static void
 2139 re_shutdown(dev)
 2140         device_t                dev;
 2141 {
 2142         struct rtk_softc        *sc;
 2143 
 2144         sc = device_get_softc(dev);
 2145 
 2146         re_stop(sc);
 2147 
 2148         return;
 2149 }
 2150 #endif
