The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/tx/if_tx.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/8.4/sys/dev/tx/if_tx.c 225492 2011-09-11 20:38:33Z marius $");
   29 
/*
 * EtherPower II 10/100 Fast Ethernet (SMC 9432 series)
 *
 * These cards are based on the SMC83c17x (EPIC) chip and one of various
 * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on
 * the card model. All cards support 10baseT/UTP and 100baseTX half- and
 * full-duplex (SMC9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX
 * also supports fibre optics.
 *
 * Thanks go to Steve Bauer and Jason Wright.
 */
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/sockio.h>
   45 #include <sys/mbuf.h>
   46 #include <sys/kernel.h>
   47 #include <sys/module.h>
   48 #include <sys/socket.h>
   49 #include <sys/queue.h>
   50 
   51 #include <net/if.h>
   52 #include <net/if_arp.h>
   53 #include <net/ethernet.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_types.h>
   57 
   58 #include <net/bpf.h>
   59 
   60 #include <net/if_vlan_var.h>
   61 
   62 #include <machine/bus.h>
   63 #include <machine/resource.h>
   64 #include <sys/bus.h>
   65 #include <sys/rman.h>
   66 
   67 #include <dev/pci/pcireg.h>
   68 #include <dev/pci/pcivar.h>
   69 
   70 #include <dev/mii/mii.h>
   71 #include <dev/mii/miivar.h>
   72 #include "miidevs.h"
   73 
   74 #include <dev/mii/lxtphyreg.h>
   75 
   76 #include "miibus_if.h"
   77 
   78 #include <dev/tx/if_txreg.h>
   79 #include <dev/tx/if_txvar.h>
   80 
   81 MODULE_DEPEND(tx, pci, 1, 1, 1);
   82 MODULE_DEPEND(tx, ether, 1, 1, 1);
   83 MODULE_DEPEND(tx, miibus, 1, 1, 1);
   84 
   85 static int epic_ifioctl(struct ifnet *, u_long, caddr_t);
   86 static void epic_intr(void *);
   87 static void epic_tx_underrun(epic_softc_t *);
   88 static void epic_ifstart(struct ifnet *);
   89 static void epic_ifstart_locked(struct ifnet *);
   90 static void epic_timer(void *);
   91 static void epic_init(void *);
   92 static void epic_init_locked(epic_softc_t *);
   93 static void epic_stop(epic_softc_t *);
   94 static void epic_rx_done(epic_softc_t *);
   95 static void epic_tx_done(epic_softc_t *);
   96 static int epic_init_rings(epic_softc_t *);
   97 static void epic_free_rings(epic_softc_t *);
   98 static void epic_stop_activity(epic_softc_t *);
   99 static int epic_queue_last_packet(epic_softc_t *);
  100 static void epic_start_activity(epic_softc_t *);
  101 static void epic_set_rx_mode(epic_softc_t *);
  102 static void epic_set_tx_mode(epic_softc_t *);
  103 static void epic_set_mc_table(epic_softc_t *);
  104 static int epic_read_eeprom(epic_softc_t *,u_int16_t);
  105 static void epic_output_eepromw(epic_softc_t *, u_int16_t);
  106 static u_int16_t epic_input_eepromw(epic_softc_t *);
  107 static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
  108 static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
  109 static u_int8_t epic_read_eepromreg(epic_softc_t *);
  110 
  111 static int epic_read_phy_reg(epic_softc_t *, int, int);
  112 static void epic_write_phy_reg(epic_softc_t *, int, int, int);
  113 
  114 static int epic_miibus_readreg(device_t, int, int);
  115 static int epic_miibus_writereg(device_t, int, int, int);
  116 static void epic_miibus_statchg(device_t);
  117 static void epic_miibus_mediainit(device_t);
  118 
  119 static int epic_ifmedia_upd(struct ifnet *);
  120 static int epic_ifmedia_upd_locked(struct ifnet *);
  121 static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  122 
  123 static int epic_probe(device_t);
  124 static int epic_attach(device_t);
  125 static int epic_shutdown(device_t);
  126 static int epic_detach(device_t);
  127 static void epic_release(epic_softc_t *);
  128 static struct epic_type *epic_devtype(device_t);
  129 
/* newbus method dispatch table: device lifecycle plus MII bus accessors. */
static device_method_t epic_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         epic_probe),
        DEVMETHOD(device_attach,        epic_attach),
        DEVMETHOD(device_detach,        epic_detach),
        DEVMETHOD(device_shutdown,      epic_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       epic_miibus_readreg),
        DEVMETHOD(miibus_writereg,      epic_miibus_writereg),
        DEVMETHOD(miibus_statchg,       epic_miibus_statchg),
        DEVMETHOD(miibus_mediainit,     epic_miibus_mediainit),

        { 0, 0 }        /* terminator */
};

static driver_t epic_driver = {
        "tx",                   /* interface name prefix */
        epic_methods,
        sizeof(epic_softc_t)    /* per-device softc size */
};

static devclass_t epic_devclass;

/* Register the driver on the PCI bus and hang a miibus child off it. */
DRIVER_MODULE(tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);

/* Supported PCI vendor/device IDs; list is terminated by a NULL name. */
static struct epic_type epic_devs[] = {
        { SMC_VENDORID, SMC_DEVICEID_83C170, "SMC EtherPower II 10/100" },
        { 0, 0, NULL }
};
  161 
  162 static int
  163 epic_probe(device_t dev)
  164 {
  165         struct epic_type *t;
  166 
  167         t = epic_devtype(dev);
  168 
  169         if (t != NULL) {
  170                 device_set_desc(dev, t->name);
  171                 return (BUS_PROBE_DEFAULT);
  172         }
  173 
  174         return (ENXIO);
  175 }
  176 
  177 static struct epic_type *
  178 epic_devtype(device_t dev)
  179 {
  180         struct epic_type *t;
  181 
  182         t = epic_devs;
  183 
  184         while (t->name != NULL) {
  185                 if ((pci_get_vendor(dev) == t->ven_id) &&
  186                     (pci_get_device(dev) == t->dev_id)) {
  187                         return (t);
  188                 }
  189                 t++;
  190         }
  191         return (NULL);
  192 }
  193 
  194 #ifdef EPIC_USEIOSPACE
  195 #define EPIC_RES        SYS_RES_IOPORT
  196 #define EPIC_RID        PCIR_BASEIO
  197 #else
  198 #define EPIC_RES        SYS_RES_MEMORY
  199 #define EPIC_RID        PCIR_BASEMEM
  200 #endif
  201 
  202 static void
  203 epic_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  204 {
  205         u_int32_t *addr;
  206 
  207         if (error)
  208                 return;
  209 
  210         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  211         addr = arg;
  212         *addr = segs->ds_addr;
  213 }
  214 
/*
 * Attach routine: map registers, allocate softc, rings and descriptors.
 * Reset to known state.
 *
 * On any failure, jumps to "fail" where epic_release() frees whatever
 * was acquired so far (it tolerates a partially-initialized softc).
 */
static int
epic_attach(device_t dev)
{
        struct ifnet *ifp;
        epic_softc_t *sc;
        int error;
        int i, rid, tmp;
        u_char eaddr[6];

        sc = device_get_softc(dev);

        /* Preinitialize softc structure. */
        sc->dev = dev;
        mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);

        /* Fill ifnet structure. */
        ifp = sc->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
        ifp->if_ioctl = epic_ifioctl;
        ifp->if_start = epic_ifstart;
        ifp->if_init = epic_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);

        /* Enable busmastering. */
        pci_enable_busmaster(dev);

        /* Map control/status registers (I/O or memory, per EPIC_RES). */
        rid = EPIC_RID;
        sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE);
        if (sc->res == NULL) {
                device_printf(dev, "couldn't map ports/memory\n");
                error = ENXIO;
                goto fail;
        }

        /* Allocate interrupt. */
        rid = 0;
        sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Allocate DMA tags:
         *   mtag - mbuf data, up to EPIC_MAX_FRAGS segments of at most
         *          MCLBYTES each;
         *   rtag - RX descriptor ring, one contiguous segment;
         *   ttag - TX descriptor ring, one contiguous segment;
         *   ftag - TX fragment lists, one contiguous segment.
         */
        error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * EPIC_MAX_FRAGS, EPIC_MAX_FRAGS, MCLBYTES, 0, NULL, NULL,
            &sc->mtag);
        if (error) {
                device_printf(dev, "couldn't allocate dma tag\n");
                goto fail;
        }

        error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct epic_rx_desc) * RX_RING_SIZE,
            1, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 0, NULL,
            NULL, &sc->rtag);
        if (error) {
                device_printf(dev, "couldn't allocate dma tag\n");
                goto fail;
        }

        error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct epic_tx_desc) * TX_RING_SIZE,
            1, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 0,
            NULL, NULL, &sc->ttag);
        if (error) {
                device_printf(dev, "couldn't allocate dma tag\n");
                goto fail;
        }

        error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            sizeof(struct epic_frag_list) * TX_RING_SIZE,
            1, sizeof(struct epic_frag_list) * TX_RING_SIZE, 0,
            NULL, NULL, &sc->ftag);
        if (error) {
                device_printf(dev, "couldn't allocate dma tag\n");
                goto fail;
        }

        /*
         * Allocate DMA safe memory and get the DMA addresses.  Each
         * bus_dmamap_load() deposits the bus address into the softc via
         * the epic_dma_map_addr() callback.
         */
        error = bus_dmamem_alloc(sc->ftag, (void **)&sc->tx_flist,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fmap);
        if (error) {
                device_printf(dev, "couldn't allocate dma memory\n");
                goto fail;
        }
        error = bus_dmamap_load(sc->ftag, sc->fmap, sc->tx_flist,
            sizeof(struct epic_frag_list) * TX_RING_SIZE, epic_dma_map_addr,
            &sc->frag_addr, 0);
        if (error) {
                device_printf(dev, "couldn't map dma memory\n");
                goto fail;
        }
        error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tmap);
        if (error) {
                device_printf(dev, "couldn't allocate dma memory\n");
                goto fail;
        }
        error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
            sizeof(struct epic_tx_desc) * TX_RING_SIZE, epic_dma_map_addr,
            &sc->tx_addr, 0);
        if (error) {
                device_printf(dev, "couldn't map dma memory\n");
                goto fail;
        }
        error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rmap);
        if (error) {
                device_printf(dev, "couldn't allocate dma memory\n");
                goto fail;
        }
        error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
            sizeof(struct epic_rx_desc) * RX_RING_SIZE, epic_dma_map_addr,
            &sc->rx_addr, 0);
        if (error) {
                device_printf(dev, "couldn't map dma memory\n");
                goto fail;
        }

        /* Bring the chip out of low-power mode. */
        CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
        DELAY(500);

        /* Workaround for Application Note 7-15. */
        for (i = 0; i < 16; i++)
                CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

        /* Read MAC address from EEPROM (three 16-bit words). */
        for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
                ((u_int16_t *)eaddr)[i] = epic_read_eeprom(sc,i);

        /* Set Non-Volatile Control Register from EEPROM (low 5 bits only). */
        CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

        /* Set defaults. */
        sc->tx_threshold = TRANSMIT_THRESHOLD;
        sc->txcon = TXCON_DEFAULT;
        sc->miicfg = MIICFG_SMI_ENABLE;
        sc->phyid = EPIC_UNKN_PHY;      /* PHY type resolved later */
        sc->serinst = -1;               /* no serial media instance yet */

        /* Fetch card id (PCI subsystem vendor/device). */
        sc->cardvend = pci_read_config(dev, PCIR_SUBVEND_0, 2);
        sc->cardid = pci_read_config(dev, PCIR_SUBDEV_0, 2);

        if (sc->cardvend != SMC_VENDORID)
                device_printf(dev, "unknown card vendor %04xh\n", sc->cardvend);

        /* Do ifmedia setup. */
        error = mii_attach(dev, &sc->miibus, ifp, epic_ifmedia_upd,
            epic_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        /*
         * Board type: EEPROM words 0x2c-0x31 hold an ASCII string,
         * two characters per word, terminated by a space character.
         */
        printf(" type ");
        for(i = 0x2c; i < 0x32; i++) {
                tmp = epic_read_eeprom(sc, i);
                if (' ' == (u_int8_t)tmp)
                        break;
                printf("%c", (u_int8_t)tmp);
                tmp >>= 8;
                if (' ' == (u_int8_t)tmp)
                        break;
                printf("%c", (u_int8_t)tmp);
        }
        printf("\n");

        /* Initialize rings. */
        if (epic_init_rings(sc)) {
                device_printf(dev, "failed to init rings\n");
                error = ENXIO;
                goto fail;
        }

        /* Account for a VLAN tag; if_hdrlen feeds the MTU check in ioctl. */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        ifp->if_capenable |= IFCAP_VLAN_MTU;
        callout_init_mtx(&sc->timer, &sc->lock, 0);

        /* Attach to OS's managers. */
        ether_ifattach(ifp, eaddr);

        /* Activate our interrupt handler. */
        error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, epic_intr, sc, &sc->sc_ih);
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                goto fail;
        }

        return (0);
fail:
        epic_release(sc);
        return (error);
}
  432 
/*
 * Free any resources allocated by the driver.
 *
 * Safe to call from a partially-completed attach: every step is
 * guarded by a NULL/zero check on the corresponding softc field, and
 * DMA memory is unloaded and freed before its tag is destroyed.
 */
static void
epic_release(epic_softc_t *sc)
{
        if (sc->ifp != NULL)
                if_free(sc->ifp);
        if (sc->irq)
                /* rid 0 matches the rid used at allocation in epic_attach() */
                bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
        if (sc->res)
                bus_release_resource(sc->dev, EPIC_RES, EPIC_RID, sc->res);
        /* Ring buffers are torn down before their backing memory below. */
        epic_free_rings(sc);
        if (sc->tx_flist) {
                bus_dmamap_unload(sc->ftag, sc->fmap);
                bus_dmamem_free(sc->ftag, sc->tx_flist, sc->fmap);
                bus_dmamap_destroy(sc->ftag, sc->fmap);
        }
        if (sc->tx_desc) {
                bus_dmamap_unload(sc->ttag, sc->tmap);
                bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
                bus_dmamap_destroy(sc->ttag, sc->tmap);
        }
        if (sc->rx_desc) {
                bus_dmamap_unload(sc->rtag, sc->rmap);
                bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
                bus_dmamap_destroy(sc->rtag, sc->rmap);
        }
        if (sc->mtag)
                bus_dma_tag_destroy(sc->mtag);
        if (sc->ftag)
                bus_dma_tag_destroy(sc->ftag);
        if (sc->ttag)
                bus_dma_tag_destroy(sc->ttag);
        if (sc->rtag)
                bus_dma_tag_destroy(sc->rtag);
        mtx_destroy(&sc->lock);
}
  471 
/*
 * Detach driver and free resources.
 *
 * Teardown order: stop the chip under the lock, drain the timer
 * callout, detach from the network stack, tear down the interrupt,
 * detach bus children (miibus), then release everything else via
 * epic_release().
 */
static int
epic_detach(device_t dev)
{
        struct ifnet *ifp;
        epic_softc_t *sc;

        sc = device_get_softc(dev);
        ifp = sc->ifp;

        EPIC_LOCK(sc);
        epic_stop(sc);
        EPIC_UNLOCK(sc);
        /* Wait for any in-flight timer callout to finish. */
        callout_drain(&sc->timer);
        ether_ifdetach(ifp);
        bus_teardown_intr(dev, sc->irq, sc->sc_ih);

        bus_generic_detach(dev);
        device_delete_child(dev, sc->miibus);

        epic_release(sc);
        return (0);
}
  497 
  498 #undef  EPIC_RES
  499 #undef  EPIC_RID
  500 
  501 /*
  502  * Stop all chip I/O so that the kernel's probe routines don't
  503  * get confused by errant DMAs when rebooting.
  504  */
  505 static int
  506 epic_shutdown(device_t dev)
  507 {
  508         epic_softc_t *sc;
  509 
  510         sc = device_get_softc(dev);
  511 
  512         EPIC_LOCK(sc);
  513         epic_stop(sc);
  514         EPIC_UNLOCK(sc);
  515         return (0);
  516 }
  517 
/*
 * This is if_ioctl handler.
 *
 * Handles MTU changes, interface up/down and RX-filter flag changes,
 * multicast list updates and media ioctls; everything else is passed
 * to ether_ioctl().  Hardware access is done under the softc lock.
 */
static int
epic_ifioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        epic_softc_t *sc = ifp->if_softc;
        struct mii_data *mii;
        struct ifreq *ifr = (struct ifreq *) data;
        int error = 0;

        switch (command) {
        case SIOCSIFMTU:
                if (ifp->if_mtu == ifr->ifr_mtu)
                        break;

                /* XXX Though the datasheet doesn't imply any
                 * limitations on RX and TX sizes beside max 64Kb
                 * DMA transfer, seems we can't send more than 1600
                 * data bytes per ethernet packet (transmitter hangs
                 * up if more data is sent).
                 */
                EPIC_LOCK(sc);
                if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* Restart the chip so the new MTU takes effect. */
                        epic_stop(sc);
                        epic_init_locked(sc);
                } else
                        error = EINVAL;
                EPIC_UNLOCK(sc);
                break;

        case SIOCSIFFLAGS:
                /*
                 * If the interface is marked up and stopped, then start it.
                 * If it is marked down and running, then stop it.
                 */
                EPIC_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                                epic_init_locked(sc);
                                EPIC_UNLOCK(sc);
                                break;
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                epic_stop(sc);
                                EPIC_UNLOCK(sc);
                                break;
                        }
                }

                /*
                 * Handle IFF_PROMISC and IFF_ALLMULTI flags: quiesce
                 * DMA while the RX filter is reprogrammed.
                 */
                epic_stop_activity(sc);
                epic_set_mc_table(sc);
                epic_set_rx_mode(sc);
                epic_start_activity(sc);
                EPIC_UNLOCK(sc);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                EPIC_LOCK(sc);
                epic_set_mc_table(sc);
                EPIC_UNLOCK(sc);
                error = 0;
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                /* Media ioctls are forwarded to the MII layer. */
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;

        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }
        return (error);
}
  598 
  599 static void
  600 epic_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
  601     bus_size_t mapsize, int error)
  602 {
  603         struct epic_frag_list *flist;
  604         int i;
  605 
  606         if (error)
  607                 return;
  608 
  609         KASSERT(nseg <= EPIC_MAX_FRAGS, ("too many DMA segments"));
  610         flist = arg;
  611         /* Fill fragments list. */
  612         for (i = 0; i < nseg; i++) {
  613                 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
  614                 flist->frag[i].fraglen = segs[i].ds_len;
  615                 flist->frag[i].fragaddr = segs[i].ds_addr;
  616         }
  617         flist->numfrags = nseg;
  618 }
  619 
  620 static void
  621 epic_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg,
  622     bus_size_t mapsize, int error)
  623 {
  624         struct epic_rx_desc *desc;
  625 
  626         if (error)
  627                 return;
  628 
  629         KASSERT(nseg == 1, ("too many DMA segments"));
  630         desc = arg;
  631         desc->bufaddr = segs->ds_addr;
  632 }
  633 
  634 /*
  635  * This is if_start handler. It takes mbufs from if_snd queue
  636  * and queue them for transmit, one by one, until TX ring become full
  637  * or queue become empty.
  638  */
  639 static void
  640 epic_ifstart(struct ifnet * ifp)
  641 {
  642         epic_softc_t *sc = ifp->if_softc;
  643 
  644         EPIC_LOCK(sc);
  645         epic_ifstart_locked(ifp);
  646         EPIC_UNLOCK(sc);
  647 }
  648 
/*
 * Locked body of the if_start handler: take mbufs from the if_snd
 * queue and queue them for transmit, one by one, until the TX ring
 * becomes full or the queue becomes empty.  Called with the softc
 * lock held.
 */
static void
epic_ifstart_locked(struct ifnet * ifp)
{
        epic_softc_t *sc = ifp->if_softc;
        struct epic_tx_buffer *buf;
        struct epic_tx_desc *desc;
        struct epic_frag_list *flist;
        struct mbuf *m0, *m;
        int error;

        while (sc->pending_txs < TX_RING_SIZE) {
                buf = sc->tx_buffer + sc->cur_tx;
                desc = sc->tx_desc + sc->cur_tx;
                flist = sc->tx_flist + sc->cur_tx;

                /* Get next packet to send. */
                IF_DEQUEUE(&ifp->if_snd, m0);

                /* If nothing to send, return. */
                if (m0 == NULL)
                        return;

                /* DMA-map the mbuf chain; segments land in flist. */
                error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
                    epic_dma_map_txbuf, flist, 0);

                if (error && error != EFBIG) {
                        m_freem(m0);
                        ifp->if_oerrors++;
                        continue;
                }

                /*
                 * If packet was more than EPIC_MAX_FRAGS parts,
                 * recopy packet to a newly allocated mbuf cluster.
                 */
                if (error) {
                        m = m_defrag(m0, M_DONTWAIT);
                        if (m == NULL) {
                                m_freem(m0);
                                ifp->if_oerrors++;
                                continue;
                        }
                        m_freem(m0);
                        m0 = m;

                        error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
                            epic_dma_map_txbuf, flist, 0);
                        if (error) {
                                m_freem(m);
                                ifp->if_oerrors++;
                                continue;
                        }
                }
                bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

                buf->mbuf = m0;
                sc->pending_txs++;
                sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
                desc->control = 0x01;
                /* Pad short frames to the minimum ethernet payload length. */
                desc->txlength =
                    max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
                /*
                 * 0x8000 hands the descriptor to the chip; it is clear
                 * again once transmitted (see epic_tx_done()).
                 */
                desc->status = 0x8000;
                bus_dmamap_sync(sc->ttag, sc->tmap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);
                CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

                /* Set watchdog timer. */
                sc->tx_timeout = 8;

                /* Tap outbound packet for BPF listeners. */
                BPF_MTAP(ifp, m0);
        }

        /* TX ring is full: tell the stack to stop feeding us for now. */
        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}
  724 
/*
 * Synopsis: Finish all received frames.
 *
 * Walks the RX ring starting at cur_rx, passing each completed frame
 * to the stack and re-arming its descriptor with a fresh mbuf
 * cluster.  Called with the softc lock held; the lock is dropped
 * around the if_input() upcall.
 */
static void
epic_rx_done(epic_softc_t *sc)
{
        struct ifnet *ifp = sc->ifp;
        u_int16_t len;
        struct epic_rx_buffer *buf;
        struct epic_rx_desc *desc;
        struct mbuf *m;
        bus_dmamap_t map;
        int error;

        bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_POSTREAD);
        /* Status bit 0x8000 set means the chip still owns the descriptor. */
        while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
                buf = sc->rx_buffer + sc->cur_rx;
                desc = sc->rx_desc + sc->cur_rx;

                /* Switch to next descriptor. */
                sc->cur_rx = (sc->cur_rx + 1) & RX_RING_MASK;

                /*
                 * Check for RX errors. This should only happen if
                 * SAVE_ERRORED_PACKETS is set. RX errors generate
                 * RXE interrupt usually.
                 */
                if ((desc->status & 1) == 0) {
                        ifp->if_ierrors++;
                        desc->status = 0x8000;
                        continue;
                }

                /* Save packet length and mbuf contained packet. */
                bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
                len = desc->rxlength - ETHER_CRC_LEN;
                m = buf->mbuf;

                /*
                 * Try to get an mbuf cluster.  On failure the frame is
                 * dropped and the old mbuf is recycled into the ring.
                 */
                buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (buf->mbuf == NULL) {
                        buf->mbuf = m;
                        desc->status = 0x8000;
                        ifp->if_ierrors++;
                        continue;
                }
                buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
                /* Offset by ETHER_ALIGN for better payload alignment. */
                m_adj(buf->mbuf, ETHER_ALIGN);

                /* Point to new mbuf, and give descriptor to chip. */
                error = bus_dmamap_load_mbuf(sc->mtag, sc->sparemap, buf->mbuf,
                    epic_dma_map_rxbuf, desc, 0);
                if (error) {
                        buf->mbuf = m;
                        desc->status = 0x8000;
                        ifp->if_ierrors++;
                        continue;
                }

                desc->status = 0x8000;
                /* Swap the freshly-loaded spare map with the old map. */
                bus_dmamap_unload(sc->mtag, buf->map);
                map = buf->map;
                buf->map = sc->sparemap;
                sc->sparemap = map;
                bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

                /* First mbuf in packet holds the ethernet and packet headers */
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = len;

                /* Give mbuf to OS.  Drop the lock across the upcall. */
                EPIC_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                EPIC_LOCK(sc);

                /* Successfully received frame */
                ifp->if_ipackets++;
        }
        bus_dmamap_sync(sc->rtag, sc->rmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
  806 
  807 /*
  808  * Synopsis: Do last phase of transmission. I.e. if desc is
  809  * transmitted, decrease pending_txs counter, free mbuf contained
  810  * packet, switch to next descriptor and repeat until no packets
  811  * are pending or descriptor is not transmitted yet.
  812  */
static void
epic_tx_done(epic_softc_t *sc)
{
        struct epic_tx_buffer *buf;
        struct epic_tx_desc *desc;
        u_int16_t status;

        /* Pull the chip's descriptor-ring updates into the CPU's view. */
        bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_POSTREAD);
        while (sc->pending_txs > 0) {
                buf = sc->tx_buffer + sc->dirty_tx;
                desc = sc->tx_desc + sc->dirty_tx;
                status = desc->status;

                /*
                 * If this packet is not yet transmitted (chip still owns
                 * the descriptor, bit 0x8000), then the following packets
                 * are not transmitted either; stop scanning.
                 */
                if (status & 0x8000)
                        break;

                /* Packet is transmitted. Switch to next and free mbuf. */
                sc->pending_txs--;
                sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
                bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->mtag, buf->map);
                m_freem(buf->mbuf);
                buf->mbuf = NULL;

                /* Check for errors and collisions. */
                if (status & 0x0001)
                        sc->ifp->if_opackets++;
                else
                        sc->ifp->if_oerrors++;
                /* Bits 8-12 of the status word carry the collision count. */
                sc->ifp->if_collisions += (status >> 8) & 0x1F;
#ifdef EPIC_DIAG
                if ((status & 0x1001) == 0x1001)
                        device_printf(sc->dev,
                            "Tx ERROR: excessive coll. number\n");
#endif
        }

        /* Ring has free slots again: let the stack queue more packets. */
        if (sc->pending_txs < TX_RING_SIZE)
                sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        bus_dmamap_sync(sc->ttag, sc->tmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
  859 
  860 /*
  861  * Interrupt function
  862  */
static void
epic_intr(void *arg)
{
    epic_softc_t *sc;
    int status, i;

    sc = arg;
    i = 4;
    EPIC_LOCK(sc);
    /*
     * Service the chip while it reports an active interrupt, but at most
     * 4 passes so a stuck interrupt source cannot hold us here forever.
     */
    while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
        /* Acknowledge exactly the causes we are about to service. */
        CSR_WRITE_4(sc, INTSTAT, status);

        /* Receive events: completed frames and overflow conditions. */
        if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
            epic_rx_done(sc);
            if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
#ifdef EPIC_DIAG
                if (status & INTSTAT_OVW)
                    device_printf(sc->dev, "RX buffer overflow\n");
                if (status & INTSTAT_RQE)
                    device_printf(sc->dev, "RX FIFO overflow\n");
#endif
                /* Restart the receive queue if the chip dropped it. */
                if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
                    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
                sc->ifp->if_ierrors++;
            }
        }

        /* Transmit events: reap finished packets, then refill the ring. */
        if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
            epic_tx_done(sc);
            if (sc->ifp->if_snd.ifq_head != NULL)
                    epic_ifstart_locked(sc->ifp);
        }

        /* Check for rare errors */
        if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
                      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
            /* PCI-level fatal errors require a full chip reinitialization. */
            if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
                          INTSTAT_APE|INTSTAT_DPE)) {
                device_printf(sc->dev, "PCI fatal errors occured: %s%s%s%s\n",
                    (status & INTSTAT_PMA) ? "PMA " : "",
                    (status & INTSTAT_PTA) ? "PTA " : "",
                    (status & INTSTAT_APE) ? "APE " : "",
                    (status & INTSTAT_DPE) ? "DPE" : "");

                epic_stop(sc);
                epic_init_locked(sc);
                break;
            }

            /* Receive CRC/alignment error: count it and keep going. */
            if (status & INTSTAT_RXE) {
#ifdef EPIC_DIAG
                device_printf(sc->dev, "CRC/Alignment error\n");
#endif
                sc->ifp->if_ierrors++;
            }

            /* TX underrun: raise the threshold and restart the transmitter. */
            if (status & INTSTAT_TXU) {
                epic_tx_underrun(sc);
                sc->ifp->if_oerrors++;
            }
        }
    }

    /* If no packets are pending, then no timeouts. */
    if (sc->pending_txs == 0)
            sc->tx_timeout = 0;
    EPIC_UNLOCK(sc);
}
  931 
  932 /*
  933  * Handle the TX underrun error: increase the TX threshold
  934  * and restart the transmitter.
  935  */
static void
epic_tx_underrun(epic_softc_t *sc)
{
        /*
         * If the threshold is already past its maximum, early transmit
         * cannot help any more: disable it.  Otherwise raise the
         * threshold by 0x40 and try again.
         */
        if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
                sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#ifdef EPIC_DIAG
                device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
        } else {
                sc->tx_threshold += 0x40;
#ifdef EPIC_DIAG
                device_printf(sc->dev,
                    "Tx UNDERRUN: TX threshold increased to %d\n",
                    sc->tx_threshold);
#endif
        }

        /* We must set TXUGO to reset the stuck transmitter. */
        CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

        /* Update the TX threshold; TXCON is only writable while idle. */
        epic_stop_activity(sc);
        epic_set_tx_mode(sc);
        epic_start_activity(sc);
}
  961 
  962 /*
  963  * This function is called once a second when the interface is running
  964  * and performs two functions.  First, it provides a timer for the mii
  965  * to help with autonegotiation.  Second, it checks for transmit
  966  * timeouts.
  967  */
static void
epic_timer(void *arg)
{
        epic_softc_t *sc = arg;
        struct mii_data *mii;
        struct ifnet *ifp;

        ifp = sc->ifp;
        EPIC_ASSERT_LOCKED(sc);
        /* Watchdog: tx_timeout counts down once per tick while TX pends. */
        if (sc->tx_timeout && --sc->tx_timeout == 0) {
                device_printf(sc->dev, "device timeout %d packets\n",
                    sc->pending_txs);

                /* Try to finish queued packets. */
                epic_tx_done(sc);

                /* If not successful, reset the chip entirely. */
                if (sc->pending_txs > 0) {
                        ifp->if_oerrors += sc->pending_txs;

                        /* Reinitialize board. */
                        device_printf(sc->dev, "reinitialization\n");
                        epic_stop(sc);
                        epic_init_locked(sc);
                } else
                        device_printf(sc->dev,
                            "seems we can continue normaly\n");

                /* Start output. */
                if (ifp->if_snd.ifq_head)
                        epic_ifstart_locked(ifp);
        }

        /* Drive the MII autonegotiation state machine. */
        mii = device_get_softc(sc->miibus);
        mii_tick(mii);

        /* Reschedule ourselves to run again in one second. */
        callout_reset(&sc->timer, hz, epic_timer, sc);
}
 1006 
 1007 /*
 1008  * Set media options.
 1009  */
 1010 static int
 1011 epic_ifmedia_upd(struct ifnet *ifp)
 1012 {
 1013         epic_softc_t *sc;
 1014         int error;
 1015 
 1016         sc = ifp->if_softc;
 1017         EPIC_LOCK(sc);
 1018         error = epic_ifmedia_upd_locked(ifp);
 1019         EPIC_UNLOCK(sc);
 1020         return (error);
 1021 }
 1022         
/*
 * Set media options with the softc lock held.  Selects between the
 * serial (10base2/BNC) interface and an MII PHY, identifies the PHY,
 * and performs any PHY-specific card setup.
 */
static int
epic_ifmedia_upd_locked(struct ifnet *ifp)
{
        epic_softc_t *sc;
        struct mii_data *mii;
        struct ifmedia *ifm;
        struct mii_softc *miisc;
        int cfg, media;

        sc = ifp->if_softc;
        mii = device_get_softc(sc->miibus);
        ifm = &mii->mii_media;
        media = ifm->ifm_cur->ifm_media;

        /* Do not do anything if interface is not up. */
        if ((ifp->if_flags & IFF_UP) == 0)
                return (0);

        /*
         * Lookup current selected PHY.
         */
        if (IFM_INST(media) == sc->serinst) {
                /* Serial (BNC) media instance selected: no MII PHY. */
                sc->phyid = EPIC_SERIAL;
                sc->physc = NULL;
        } else {
                /* If we're not selecting serial interface, select MII mode. */
                sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
                CSR_WRITE_4(sc, MIICFG, sc->miicfg);

                /* Default to unknown PHY. */
                sc->phyid = EPIC_UNKN_PHY;

                /* Lookup selected PHY. */
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
                        if (IFM_INST(media) == miisc->mii_inst) {
                                sc->physc = miisc;
                                break;
                        }
                }

                /* Identify selected PHY by its OUI and model number. */
                if (sc->physc) {
                        int id1, id2, model, oui;

                        id1 = PHY_READ(sc->physc, MII_PHYIDR1);
                        id2 = PHY_READ(sc->physc, MII_PHYIDR2);

                        oui = MII_OUI(id1, id2);
                        model = MII_MODEL(id2);
                        switch (oui) {
                        case MII_OUI_QUALSEMI:
                                if (model == MII_MODEL_QUALSEMI_QS6612)
                                        sc->phyid = EPIC_QS6612_PHY;
                                break;
                        case MII_OUI_xxALTIMA:
                                if (model == MII_MODEL_xxALTIMA_AC101)
                                        sc->phyid = EPIC_AC101_PHY;
                                break;
                        case MII_OUI_xxLEVEL1:
                                if (model == MII_MODEL_xxLEVEL1_LXT970)
                                        sc->phyid = EPIC_LXT970_PHY;
                                break;
                        }
                }
        }

        /*
         * Do PHY specific card setup.
         */

        /*
         * Call this, to isolate all not selected PHYs and
         * set up selected.
         */
        mii_mediachg(mii);

        /* Do our own setup. */
        switch (sc->phyid) {
        case EPIC_QS6612_PHY:
                /* No extra setup required for the QS6612. */
                break;
        case EPIC_AC101_PHY:
                /* We have to powerup fiber tranceivers. */
                if (IFM_SUBTYPE(media) == IFM_100_FX)
                        sc->miicfg |= MIICFG_694_ENABLE;
                else
                        sc->miicfg &= ~MIICFG_694_ENABLE;
                CSR_WRITE_4(sc, MIICFG, sc->miicfg);

                break;
        case EPIC_LXT970_PHY:
                /* We have to powerup fiber tranceivers. */
                cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
                if (IFM_SUBTYPE(media) == IFM_100_FX)
                        cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
                else
                        cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
                PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);

                break;
        case EPIC_SERIAL:
                /* Select serial PHY (10base2/BNC usually). */
                sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
                CSR_WRITE_4(sc, MIICFG, sc->miicfg);

                /* There is no driver to fill this. */
                mii->mii_media_active = media;
                mii->mii_media_status = 0;

                /*
                 * We need to call this manually as it wasn't called
                 * in mii_mediachg().
                 */
                epic_miibus_statchg(sc->dev);
                break;
        default:
                device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
                return (EINVAL);
        }

        return (0);
}
 1144 
 1145 /*
 1146  * Report current media status.
 1147  */
 1148 static void
 1149 epic_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 1150 {
 1151         epic_softc_t *sc;
 1152         struct mii_data *mii;
 1153         struct ifmedia *ifm;
 1154 
 1155         sc = ifp->if_softc;
 1156         mii = device_get_softc(sc->miibus);
 1157         EPIC_LOCK(sc);
 1158         ifm = &mii->mii_media;
 1159 
 1160         /* Nothing should be selected if interface is down. */
 1161         if ((ifp->if_flags & IFF_UP) == 0) {
 1162                 ifmr->ifm_active = IFM_NONE;
 1163                 ifmr->ifm_status = 0;
 1164                 EPIC_UNLOCK(sc);
 1165                 return;
 1166         }
 1167 
 1168         /* Call underlying pollstat, if not serial PHY. */
 1169         if (sc->phyid != EPIC_SERIAL)
 1170                 mii_pollstat(mii);
 1171 
 1172         /* Simply copy media info. */
 1173         ifmr->ifm_active = mii->mii_media_active;
 1174         ifmr->ifm_status = mii->mii_media_status;
 1175         EPIC_UNLOCK(sc);
 1176 }
 1177 
 1178 /*
 1179  * Callback routine, called on media change.
 1180  */
static void
epic_miibus_statchg(device_t dev)
{
        epic_softc_t *sc;
        struct mii_data *mii;
        int media;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->miibus);
        media = mii->mii_media_active;

        /* Recompute the duplex/loopback bits from scratch. */
        sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

        /*
         * If we are in full-duplex mode or loopback operation,
         * we need to decouple receiver and transmitter.
         */
        if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
                sc->txcon |= TXCON_FULL_DUPLEX;

        /* On some cards we need to manually set the full-duplex LED. */
        if (sc->cardid == SMC9432FTX ||
            sc->cardid == SMC9432FTX_SC) {
                if (IFM_OPTIONS(media) & IFM_FDX)
                        sc->miicfg |= MIICFG_694_ENABLE;
                else
                        sc->miicfg &= ~MIICFG_694_ENABLE;

                CSR_WRITE_4(sc, MIICFG, sc->miicfg);
        }

        /* TXCON can only be updated while the chip is idle. */
        epic_stop_activity(sc);
        epic_set_tx_mode(sc);
        epic_start_activity(sc);
}
 1216 
 1217 static void
 1218 epic_miibus_mediainit(device_t dev)
 1219 {
 1220         epic_softc_t *sc;
 1221         struct mii_data *mii;
 1222         struct ifmedia *ifm;
 1223         int media;
 1224 
 1225         sc = device_get_softc(dev);
 1226         mii = device_get_softc(sc->miibus);
 1227         ifm = &mii->mii_media;
 1228 
 1229         /*
 1230          * Add Serial Media Interface if present, this applies to
 1231          * SMC9432BTX serie.
 1232          */
 1233         if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
 1234                 /* Store its instance. */
 1235                 sc->serinst = mii->mii_instance++;
 1236 
 1237                 /* Add as 10base2/BNC media. */
 1238                 media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
 1239                 ifmedia_add(ifm, media, 0, NULL);
 1240 
 1241                 /* Report to user. */
 1242                 device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
 1243         }
 1244 }
 1245 
 1246 /*
 1247  * Reset chip and update media.
 1248  */
 1249 static void
 1250 epic_init(void *xsc)
 1251 {
 1252         epic_softc_t *sc = xsc;
 1253 
 1254         EPIC_LOCK(sc);
 1255         epic_init_locked(sc);
 1256         EPIC_UNLOCK(sc);
 1257 }
 1258 
static void
epic_init_locked(epic_softc_t *sc)
{
        struct ifnet *ifp = sc->ifp;
        int i;

        /* If interface is already running, then we need not do anything. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                return;
        }

        /* Soft reset the chip (we have to power up card before). */
        CSR_WRITE_4(sc, GENCTL, 0);
        CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

        /*
         * Reset takes 15 pci ticks which depends on PCI bus speed.
         * Assuming it >= 33000000 hz, we have wait at least 495e-6 sec.
         */
        DELAY(500);

        /* Wake up */
        CSR_WRITE_4(sc, GENCTL, 0);

        /* Workaround for Application Note 7-15 */
        for (i = 0; i < 16; i++)
                CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

        /* Give rings to EPIC */
        CSR_WRITE_4(sc, PRCDAR, sc->rx_addr);
        CSR_WRITE_4(sc, PTCDAR, sc->tx_addr);

        /* Put node address to EPIC (three 16-bit words of the MAC). */
        CSR_WRITE_4(sc, LAN0, ((u_int16_t *)IF_LLADDR(sc->ifp))[0]);
        CSR_WRITE_4(sc, LAN1, ((u_int16_t *)IF_LLADDR(sc->ifp))[1]);
        CSR_WRITE_4(sc, LAN2, ((u_int16_t *)IF_LLADDR(sc->ifp))[2]);

        /* Set tx mode, including transmit threshold. */
        epic_set_tx_mode(sc);

        /* Compute and set RXCON. */
        epic_set_rx_mode(sc);

        /* Set multicast table. */
        epic_set_mc_table(sc);

        /* Enable interrupts by setting the interrupt mask. */
        CSR_WRITE_4(sc, INTMASK,
                INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
                /* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
                INTSTAT_FATAL);

        /* Acknowledge all pending interrupts. */
        CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

        /* Enable interrupts,  set for PCI read multiple and etc */
        CSR_WRITE_4(sc, GENCTL,
                GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
                GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

        /* Mark interface running ... */
        if (ifp->if_flags & IFF_UP)
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
        else
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

        /* ... and free */
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Start Rx process */
        epic_start_activity(sc);

        /* Set appropriate media */
        epic_ifmedia_upd_locked(ifp);

        /* Arm the one-second watchdog/MII timer. */
        callout_reset(&sc->timer, hz, epic_timer, sc);
}
 1336 
 1337 /*
 1338  * Synopsis: calculate and set Rx mode. Chip must be in idle state to
 1339  * access RXCON.
 1340  */
 1341 static void
 1342 epic_set_rx_mode(epic_softc_t *sc)
 1343 {
 1344         u_int32_t flags;
 1345         u_int32_t rxcon;
 1346 
 1347         flags = sc->ifp->if_flags;
 1348         rxcon = RXCON_DEFAULT;
 1349 
 1350 #ifdef EPIC_EARLY_RX
 1351         rxcon |= RXCON_EARLY_RX;
 1352 #endif
 1353 
 1354         rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;
 1355 
 1356         CSR_WRITE_4(sc, RXCON, rxcon);
 1357 }
 1358 
 1359 /*
 1360  * Synopsis: Set transmit control register. Chip must be in idle state to
 1361  * access TXCON.
 1362  */
static void
epic_set_tx_mode(epic_softc_t *sc)
{

        /* Program the early-transmit threshold only when early TX is on. */
        if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
                CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

        /* Write the cached transmit-control value to the chip. */
        CSR_WRITE_4(sc, TXCON, sc->txcon);
}
 1372 
 1373 /*
 1374  * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
 1375  * flags (note that setting PROMISC bit in EPIC's RXCON will only touch
 1376  * individual frames, multicast filter must be manually programmed).
 1377  *
 1378  * Note: EPIC must be in idle state.
 1379  */
 1380 static void
 1381 epic_set_mc_table(epic_softc_t *sc)
 1382 {
 1383         struct ifnet *ifp;
 1384         struct ifmultiaddr *ifma;
 1385         u_int16_t filter[4];
 1386         u_int8_t h;
 1387 
 1388         ifp = sc->ifp;
 1389         if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
 1390                 CSR_WRITE_4(sc, MC0, 0xFFFF);
 1391                 CSR_WRITE_4(sc, MC1, 0xFFFF);
 1392                 CSR_WRITE_4(sc, MC2, 0xFFFF);
 1393                 CSR_WRITE_4(sc, MC3, 0xFFFF);
 1394                 return;
 1395         }
 1396 
 1397         filter[0] = 0;
 1398         filter[1] = 0;
 1399         filter[2] = 0;
 1400         filter[3] = 0;
 1401 
 1402         if_maddr_rlock(ifp);
 1403         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1404                 if (ifma->ifma_addr->sa_family != AF_LINK)
 1405                         continue;
 1406                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
 1407                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
 1408                 filter[h >> 4] |= 1 << (h & 0xF);
 1409         }
 1410         if_maddr_runlock(ifp);
 1411 
 1412         CSR_WRITE_4(sc, MC0, filter[0]);
 1413         CSR_WRITE_4(sc, MC1, filter[1]);
 1414         CSR_WRITE_4(sc, MC2, filter[2]);
 1415         CSR_WRITE_4(sc, MC3, filter[3]);
 1416 }
 1417 
 1418 
 1419 /*
 * Synopsis: Start the receive process, and the transmit process if
 * packets are pending.
 1421  */
 1422 static void
 1423 epic_start_activity(epic_softc_t *sc)
 1424 {
 1425 
 1426         /* Start rx process. */
 1427         CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX |
 1428             (sc->pending_txs ? COMMAND_TXQUEUED : 0));
 1429 }
 1430 
 1431 /*
 1432  * Synopsis: Completely stop Rx and Tx processes. If TQE is set additional
 1433  * packet needs to be queued to stop Tx DMA.
 1434  */
static void
epic_stop_activity(epic_softc_t *sc)
{
        int status, i;

        /* Stop Tx and Rx DMA. */
        CSR_WRITE_4(sc, COMMAND,
            COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

        /* Wait Rx and Tx DMA to stop (why 1 ms ??? XXX). */
        for (i = 0; i < 0x1000; i++) {
                status = CSR_READ_4(sc, INTSTAT) &
                    (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
                if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
                        break;
                DELAY(1);
        }

        /* Catch all finished packets. */
        epic_rx_done(sc);
        epic_tx_done(sc);

        /* Re-read to report which engine, if any, failed to go idle. */
        status = CSR_READ_4(sc, INTSTAT);

        if ((status & INTSTAT_RXIDLE) == 0)
                device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

        if ((status & INTSTAT_TXIDLE) == 0)
                device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

        /*
         * May need to queue one more packet if TQE, this is rare
         * but existing case.  See epic_queue_last_packet().
         */
        if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
                (void)epic_queue_last_packet(sc);
}
 1472 
 1473 /*
 * The EPIC transmitter may get stuck in the TQE state. It will not go IDLE until
 1475  * a packet from current descriptor will be copied to internal RAM. We
 1476  * compose a dummy packet here and queue it for transmission.
 1477  *
 1478  * XXX the packet will then be actually sent over network...
 1479  */
static int
epic_queue_last_packet(epic_softc_t *sc)
{
        struct epic_tx_desc *desc;
        struct epic_frag_list *flist;
        struct epic_tx_buffer *buf;
        struct mbuf *m0;
        int error, i;

        device_printf(sc->dev, "queue last packet\n");

        desc = sc->tx_desc + sc->cur_tx;
        flist = sc->tx_flist + sc->cur_tx;
        buf = sc->tx_buffer + sc->cur_tx;

        /* The current slot must be free: not chip-owned and no mbuf. */
        if ((desc->status & 0x8000) || (buf->mbuf != NULL))
                return (EBUSY);

        MGETHDR(m0, M_DONTWAIT, MT_DATA);
        if (m0 == NULL)
                return (ENOBUFS);

        /* Prepare mbuf: a zero-filled minimum-length frame. */
        m0->m_len = min(MHLEN, ETHER_MIN_LEN - ETHER_CRC_LEN);
        m0->m_pkthdr.len = m0->m_len;
        m0->m_pkthdr.rcvif = sc->ifp;
        bzero(mtod(m0, caddr_t), m0->m_len);

        /* Fill fragments list. */
        error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
            epic_dma_map_txbuf, flist, 0);
        if (error) {
                m_freem(m0);
                return (error);
        }
        bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

        /* Fill in descriptor and hand it to the chip (owner bit 0x8000). */
        buf->mbuf = m0;
        sc->pending_txs++;
        sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
        desc->control = 0x01;
        desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
        desc->status = 0x8000;
        bus_dmamap_sync(sc->ttag, sc->tmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

        /* Launch transmission. */
        CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);

        /* Wait Tx DMA to stop (for how long??? XXX) */
        for (i = 0; i < 1000; i++) {
                if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
                        break;
                DELAY(1);
        }

        /* If the transmitter went idle, reap the dummy packet now. */
        if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
                device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
        else
                epic_tx_done(sc);

        return (0);
}
 1545 
 1546 /*
 1547  *  Synopsis: Shut down board and deallocates rings.
 1548  */
static void
epic_stop(epic_softc_t *sc)
{

        EPIC_ASSERT_LOCKED(sc);

        /* Cancel the watchdog and the per-second timer. */
        sc->tx_timeout = 0;
        callout_stop(&sc->timer);

        /* Disable interrupts */
        CSR_WRITE_4(sc, INTMASK, 0);
        CSR_WRITE_4(sc, GENCTL, 0);

        /* Try to stop Rx and TX processes */
        epic_stop_activity(sc);

        /* Reset chip */
        CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
        DELAY(1000);

        /* Make chip go to bed */
        CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);

        /* Mark as stopped */
        sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
 1575 
 1576 /*
 1577  * Synopsis: This function should free all memory allocated for rings.
 1578  */
 1579 static void
 1580 epic_free_rings(epic_softc_t *sc)
 1581 {
 1582         int i;
 1583 
 1584         for (i = 0; i < RX_RING_SIZE; i++) {
 1585                 struct epic_rx_buffer *buf = sc->rx_buffer + i;
 1586                 struct epic_rx_desc *desc = sc->rx_desc + i;
 1587 
 1588                 desc->status = 0;
 1589                 desc->buflength = 0;
 1590                 desc->bufaddr = 0;
 1591 
 1592                 if (buf->mbuf) {
 1593                         bus_dmamap_unload(sc->mtag, buf->map);
 1594                         bus_dmamap_destroy(sc->mtag, buf->map);
 1595                         m_freem(buf->mbuf);
 1596                 }
 1597                 buf->mbuf = NULL;
 1598         }
 1599 
 1600         if (sc->sparemap != NULL)
 1601                 bus_dmamap_destroy(sc->mtag, sc->sparemap);
 1602 
 1603         for (i = 0; i < TX_RING_SIZE; i++) {
 1604                 struct epic_tx_buffer *buf = sc->tx_buffer + i;
 1605                 struct epic_tx_desc *desc = sc->tx_desc + i;
 1606 
 1607                 desc->status = 0;
 1608                 desc->buflength = 0;
 1609                 desc->bufaddr = 0;
 1610 
 1611                 if (buf->mbuf) {
 1612                         bus_dmamap_unload(sc->mtag, buf->map);
 1613                         bus_dmamap_destroy(sc->mtag, buf->map);
 1614                         m_freem(buf->mbuf);
 1615                 }
 1616                 buf->mbuf = NULL;
 1617         }
 1618 }
 1619 
 1620 /*
 1621  * Synopsis:  Allocates mbufs for Rx ring and point Rx descs to them.
 1622  * Point Tx descs to fragment lists. Check that all descs and fraglists
 1623  * are bounded and aligned properly.
 1624  */
static int
epic_init_rings(epic_softc_t *sc)
{
        int error, i;

        /* Reset all ring indices and the pending-TX counter. */
        sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

        /* Initialize the RX descriptor ring. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct epic_rx_buffer *buf = sc->rx_buffer + i;
                struct epic_rx_desc *desc = sc->rx_desc + i;

                desc->status = 0;               /* Owned by driver */
                /* Chain to the following descriptor, wrapping at the end. */
                desc->next = sc->rx_addr +
                    ((i + 1) & RX_RING_MASK) * sizeof(struct epic_rx_desc);

                /* Descriptors must be 4-byte aligned and not cross a page. */
                if ((desc->next & 3) ||
                    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
                        epic_free_rings(sc);
                        return (EFAULT);
                }

                buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (buf->mbuf == NULL) {
                        epic_free_rings(sc);
                        return (ENOBUFS);
                }
                buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
                /* Trim the front so the payload ends up aligned. */
                m_adj(buf->mbuf, ETHER_ALIGN);

                error = bus_dmamap_create(sc->mtag, 0, &buf->map);
                if (error) {
                        epic_free_rings(sc);
                        return (error);
                }
                error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
                    epic_dma_map_rxbuf, desc, 0);
                if (error) {
                        epic_free_rings(sc);
                        return (error);
                }
                bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

                desc->buflength = buf->mbuf->m_len; /* Max RX buffer length */
                desc->status = 0x8000;          /* Set owner bit to NIC */
        }
        bus_dmamap_sync(sc->rtag, sc->rmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Create the spare DMA map. */
        error = bus_dmamap_create(sc->mtag, 0, &sc->sparemap);
        if (error) {
                epic_free_rings(sc);
                return (error);
        }

        /* Initialize the TX descriptor ring. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                struct epic_tx_buffer *buf = sc->tx_buffer + i;
                struct epic_tx_desc *desc = sc->tx_desc + i;

                desc->status = 0;
                /* Chain to the following descriptor, wrapping at the end. */
                desc->next = sc->tx_addr +
                    ((i + 1) & TX_RING_MASK) * sizeof(struct epic_tx_desc);

                /* Same alignment/page-crossing checks as for RX. */
                if ((desc->next & 3) ||
                    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
                        epic_free_rings(sc);
                        return (EFAULT);
                }

                buf->mbuf = NULL;
                /* Point the descriptor at this slot's fragment list. */
                desc->bufaddr = sc->frag_addr +
                    i * sizeof(struct epic_frag_list);

                if ((desc->bufaddr & 3) ||
                    ((desc->bufaddr & PAGE_MASK) +
                    sizeof(struct epic_frag_list)) > PAGE_SIZE) {
                        epic_free_rings(sc);
                        return (EFAULT);
                }

                error = bus_dmamap_create(sc->mtag, 0, &buf->map);
                if (error) {
                        epic_free_rings(sc);
                        return (error);
                }
        }
        bus_dmamap_sync(sc->ttag, sc->tmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

        return (0);
}
 1719 
 1720 /*
 1721  * EEPROM operation functions
 1722  */
 1723 static void
 1724 epic_write_eepromreg(epic_softc_t *sc, u_int8_t val)
 1725 {
 1726         u_int16_t i;
 1727 
 1728         CSR_WRITE_1(sc, EECTL, val);
 1729 
 1730         for (i = 0; i < 0xFF; i++) {
 1731                 if ((CSR_READ_1(sc, EECTL) & 0x20) == 0)
 1732                         break;
 1733         }
 1734 }
 1735 
 1736 static u_int8_t
 1737 epic_read_eepromreg(epic_softc_t *sc)
 1738 {
 1739 
 1740         return (CSR_READ_1(sc, EECTL));
 1741 }
 1742 
 1743 static u_int8_t
 1744 epic_eeprom_clock(epic_softc_t *sc, u_int8_t val)
 1745 {
 1746 
 1747         epic_write_eepromreg(sc, val);
 1748         epic_write_eepromreg(sc, (val | 0x4));
 1749         epic_write_eepromreg(sc, val);
 1750 
 1751         return (epic_read_eepromreg(sc));
 1752 }
 1753 
 1754 static void
 1755 epic_output_eepromw(epic_softc_t *sc, u_int16_t val)
 1756 {
 1757         int i;
 1758 
 1759         for (i = 0xF; i >= 0; i--) {
 1760                 if (val & (1 << i))
 1761                         epic_eeprom_clock(sc, 0x0B);
 1762                 else
 1763                         epic_eeprom_clock(sc, 0x03);
 1764         }
 1765 }
 1766 
 1767 static u_int16_t
 1768 epic_input_eepromw(epic_softc_t *sc)
 1769 {
 1770         u_int16_t retval = 0;
 1771         int i;
 1772 
 1773         for (i = 0xF; i >= 0; i--) {
 1774                 if (epic_eeprom_clock(sc, 0x3) & 0x10)
 1775                         retval |= (1 << i);
 1776         }
 1777 
 1778         return (retval);
 1779 }
 1780 
 1781 static int
 1782 epic_read_eeprom(epic_softc_t *sc, u_int16_t loc)
 1783 {
 1784         u_int16_t dataval;
 1785         u_int16_t read_cmd;
 1786 
 1787         epic_write_eepromreg(sc, 3);
 1788 
 1789         if (epic_read_eepromreg(sc) & 0x40)
 1790                 read_cmd = (loc & 0x3F) | 0x180;
 1791         else
 1792                 read_cmd = (loc & 0xFF) | 0x600;
 1793 
 1794         epic_output_eepromw(sc, read_cmd);
 1795 
 1796         dataval = epic_input_eepromw(sc);
 1797 
 1798         epic_write_eepromreg(sc, 1);
 1799 
 1800         return (dataval);
 1801 }
 1802 
 1803 /*
 1804  * Here goes MII read/write routines.
 1805  */
 1806 static int
 1807 epic_read_phy_reg(epic_softc_t *sc, int phy, int reg)
 1808 {
 1809         int i;
 1810 
 1811         CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));
 1812 
 1813         for (i = 0; i < 0x100; i++) {
 1814                 if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0)
 1815                         break;
 1816                 DELAY(1);
 1817         }
 1818 
 1819         return (CSR_READ_4(sc, MIIDATA));
 1820 }
 1821 
 1822 static void
 1823 epic_write_phy_reg(epic_softc_t *sc, int phy, int reg, int val)
 1824 {
 1825         int i;
 1826 
 1827         CSR_WRITE_4(sc, MIIDATA, val);
 1828         CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));
 1829 
 1830         for(i = 0; i < 0x100; i++) {
 1831                 if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0)
 1832                         break;
 1833                 DELAY(1);
 1834         }
 1835 }
 1836 
 1837 static int
 1838 epic_miibus_readreg(device_t dev, int phy, int reg)
 1839 {
 1840         epic_softc_t *sc;
 1841 
 1842         sc = device_get_softc(dev);
 1843 
 1844         return (PHY_READ_2(sc, phy, reg));
 1845 }
 1846 
 1847 static int
 1848 epic_miibus_writereg(device_t dev, int phy, int reg, int data)
 1849 {
 1850         epic_softc_t *sc;
 1851 
 1852         sc = device_get_softc(dev);
 1853 
 1854         PHY_WRITE_2(sc, phy, reg, data);
 1855 
 1856         return (0);
 1857 }

Cache object: 1e1bc40d5e8ecfa1bc38da66f9323a14


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.