FreeBSD/Linux Kernel Cross Reference
sys/dev/mge/if_mge.c


    1 /*-
    2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
    3  * All rights reserved.
    4  *
    5  * Developed by Semihalf.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. Neither the name of MARVELL nor the names of contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 #ifdef HAVE_KERNEL_OPTION_HEADERS
   33 #include "opt_device_polling.h"
   34 #endif
   35 
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD: releng/8.4/sys/dev/mge/if_mge.c 233025 2012-03-16 09:22:59Z scottl $");
   38 
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/endian.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/lock.h>
   44 #include <sys/mutex.h>
   45 #include <sys/kernel.h>
   46 #include <sys/module.h>
   47 #include <sys/socket.h>
   48 #include <sys/sysctl.h>
   49 
   50 #include <net/ethernet.h>
   51 #include <net/bpf.h>
   52 #include <net/if.h>
   53 #include <net/if_arp.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_types.h>
   57 #include <net/if_vlan_var.h>
   58 
   59 #include <netinet/in_systm.h>
   60 #include <netinet/in.h>
   61 #include <netinet/ip.h>
   62 
   63 #include <sys/sockio.h>
   64 #include <sys/bus.h>
   65 #include <machine/bus.h>
   66 #include <sys/rman.h>
   67 #include <machine/resource.h>
   68 
   69 #include <dev/mii/mii.h>
   70 #include <dev/mii/miivar.h>
   71 
   72 #ifndef MII_ADDR_BASE
   73 #define MII_ADDR_BASE 8
   74 #endif
   75 
   76 #include <dev/mge/if_mgevar.h>
   77 #include <arm/mv/mvreg.h>
   78 #include <arm/mv/mvvar.h>
   79 
   80 #include "miibus_if.h"
   81 
   82 /* PHY registers are in the address space of the first mge unit */
   83 static struct mge_softc *sc_mge0 = NULL;
   84 
   85 static int mge_probe(device_t dev);
   86 static int mge_attach(device_t dev);
   87 static int mge_detach(device_t dev);
   88 static int mge_shutdown(device_t dev);
   89 static int mge_suspend(device_t dev);
   90 static int mge_resume(device_t dev);
   91 
   92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
   93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
   94 
   95 static int mge_ifmedia_upd(struct ifnet *ifp);
   96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
   97 
   98 static void mge_init(void *arg);
   99 static void mge_init_locked(void *arg);
  100 static void mge_start(struct ifnet *ifp);
  101 static void mge_start_locked(struct ifnet *ifp);
  102 static void mge_watchdog(struct mge_softc *sc);
  103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
  104 
  105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
  106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
  107 static void mge_ver_params(struct mge_softc *sc);
  108 
  109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
  110 static void mge_intr_rx(void *arg);
  111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
  112 static void mge_intr_tx(void *arg);
  113 static void mge_intr_tx_locked(struct mge_softc *sc);
  114 static void mge_intr_misc(void *arg);
  115 static void mge_intr_sum(void *arg);
  116 static void mge_intr_err(void *arg);
  117 static void mge_stop(struct mge_softc *sc);
  118 static void mge_tick(void *msc);
  119 static uint32_t mge_set_port_serial_control(uint32_t media);
  120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
  121 static void mge_set_mac_address(struct mge_softc *sc);
  122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
  123     uint8_t queue);
  124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
  125 static int mge_allocate_dma(struct mge_softc *sc);
  126 static int mge_alloc_desc_dma(struct mge_softc *sc,
  127     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
  128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
  129     struct mbuf **mbufp, bus_addr_t *paddr);
  130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
  131 static void mge_free_dma(struct mge_softc *sc);
  132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
  133     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
  134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
  135     uint32_t status, uint16_t bufsize);
  136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
  137     struct mge_desc_wrapper *dw);
  138 static uint8_t mge_crc8(uint8_t *data, int size);
  139 static void mge_setup_multicast(struct mge_softc *sc);
  140 static void mge_set_rxic(struct mge_softc *sc);
  141 static void mge_set_txic(struct mge_softc *sc);
  142 static void mge_add_sysctls(struct mge_softc *sc);
  143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
  144 
  145 static device_method_t mge_methods[] = {
  146         /* Device interface */
  147         DEVMETHOD(device_probe,         mge_probe),
  148         DEVMETHOD(device_attach,        mge_attach),
  149         DEVMETHOD(device_detach,        mge_detach),
  150         DEVMETHOD(device_shutdown,      mge_shutdown),
  151         DEVMETHOD(device_suspend,       mge_suspend),
  152         DEVMETHOD(device_resume,        mge_resume),
  153         /* MII interface */
  154         DEVMETHOD(miibus_readreg,       mge_miibus_readreg),
  155         DEVMETHOD(miibus_writereg,      mge_miibus_writereg),
  156         { 0, 0 }
  157 };
  158 
  159 static driver_t mge_driver = {
  160         "mge",
  161         mge_methods,
  162         sizeof(struct mge_softc),
  163 };
  164 
  165 static devclass_t mge_devclass;
  166 
  167 DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
  168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
  169 MODULE_DEPEND(mge, ether, 1, 1, 1);
  170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
  171 
  172 static struct resource_spec res_spec[] = {
  173         { SYS_RES_MEMORY, 0, RF_ACTIVE },
  174         { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
  175         { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
  176         { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
  177         { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
  178         { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
  179         { -1, 0 }
  180 };
  181 
  182 static struct {
  183         driver_intr_t *handler;
  184         char * description;
  185 } mge_intrs[MGE_INTR_COUNT] = {
  186         { mge_intr_rx,  "GbE receive interrupt" },
  187         { mge_intr_tx,  "GbE transmit interrupt" },
  188         { mge_intr_misc,"GbE misc interrupt" },
  189         { mge_intr_sum, "GbE summary interrupt" },
  190         { mge_intr_err, "GbE error interrupt" },
  191 };
  192 
  193 static void
  194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
  195 {
  196         uint32_t mac_l, mac_h;
  197 
  198         /* XXX use currently programmed MAC address; eventually this info will
  199          * be provided by the loader */
  200 
  201         mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
  202         mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
  203 
  204         addr[0] = (mac_h & 0xff000000) >> 24;
  205         addr[1] = (mac_h & 0x00ff0000) >> 16;
  206         addr[2] = (mac_h & 0x0000ff00) >> 8;
  207         addr[3] = (mac_h & 0x000000ff);
  208         addr[4] = (mac_l & 0x0000ff00) >> 8;
  209         addr[5] = (mac_l & 0x000000ff);
  210 }
  211 
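      /*
       * Helpers for packing the IPG (inter-packet gap) values into their
       * version-dependent register fields: on v1 controllers the TX FIFO
       * urgent threshold IPG occupies bits [17:4] and the RX IPG bits
       * [21:8]; on v2 the TX field widens to bits [19:4] and the RX value
       * is split, with bit 15 landing in bit 25 and bits [14:0] in [21:7].
       */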
  212 static uint32_t
  213 mge_tfut_ipg(uint32_t val, int ver)
  214 {
  215 
  216         switch (ver) {
  217         case 1:
  218                 return ((val & 0x3fff) << 4);
  219         case 2:
  220         default:
  221                 return ((val & 0xffff) << 4);
  222         }
  223 }
  224 
  225 static uint32_t
  226 mge_rx_ipg(uint32_t val, int ver)
  227 {
  228 
  229         switch (ver) {
  230         case 1:
  231                 return ((val & 0x3fff) << 8);
  232         case 2:
  233         default:
  234                 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
  235         }
  236 }
  237 
  238 static void
  239 mge_ver_params(struct mge_softc *sc)
  240 {
  241         uint32_t d, r;
  242 
  243         soc_id(&d, &r);
  244         if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
  245             d == MV_DEV_MV78100_Z0) {
  246                 sc->mge_ver = 2;
  247                 sc->mge_mtu = 0x4e8;
  248                 sc->mge_tfut_ipg_max = 0xFFFF;
  249                 sc->mge_rx_ipg_max = 0xFFFF;
  250                 sc->mge_tx_arb_cfg = 0xFC0000FF;
  251                 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
  252                 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
  253         } else {
  254                 sc->mge_ver = 1;
  255                 sc->mge_mtu = 0x458;
  256                 sc->mge_tfut_ipg_max = 0x3FFF;
  257                 sc->mge_rx_ipg_max = 0x3FFF;
  258                 sc->mge_tx_arb_cfg = 0x000000FF;
  259                 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
  260                 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
  261         }
  262 }
  263 
  264 static void
  265 mge_set_mac_address(struct mge_softc *sc)
  266 {
  267         char *if_mac;
  268         uint32_t mac_l, mac_h;
  269 
  270         MGE_GLOBAL_LOCK_ASSERT(sc);
  271 
  272         if_mac = (char *)IF_LLADDR(sc->ifp);
  273 
  274         mac_l = (if_mac[4] << 8) | (if_mac[5]);
  275         mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
  276             (if_mac[2] << 8) | (if_mac[3] << 0);
  277 
  278         MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
  279         MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
  280 
  281         mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
  282 }
  283 
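      /*
       * Program the unicast DA filter table.  The table holds one entry per
       * value of the low nibble of the station address; each 32-bit filter
       * register packs MGE_UCAST_REG_NUMBER one-byte entries, each a pass
       * bit plus a 3-bit RX queue number.  Illustrative example, assuming
       * four entries per register: a MAC ending in 0x15 yields nibble 0x5,
       * i.e. register 1, byte offset 1.
       */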
  284 static void
  285 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
  286 {
  287         uint32_t reg_idx, reg_off, reg_val, i;
  288 
  289         last_byte &= 0xf;
  290         reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
  291         reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
  292         reg_val = (1 | (queue << 1)) << reg_off;
  293 
  294         for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
  295                 if (i == reg_idx)
  296                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
  297                 else
  298                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
  299         }
  300 }
  301 
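      /*
       * Toggle promiscuous mode.  Entering it sets the Unicast Promiscuous
       * Mode (UPM) bit and opens every entry of the multicast and unicast
       * DA filter tables towards the given RX queue; leaving it clears the
       * tables and reinstates the station address filter.
       */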
  302 static void
  303 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
  304 {
  305         uint32_t port_config;
  306         uint32_t reg_val, i;
  307 
  308         /* Enable or disable promiscuous mode as needed */
  309         if (sc->ifp->if_flags & IFF_PROMISC) {
  310                 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
  311                 port_config |= PORT_CONFIG_UPM;
  312                 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
  313 
  314                 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
  315                    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
  316 
  317                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
  318                         MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
  319                         MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
  320                 }
  321 
  322                 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
  323                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
  324 
  325         } else {
  326                 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
  327                 port_config &= ~PORT_CONFIG_UPM;
  328                 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
  329 
  330                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
  331                         MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
  332                         MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
  333                 }
  334 
  335                 mge_set_mac_address(sc);
  336         }
  337 }
  338 
  339 static void
  340 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  341 {
  342         u_int32_t *paddr;
  343 
  344         KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
  345         paddr = arg;
  346 
  347         *paddr = segs->ds_addr;
  348 }
  349 
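      /*
       * Attach a fresh mbuf cluster to an RX descriptor slot: unload any
       * previous buffer from the DMA map, load the new cluster and return
       * its bus address to the caller.  A load yielding anything but a
       * single segment is fatal, since a descriptor can only point at one
       * contiguous buffer.
       */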
  350 static int
  351 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
  352     bus_addr_t *paddr)
  353 {
  354         struct mbuf *new_mbuf;
  355         bus_dma_segment_t seg[1];
  356         int error;
  357         int nsegs;
  358 
  359         KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
  360 
  361         new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
  362         if (new_mbuf == NULL)
  363                 return (ENOBUFS);
  364         new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
  365 
  366         if (*mbufp) {
  367                 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
  368                 bus_dmamap_unload(tag, map);
  369         }
  370 
  371         error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
  372             BUS_DMA_NOWAIT);
  373         KASSERT(nsegs == 1, ("Too many segments returned!"));
  374         if (nsegs != 1 || error)
  375                 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
  376 
  377         bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
  378 
  379         (*mbufp) = new_mbuf;
  380         (*paddr) = seg->ds_addr;
  381         return (0);
  382 }
  383 
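      /*
       * Allocate DMA-able memory for a descriptor table and link it into a
       * ring.  The loop walks the table backwards so that each descriptor's
       * next_desc can be set to the physical address of its successor as
       * soon as that address is known; the final assignment closes the ring
       * by pointing the last descriptor back at the first.
       */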
  384 static int
  385 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
  386     uint32_t size, bus_dma_tag_t *buffer_tag)
  387 {
  388         struct mge_desc_wrapper *dw;
  389         bus_addr_t desc_paddr;
  390         int i, error;
  391 
  392         desc_paddr = 0;
  393         for (i = size - 1; i >= 0; i--) {
  394                 dw = &(tab[i]);
  395                 error = bus_dmamem_alloc(sc->mge_desc_dtag,
  396                     (void**)&(dw->mge_desc),
  397                     BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  398                     &(dw->desc_dmap));
  399 
  400                 if (error) {
  401                         if_printf(sc->ifp, "failed to allocate DMA memory\n");
  402                         dw->mge_desc = NULL;
  403                         return (ENXIO);
  404                 }
  405 
  406                 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
  407                     dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
  408                     &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
  409 
  410                 if (error) {
  411                         if_printf(sc->ifp, "can't load descriptor\n");
  412                         bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
  413                             dw->desc_dmap);
  414                         dw->mge_desc = NULL;
  415                         return (ENXIO);
  416                 }
  417 
  418                 /* Chain descriptors */
  419                 dw->mge_desc->next_desc = desc_paddr;
  420                 desc_paddr = dw->mge_desc_paddr;
  421         }
  422         tab[size - 1].mge_desc->next_desc = desc_paddr;
  423 
  424         /* Allocate a busdma tag for mbufs. */
  425         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
  426             8, 0,                               /* alignment, boundary */
  427             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  428             BUS_SPACE_MAXADDR,                  /* highaddr */
  429             NULL, NULL,                         /* filtfunc, filtfuncarg */
  430             MCLBYTES, 1,                        /* maxsize, nsegments */
  431             MCLBYTES, 0,                        /* maxsegsz, flags */
  432             NULL, NULL,                         /* lockfunc, lockfuncarg */
  433             buffer_tag);                        /* dmat */
  434         if (error) {
  435                 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
  436                 return (ENXIO);
  437         }
  438 
  439         /* Create TX busdma maps */
  440         for (i = 0; i < size; i++) {
  441                 dw = &(tab[i]);
  442                 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
  443                 if (error) {
  444                         if_printf(sc->ifp, "failed to create map for mbuf\n");
  445                         return (ENXIO);
  446                 }
  447 
  448                 dw->buffer = (struct mbuf*)NULL;
  449                 dw->mge_desc->buffer = (bus_addr_t)NULL;
  450         }
  451 
  452         return (0);
  453 }
  454 
  455 static int
  456 mge_allocate_dma(struct mge_softc *sc)
  457 {
  458         int error;
  459         struct mge_desc_wrapper *dw;
  460         int i;
  461 
  462         /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
  463         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
  464             16, 0,                              /* alignment, boundary */
  465             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  466             BUS_SPACE_MAXADDR,                  /* highaddr */
  467             NULL, NULL,                         /* filtfunc, filtfuncarg */
  468             sizeof(struct mge_desc), 1,         /* maxsize, nsegments */
  469             sizeof(struct mge_desc), 0,         /* maxsegsz, flags */
  470             NULL, NULL,                         /* lockfunc, lockfuncarg */
  471             &sc->mge_desc_dtag);                /* dmat */
  472 
              if (error) {
                      device_printf(sc->dev,
                          "failed to create busdma tag for descriptors\n");
                      return (ENXIO);
              }
  473 
  474         mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
  475             &sc->mge_tx_dtag);
  476         mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
  477             &sc->mge_rx_dtag);
  478 
  479         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  480                 dw = &(sc->mge_rx_desc[i]);
  481                 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
  482                     &dw->mge_desc->buffer);
  483         }
  484 
  485         sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
  486         sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
  487 
  488         return (0);
  489 }
  490 
  491 static void
  492 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
  493     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
  494 {
  495         struct mge_desc_wrapper *dw;
  496         int i;
  497 
  498         for (i = 0; i < size; i++) {
  499                 /* Free RX mbuf */
  500                 dw = &(tab[i]);
  501 
  502                 if (dw->buffer_dmap) {
  503                         if (free_mbufs) {
  504                                 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
  505                                     BUS_DMASYNC_POSTREAD);
  506                                 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
  507                         }
  508                         bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
  509                         if (free_mbufs)
  510                                 m_freem(dw->buffer);
  511                 }
  512                 /* Free RX descriptors */
  513                 if (dw->desc_dmap) {
  514                         bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
  515                             BUS_DMASYNC_POSTREAD);
  516                         bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
  517                         bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
  518                             dw->desc_dmap);
  519                 }
  520         }
  521 }
  522 
  523 static void
  524 mge_free_dma(struct mge_softc *sc)
  525 {
  526         /* Free descriptors and mbufs */
  527         mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
  528         mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
  529 
  530         /* Destroy mbuf dma tag */
  531         bus_dma_tag_destroy(sc->mge_tx_dtag);
  532         bus_dma_tag_destroy(sc->mge_rx_dtag);
  533         /* Destroy descriptors tag */
  534         bus_dma_tag_destroy(sc->mge_desc_dtag);
  535 }
  536 
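      /*
       * Rebuild the RX ring from scratch after a resource error: free all
       * descriptors and buffers, reallocate and repopulate them, then
       * re-arm the RX queue at the start of the new ring.
       */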
  537 static void
  538 mge_reinit_rx(struct mge_softc *sc)
  539 {
  540         struct mge_desc_wrapper *dw;
  541         int i;
  542 
  543         MGE_RECEIVE_LOCK_ASSERT(sc);
  544 
  545         mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
  546 
  547         mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
  548             &sc->mge_rx_dtag);
  549 
  550         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  551                 dw = &(sc->mge_rx_desc[i]);
  552                 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
  553                     &dw->mge_desc->buffer);
  554         }
  555 
  556         sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
  557         sc->rx_desc_curr = 0;
  558 
  559         MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
  560             sc->rx_desc_start);
  561 
  562         /* Enable RX queue */
  563         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
  564 }
  565 
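      /*
       * DEVICE_POLLING handler: with polling enabled the port interrupts
       * are masked and this routine is invoked periodically to reclaim
       * completed TX descriptors and to receive at most "count" frames.
       * POLL_AND_CHECK_STATUS additionally acknowledges the interrupt
       * cause registers and recovers from RX resource errors.
       */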
  566 #ifdef DEVICE_POLLING
  567 static poll_handler_t mge_poll;
  568 
  569 static int
  570 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
  571 {
  572         struct mge_softc *sc = ifp->if_softc;
  573         uint32_t int_cause, int_cause_ext;
  574         int rx_npkts = 0;
  575 
  576         MGE_GLOBAL_LOCK(sc);
  577 
  578         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  579                 MGE_GLOBAL_UNLOCK(sc);
  580                 return (rx_npkts);
  581         }
  582 
  583         if (cmd == POLL_AND_CHECK_STATUS) {
  584                 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
  585                 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
  586 
  587                 /* Check for resource error */
  588                 if (int_cause & MGE_PORT_INT_RXERRQ0)
  589                         mge_reinit_rx(sc);
  590 
  591                 if (int_cause || int_cause_ext) {
  592                         MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
  593                         MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
  594                 }
  595         }
  596 
  597         mge_intr_tx_locked(sc);
  598         rx_npkts = mge_intr_rx_locked(sc, count);
  599 
  600         MGE_GLOBAL_UNLOCK(sc);
  601         return (rx_npkts);
  602 }
  603 #endif /* DEVICE_POLLING */
  604 
  605 static int
  606 mge_attach(device_t dev)
  607 {
  608         struct mge_softc *sc;
  609         struct ifnet *ifp;
  610         uint8_t hwaddr[ETHER_ADDR_LEN];
  611         int i, error, phy;
  612 
  613         sc = device_get_softc(dev);
  614         sc->dev = dev;
  615 
  616         if (device_get_unit(dev) == 0)
  617                 sc_mge0 = sc;
  618 
  619         /* Set chip version-dependent parameters */
  620         mge_ver_params(sc);
  621 
  622         /*
  623          * We assume static PHY address <=> device unit mapping:
  624          * PHY Address = MII_ADDR_BASE + device unit.
  625          * This is true for most Marvell boards.
  626          */
  627         phy = MII_ADDR_BASE + device_get_unit(dev);
  628 
  629         /* Initialize mutexes */
  630         mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
  631         mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
  632 
  633         /* Allocate IO and IRQ resources */
  634         error = bus_alloc_resources(dev, res_spec, sc->res);
  635         if (error) {
  636                 device_printf(dev, "could not allocate resources\n");
  637                 mge_detach(dev);
  638                 return (ENXIO);
  639         }
  640 
  641         /* Allocate DMA, buffers, buffer descriptors */
  642         error = mge_allocate_dma(sc);
  643         if (error) {
  644                 mge_detach(dev);
  645                 return (ENXIO);
  646         }
  647 
  648         sc->tx_desc_curr = 0;
  649         sc->rx_desc_curr = 0;
  650         sc->tx_desc_used_idx = 0;
  651         sc->tx_desc_used_count = 0;
  652 
  653         /* Configure defaults for interrupts coalescing */
  654         sc->rx_ic_time = 768;
  655         sc->tx_ic_time = 768;
  656         mge_add_sysctls(sc);
  657 
  658         /* Allocate network interface */
  659         ifp = sc->ifp = if_alloc(IFT_ETHER);
  660         if (ifp == NULL) {
  661                 device_printf(dev, "if_alloc() failed\n");
  662                 mge_detach(dev);
  663                 return (ENOMEM);
  664         }
  665 
  666         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  667         ifp->if_softc = sc;
  668         ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
  669         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
  670         ifp->if_capenable = ifp->if_capabilities;
  671         ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
  672 
  673 #ifdef DEVICE_POLLING
  674         /* Advertise that polling is supported */
  675         ifp->if_capabilities |= IFCAP_POLLING;
  676 #endif
  677 
  678         ifp->if_init = mge_init;
  679         ifp->if_start = mge_start;
  680         ifp->if_ioctl = mge_ioctl;
  681 
  682         ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
  683         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  684         IFQ_SET_READY(&ifp->if_snd);
  685 
  686         mge_get_mac_address(sc, hwaddr);
  687         ether_ifattach(ifp, hwaddr);
  688         callout_init(&sc->wd_callout, 0);
  689 
  690         /* Attach PHY(s) */
  691         error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
  692             mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
  693         if (error) {
  694                 device_printf(dev, "attaching PHYs failed\n");
  695                 if_free(ifp);
  696                 sc->ifp = NULL;
  697                 mge_detach(dev);
  698                 return (error);
  699         }
  700         sc->mii = device_get_softc(sc->miibus);
  701 
  702         /* Attach interrupt handlers */
  703         for (i = 0; i < 2; ++i) {
  704                 error = bus_setup_intr(dev, sc->res[1 + i],
  705                     INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
  706                     sc, &sc->ih_cookie[i]);
  707                 if (error) {
  708                         device_printf(dev, "could not setup %s\n",
  709                             mge_intrs[i].description);
  710                         ether_ifdetach(sc->ifp);
  711                         return (error);
  712                 }
  713         }
  714 
  715         return (0);
  716 }
  717 
  718 static int
  719 mge_detach(device_t dev)
  720 {
  721         struct mge_softc *sc;
  722         int error, i;
  723 
  724         sc = device_get_softc(dev);
  725 
  726         /* Stop controller and free TX queue */
  727         if (sc->ifp)
  728                 mge_shutdown(dev);
  729 
  730         /* Wait for stopping ticks */
  731         callout_drain(&sc->wd_callout);
  732 
  733         /* Stop and release all interrupts */
  734         for (i = 0; i < 2; ++i) {
  735                 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
  736                 if (error)
  737                         device_printf(dev, "could not release %s\n",
  738                             mge_intrs[i].description);
  739         }
  740 
  741         /* Detach network interface */
  742         if (sc->ifp) {
  743                 ether_ifdetach(sc->ifp);
  744                 if_free(sc->ifp);
  745         }
  746 
  747         /* Free DMA resources */
  748         mge_free_dma(sc);
  749 
  750         /* Free IO memory handler */
  751         bus_release_resources(dev, res_spec, sc->res);
  752 
  753         /* Destroy mutexes */
  754         mtx_destroy(&sc->receive_lock);
  755         mtx_destroy(&sc->transmit_lock);
  756 
  757         return (0);
  758 }
  759 
  760 static void
  761 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  762 {
  763         struct mge_softc *sc = ifp->if_softc;
  764         struct mii_data *mii;
  765 
  766         MGE_TRANSMIT_LOCK(sc);
  767 
  768         mii = sc->mii;
  769         mii_pollstat(mii);
  770 
  771         ifmr->ifm_active = mii->mii_media_active;
  772         ifmr->ifm_status = mii->mii_media_status;
  773 
  774         MGE_TRANSMIT_UNLOCK(sc);
  775 }
  776 
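      /*
       * Translate an ifmedia word into a Port Serial Control register
       * value: the base MRU and link settings, speed bits for fixed
       * 1000/100/10 media, and the full-duplex bit when requested.
       * IFM_AUTO adds nothing, leaving speed and duplex to the PHY.
       */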
  777 static uint32_t
  778 mge_set_port_serial_control(uint32_t media)
  779 {
  780         uint32_t port_config;
  781 
  782         port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
  783             PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
  784 
  785         if (IFM_TYPE(media) == IFM_ETHER) {
  786                 switch(IFM_SUBTYPE(media)) {
  787                         case IFM_AUTO:
  788                                 break;
  789                         case IFM_1000_T:
  790                                 port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
  791                                     PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
  792                                     PORT_SERIAL_SPEED_AUTONEG);
  793                                 break;
  794                         case IFM_100_TX:
  795                                 port_config  |= (PORT_SERIAL_MII_SPEED_100 |
  796                                     PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
  797                                     PORT_SERIAL_SPEED_AUTONEG);
  798                                 break;
  799                         case IFM_10_T:
  800                                 port_config  |= (PORT_SERIAL_AUTONEG |
  801                                     PORT_SERIAL_AUTONEG_FC |
  802                                     PORT_SERIAL_SPEED_AUTONEG);
  803                                 break;
  804                 }
  805                 if (media & IFM_FDX)
  806                         port_config |= PORT_SERIAL_FULL_DUPLEX;
  807         }
  808         return (port_config);
  809 }
  810 
  811 static int
  812 mge_ifmedia_upd(struct ifnet *ifp)
  813 {
  814         struct mge_softc *sc = ifp->if_softc;
  815 
  816         if (ifp->if_flags & IFF_UP) {
  817                 MGE_GLOBAL_LOCK(sc);
  818 
  819                 sc->mge_media_status = sc->mii->mii_media.ifm_media;
  820                 mii_mediachg(sc->mii);
  821                 mge_init_locked(sc);
  822 
  823                 MGE_GLOBAL_UNLOCK(sc);
  824         }
  825 
  826         return (0);
  827 }
  828 
  829 static void
  830 mge_init(void *arg)
  831 {
  832         struct mge_softc *sc = arg;
  833 
  834         MGE_GLOBAL_LOCK(sc);
  835 
  836         mge_init_locked(arg);
  837 
  838         MGE_GLOBAL_UNLOCK(sc);
  839 }
  840 
  841 static void
  842 mge_init_locked(void *arg)
  843 {
  844         struct mge_softc *sc = arg;
  845         struct mge_desc_wrapper *dw;
  846         volatile uint32_t reg_val;
  847         int i, count;
  848 
  849 
  850         MGE_GLOBAL_LOCK_ASSERT(sc);
  851 
  852         /* Stop interface */
  853         mge_stop(sc);
  854 
  855         /* Disable interrupts */
  856         mge_intrs_ctrl(sc, 0);
  857 
  858         /* Set MAC address */
  859         mge_set_mac_address(sc);
  860 
  861         /* Setup multicast filters */
  862         mge_setup_multicast(sc);
  863 
  864         if (sc->mge_ver == 2) {
  865                 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
  866                 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
  867         }
  868 
  869         /* Initialize TX queue configuration registers */
  870         MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
  871         MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
  872         MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
  873 
  874         /* Clear TX queue configuration registers for unused queues */
  875         for (i = 1; i < 7; i++) {
  876                 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
  877                 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
  878                 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
  879         }
  880 
  881         /* Set default MTU */
  882         MGE_WRITE(sc, sc->mge_mtu, 0);
  883 
  884         /* Port configuration */
  885         MGE_WRITE(sc, MGE_PORT_CONFIG,
  886             PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
  887             PORT_CONFIG_ARO_RXQ(0));
  888         MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
  889 
  890         /* Setup port configuration */
  891         reg_val = mge_set_port_serial_control(sc->mge_media_status);
  892         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
  893 
  894         /* Setup SDMA configuration */
  895         MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
  896             MGE_SDMA_TX_BYTE_SWAP |
  897             MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
  898             MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
  899 
  900         MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
  901 
  902         MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
  903         MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
  904             sc->rx_desc_start);
  905 
  906         /* Reset descriptor indexes */
  907         sc->tx_desc_curr = 0;
  908         sc->rx_desc_curr = 0;
  909         sc->tx_desc_used_idx = 0;
  910         sc->tx_desc_used_count = 0;
  911 
  912         /* Enable RX descriptors */
  913         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  914                 dw = &sc->mge_rx_desc[i];
  915                 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
  916                 dw->mge_desc->buff_size = MCLBYTES;
  917                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
  918                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  919         }
  920 
  921         /* Enable RX queue */
  922         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
  923 
  924         /* Enable port */
  925         reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
  926         reg_val |= PORT_SERIAL_ENABLE;
  927         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
  928         count = 0x100000;
  929         for (;;) {
  930                 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
  931                 if (reg_val & MGE_STATUS_LINKUP)
  932                         break;
  933                 DELAY(100);
  934                 if (--count == 0) {
  935                         if_printf(sc->ifp, "Timeout on link-up\n");
  936                         break;
  937                 }
  938         }
  939 
  940         /* Setup interrupts coalescing */
  941         mge_set_rxic(sc);
  942         mge_set_txic(sc);
  943 
  944         /* Enable interrupts */
  945 #ifdef DEVICE_POLLING
  946         /*
  947          * ...only if polling is not turned on. Disable interrupts explicitly
  948          * if polling is enabled.
  949          */
  950         if (sc->ifp->if_capenable & IFCAP_POLLING)
  951                 mge_intrs_ctrl(sc, 0);
  952         else
  953 #endif /* DEVICE_POLLING */
  954         mge_intrs_ctrl(sc, 1);
  955 
  956         /* Activate network interface */
  957         sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
  958         sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  959         sc->wd_timer = 0;
  960 
  961         /* Schedule watchdog timeout */
  962         callout_reset(&sc->wd_callout, hz, mge_tick, sc);
  963 }
  964 
  965 static void
  966 mge_intr_err(void *arg)
  967 {
  968         struct mge_softc *sc = arg;
  969         struct ifnet *ifp;
  970 
  971         ifp = sc->ifp;
  972         if_printf(ifp, "%s\n", __FUNCTION__);
  973 }
  974 
  975 static void
  976 mge_intr_misc(void *arg)
  977 {
  978         struct mge_softc *sc = arg;
  979         struct ifnet *ifp;
  980 
  981         ifp = sc->ifp;
  982         if_printf(ifp, "%s\n", __FUNCTION__);
  983 }
  984 
  985 static void
  986 mge_intr_rx(void *arg)
      {
  987         struct mge_softc *sc = arg;
  988         uint32_t int_cause, int_cause_ext;
  989 
  990         MGE_RECEIVE_LOCK(sc);
  991 
  992 #ifdef DEVICE_POLLING
  993         if (sc->ifp->if_capenable & IFCAP_POLLING) {
  994                 MGE_RECEIVE_UNLOCK(sc);
  995                 return;
  996         }
  997 #endif
  998 
  999         /* Get interrupt cause */
 1000         int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
 1001         int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 1002 
 1003         /* Check for resource error */
 1004         if (int_cause & MGE_PORT_INT_RXERRQ0) {
 1005                 mge_reinit_rx(sc);
 1006                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
 1007                     int_cause & ~MGE_PORT_INT_RXERRQ0);
 1008         }
 1009 
 1010         int_cause &= MGE_PORT_INT_RXQ0;
 1011         int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
 1012 
 1013         if (int_cause || int_cause_ext) {
 1014                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
 1015                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
 1016                 mge_intr_rx_locked(sc, -1);
 1017         }
 1018 
 1019         MGE_RECEIVE_UNLOCK(sc);
 1020 }
 1021 
 1022 
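      /*
       * Receive completed frames from the RX ring.  A negative "count"
       * means no budget (interrupt context); the polling path passes its
       * quota instead.  The two-byte trim after m_devget() skips the
       * padding the controller stores ahead of the Ethernet header (so
       * that, as is usual for Marvell GbE, the IP header ends up 32-bit
       * aligned).
       */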
 1023 static int
 1024 mge_intr_rx_locked(struct mge_softc *sc, int count)
 1025 {
 1026         struct ifnet *ifp = sc->ifp;
 1027         uint32_t status;
 1028         uint16_t bufsize;
 1029         struct mge_desc_wrapper* dw;
 1030         struct mbuf *mb;
 1031         int rx_npkts = 0;
 1032 
 1033         MGE_RECEIVE_LOCK_ASSERT(sc);
 1034 
 1035         while (count != 0) {
 1036                 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
 1037                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1038                     BUS_DMASYNC_POSTREAD);
 1039 
 1040                 /* Get status */
 1041                 status = dw->mge_desc->cmd_status;
 1042                 bufsize = dw->mge_desc->buff_size;
 1043                 if ((status & MGE_DMA_OWNED) != 0)
 1044                         break;
 1045 
 1046                 if (dw->mge_desc->byte_count &&
 1047                     !(status & MGE_ERR_SUMMARY)) {
 1048 
 1049                         bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
 1050                             BUS_DMASYNC_POSTREAD);
 1051 
 1052                         mb = m_devget(dw->buffer->m_data,
 1053                             dw->mge_desc->byte_count - ETHER_CRC_LEN,
 1054                             0, ifp, NULL);
 1055 
 1056                         if (mb == NULL)
 1057                                 /* Give up if no mbufs */
 1058                                 break;
 1059 
 1060                         mb->m_len -= 2;
 1061                         mb->m_pkthdr.len -= 2;
 1062                         mb->m_data += 2;
 1063 
 1064                         mge_offload_process_frame(ifp, mb, status,
 1065                             bufsize);
 1066 
 1067                         MGE_RECEIVE_UNLOCK(sc);
 1068                         (*ifp->if_input)(ifp, mb);
 1069                         MGE_RECEIVE_LOCK(sc);
 1070                         rx_npkts++;
 1071                 }
 1072 
 1073                 dw->mge_desc->byte_count = 0;
 1074                 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
 1075                 sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
 1076                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1077                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1078 
 1079                 if (count > 0)
 1080                         count -= 1;
 1081         }
 1082 
 1083         return (rx_npkts);
 1084 }
 1085 
 1086 static void
 1087 mge_intr_sum(void *arg)
 1088 {
 1089         struct mge_softc *sc = arg;
 1090         struct ifnet *ifp;
 1091 
 1092         ifp = sc->ifp;
 1093         if_printf(ifp, "%s\n", __FUNCTION__);
 1094 }
 1095 
 1096 static void
 1097 mge_intr_tx(void *arg)
 1098 {
 1099         struct mge_softc *sc = arg;
 1100         uint32_t int_cause_ext;
 1101 
 1102         MGE_TRANSMIT_LOCK(sc);
 1103 
 1104 #ifdef DEVICE_POLLING
 1105         if (sc->ifp->if_capenable & IFCAP_POLLING) {
 1106                 MGE_TRANSMIT_UNLOCK(sc);
 1107                 return;
 1108         }
 1109 #endif
 1110 
 1111         /* Ack the interrupt */
 1112         int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 1113         MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
 1114             int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
 1115 
 1116         mge_intr_tx_locked(sc);
 1117 
 1118         MGE_TRANSMIT_UNLOCK(sc);
 1119 }
 1120 
 1121 
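      /*
       * Reclaim TX descriptors the hardware is done with: walk the used
       * list until a descriptor still owned by the DMA engine is found,
       * account collisions (a late collision counts as one, hitting the
       * retransmit limit as sixteen), unload and free the transmitted
       * mbufs, and restart the output queue if anything was reclaimed.
       */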
 1122 static void
 1123 mge_intr_tx_locked(struct mge_softc *sc)
 1124 {
 1125         struct ifnet *ifp = sc->ifp;
 1126         struct mge_desc_wrapper *dw;
 1127         struct mge_desc *desc;
 1128         uint32_t status;
 1129         int send = 0;
 1130 
 1131         MGE_TRANSMIT_LOCK_ASSERT(sc);
 1132 
 1133         /* Disable watchdog */
 1134         sc->wd_timer = 0;
 1135 
 1136         while (sc->tx_desc_used_count) {
 1137                 /* Get the descriptor */
 1138                 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
 1139                 desc = dw->mge_desc;
 1140                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1141                     BUS_DMASYNC_POSTREAD);
 1142 
 1143                 /* Get descriptor status */
 1144                 status = desc->cmd_status;
 1145 
 1146                 if (status & MGE_DMA_OWNED)
 1147                         break;
 1148 
 1149                 sc->tx_desc_used_idx =
 1150                         (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
 1151                 sc->tx_desc_used_count--;
 1152 
 1153                 /* Update collision statistics */
 1154                 if (status & MGE_ERR_SUMMARY) {
 1155                         if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
 1156                                 ifp->if_collisions++;
 1157                         if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
 1158                                 ifp->if_collisions += 16;
 1159                 }
 1160 
 1161                 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
 1162                     BUS_DMASYNC_POSTWRITE);
 1163                 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
 1164                 m_freem(dw->buffer);
 1165                 dw->buffer = (struct mbuf*)NULL;
 1166                 send++;
 1167 
 1168                 ifp->if_opackets++;
 1169         }
 1170 
 1171         if (send) {
 1172                 /* Now send anything that was pending */
 1173                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1174                 mge_start_locked(ifp);
 1175         }
 1176 }
 1177 
 1178 static int
 1179 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1180 {
 1181         struct mge_softc *sc = ifp->if_softc;
 1182         struct ifreq *ifr = (struct ifreq *)data;
 1183         int mask, error;
 1184         uint32_t flags;
 1185 
 1186         error = 0;
 1187 
 1188         switch (command) {
 1189         case SIOCSIFFLAGS:
 1190                 MGE_GLOBAL_LOCK(sc);
 1191 
 1192                 if (ifp->if_flags & IFF_UP) {
 1193                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1194                                 flags = ifp->if_flags ^ sc->mge_if_flags;
 1195                                 if (flags & IFF_PROMISC)
 1196                                         mge_set_prom_mode(sc,
 1197                                             MGE_RX_DEFAULT_QUEUE);
 1198 
 1199                                 if (flags & IFF_ALLMULTI)
 1200                                         mge_setup_multicast(sc);
 1201                         } else
 1202                                 mge_init_locked(sc);
 1203                 }
 1204                 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1205                         mge_stop(sc);
 1206 
 1207                 sc->mge_if_flags = ifp->if_flags;
 1208                 MGE_GLOBAL_UNLOCK(sc);
 1209                 break;
 1210         case SIOCADDMULTI:
 1211         case SIOCDELMULTI:
 1212                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1213                         MGE_GLOBAL_LOCK(sc);
 1214                         mge_setup_multicast(sc);
 1215                         MGE_GLOBAL_UNLOCK(sc);
 1216                 }
 1217                 break;
 1218         case SIOCSIFCAP:
 1219                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 1220                 if (mask & IFCAP_HWCSUM) {
 1221                         ifp->if_capenable &= ~IFCAP_HWCSUM;
 1222                         ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
 1223                         if (ifp->if_capenable & IFCAP_TXCSUM)
 1224                                 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
 1225                         else
 1226                                 ifp->if_hwassist = 0;
 1227                 }
 1228 #ifdef DEVICE_POLLING
 1229                 if (mask & IFCAP_POLLING) {
 1230                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 1231                                 error = ether_poll_register(mge_poll, ifp);
 1232                                 if (error)
 1233                                         return(error);
 1234 
 1235                                 MGE_GLOBAL_LOCK(sc);
 1236                                 mge_intrs_ctrl(sc, 0);
 1237                                 ifp->if_capenable |= IFCAP_POLLING;
 1238                                 MGE_GLOBAL_UNLOCK(sc);
 1239                         } else {
 1240                                 error = ether_poll_deregister(ifp);
 1241                                 MGE_GLOBAL_LOCK(sc);
 1242                                 mge_intrs_ctrl(sc, 1);
 1243                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1244                                 MGE_GLOBAL_UNLOCK(sc);
 1245                         }
 1246                 }
 1247 #endif
 1248                 break;
 1249         case SIOCGIFMEDIA: /* fall through */
 1250         case SIOCSIFMEDIA:
 1251                 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
 1252                     && !(ifr->ifr_media & IFM_FDX)) {
 1253                         device_printf(sc->dev,
 1254                             "1000baseT half-duplex unsupported\n");
 1255                         return (0);
 1256                 }
 1257                 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
 1258                 break;
 1259         default:
 1260                 error = ether_ioctl(ifp, command, data);
 1261         }
 1262         return (error);
 1263 }
 1264 
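      /*
       * MII (SMI) access.  All PHYs hang off the SMI master of the first
       * mge unit, hence the accesses go through sc_mge0 rather than the
       * calling device's own softc.  Reads busy-wait for the READVALID
       * bit, writes for the BUSY bit to clear, each with a bounded retry
       * count.
       */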
 1265 static int
 1266 mge_miibus_readreg(device_t dev, int phy, int reg)
 1267 {
 1268         uint32_t retries;
 1269 
 1270         MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
 1271             (MGE_SMI_READ | (reg << 21) | (phy << 16)));
 1272 
 1273         retries = MGE_SMI_READ_RETRIES;
 1274         while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
 1275                 DELAY(MGE_SMI_READ_DELAY);
 1276 
 1277         if (retries == 0)
 1278                 device_printf(dev, "Timeout while reading from PHY\n");
 1279 
 1280         return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
 1281 }
 1282 
 1283 static int
 1284 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
 1285 {
 1286         uint32_t retries;
 1287 
 1288         MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
 1289             (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
 1290 
 1291         retries = MGE_SMI_WRITE_RETRIES;
 1292         while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
 1293                 DELAY(MGE_SMI_WRITE_DELAY);
 1294 
 1295         if (retries == 0)
 1296                 device_printf(dev, "Timeout while writing to PHY\n");
 1297         return (0);
 1298 }
 1299 
 1300 static int
 1301 mge_probe(device_t dev)
 1302 {
 1303 
 1304         device_set_desc(dev, "Marvell Gigabit Ethernet controller");
 1305         return (BUS_PROBE_DEFAULT);
 1306 }
 1307 
 1308 static int
 1309 mge_resume(device_t dev)
 1310 {
 1311 
 1312         device_printf(dev, "%s\n", __FUNCTION__);
 1313         return (0);
 1314 }
 1315 
 1316 static int
 1317 mge_shutdown(device_t dev)
 1318 {
 1319         struct mge_softc *sc = device_get_softc(dev);
 1320 
 1321         MGE_GLOBAL_LOCK(sc);
 1322 
 1323 #ifdef DEVICE_POLLING
 1324         if (sc->ifp->if_capenable & IFCAP_POLLING)
 1325                 ether_poll_deregister(sc->ifp);
 1326 #endif
 1327 
 1328         mge_stop(sc);
 1329 
 1330         MGE_GLOBAL_UNLOCK(sc);
 1331 
 1332         return (0);
 1333 }
 1334 
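      /*
       * Map a packet for transmission and hand it to the DMA engine.  The
       * mbuf is expected to occupy a single contiguous segment (the caller
       * runs m_defrag() first), so one descriptor is consumed per packet
       * and the FIRST/LAST bits are set together.
       */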
 1335 static int
 1336 mge_encap(struct mge_softc *sc, struct mbuf *m0)
 1337 {
 1338         struct mge_desc_wrapper *dw = NULL;
 1339         struct ifnet *ifp;
 1340         bus_dma_segment_t segs[MGE_TX_DESC_NUM];
 1341         bus_dmamap_t mapp;
 1342         int error;
 1343         int seg, nsegs;
 1344         int desc_no;
 1345 
 1346         ifp = sc->ifp;
 1347 
 1348         /* Check for free descriptors */
 1349         if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
 1350                 /* No free descriptors */
 1351                 return (-1);
 1352         }
 1353 
 1354         /* Fetch unused map */
 1355         desc_no = sc->tx_desc_curr;
 1356         dw = &sc->mge_tx_desc[desc_no];
 1357         mapp = dw->buffer_dmap;
 1358 
 1359         /* Create mapping in DMA memory */
 1360         error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
 1361             BUS_DMA_NOWAIT);
 1362         if (error != 0 || nsegs != 1) {
 1363                 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
 1364                 return ((error != 0) ? error : -1);
 1365         }
 1366 
 1367         bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
 1368 
 1369         /* Everything is ok, now we can send buffers */
 1370         for (seg = 0; seg < nsegs; seg++) {
 1371                 dw->mge_desc->byte_count = segs[seg].ds_len;
 1372                 dw->mge_desc->buffer = segs[seg].ds_addr;
 1373                 dw->buffer = m0;
 1374                 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
 1375                     MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
 1376                     MGE_DMA_OWNED;
 1377 
 1378                 if (seg == 0)
 1379                         mge_offload_setup_descriptor(sc, dw);
 1380         }
 1381 
 1382         bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1383             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1384 
 1385         sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
 1386         sc->tx_desc_used_count++;
 1387         return (0);
 1388 }
 1389 
 1390 static void
 1391 mge_tick(void *msc)
 1392 {
 1393         struct mge_softc *sc = msc;
 1394 
 1395         /* Check for TX timeout */
 1396         mge_watchdog(sc);
 1397 
 1398         mii_tick(sc->mii);
 1399 
 1400         /* Check for media type change */
 1401         if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
 1402                 mge_ifmedia_upd(sc->ifp);
 1403 
 1404         /* Schedule another timeout one second from now */
 1405         callout_reset(&sc->wd_callout, hz, mge_tick, sc);
 1406 }
 1407 
 1408 static void
 1409 mge_watchdog(struct mge_softc *sc)
 1410 {
 1411         struct ifnet *ifp;
 1412 
 1413         ifp = sc->ifp;
 1414 
 1415         MGE_GLOBAL_LOCK(sc);
 1416 
 1417         if (sc->wd_timer == 0 || --sc->wd_timer) {
 1418                 MGE_GLOBAL_UNLOCK(sc);
 1419                 return;
 1420         }
 1421 
 1422         ifp->if_oerrors++;
 1423         if_printf(ifp, "watchdog timeout\n");
 1424 
 1425         mge_stop(sc);
 1426         mge_init_locked(sc);
 1427 
 1428         MGE_GLOBAL_UNLOCK(sc);
 1429 }
 1430 
 1431 static void
 1432 mge_start(struct ifnet *ifp)
 1433 {
 1434         struct mge_softc *sc = ifp->if_softc;
 1435 
 1436         MGE_TRANSMIT_LOCK(sc);
 1437 
 1438         mge_start_locked(ifp);
 1439 
 1440         MGE_TRANSMIT_UNLOCK(sc);
 1441 }
 1442 
 1443 static void
 1444 mge_start_locked(struct ifnet *ifp)
 1445 {
 1446         struct mge_softc *sc;
 1447         struct mbuf *m0, *mtmp;
 1448         uint32_t reg_val, queued = 0;
 1449 
 1450         sc = ifp->if_softc;
 1451 
 1452         MGE_TRANSMIT_LOCK_ASSERT(sc);
 1453 
 1454         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1455             IFF_DRV_RUNNING)
 1456                 return;
 1457 
 1458         for (;;) {
 1459                 /* Get packet from the queue */
 1460                 IF_DEQUEUE(&ifp->if_snd, m0);
 1461                 if (m0 == NULL)
 1462                         break;
 1463 
 1464                 mtmp = m_defrag(m0, M_DONTWAIT);
 1465                 if (mtmp)
 1466                         m0 = mtmp;
 1467 
 1468                 if (mge_encap(sc, m0)) {
 1469                         IF_PREPEND(&ifp->if_snd, m0);
 1470                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1471                         break;
 1472                 }
 1473                 queued++;
 1474                 BPF_MTAP(ifp, m0);
 1475         }
 1476 
 1477         if (queued) {
 1478                 /* Enable transmitter and watchdog timer */
 1479                 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
 1480                 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
 1481                 sc->wd_timer = 5;
 1482         }
 1483 }
 1484 
 1485 static void
 1486 mge_stop(struct mge_softc *sc)
 1487 {
 1488         struct ifnet *ifp;
 1489         volatile uint32_t reg_val, status;
 1490         struct mge_desc_wrapper *dw;
 1491         struct mge_desc *desc;
 1492         int count;
 1493 
 1494         ifp = sc->ifp;
 1495 
 1496         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 1497                 return;
 1498 
 1499         /* Stop tick engine */
 1500         callout_stop(&sc->wd_callout);
 1501 
 1502         /* Disable interface */
 1503         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1504         sc->wd_timer = 0;
 1505 
 1506         /* Disable interrupts */
 1507         mge_intrs_ctrl(sc, 0);
 1508 
 1509         /* Disable Rx and Tx */
 1510         reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
 1511         MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
 1512         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
 1513 
 1514         /* Remove pending data from TX queue */
 1515         while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
 1516             sc->tx_desc_used_count) {
 1517                 /* Get the descriptor */
 1518                 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
 1519                 desc = dw->mge_desc;
 1520                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1521                     BUS_DMASYNC_POSTREAD);
 1522 
 1523                 /* Get descriptor status */
 1524                 status = desc->cmd_status;
 1525 
 1526                 if (status & MGE_DMA_OWNED)
 1527                         break;
 1528 
 1529                 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
 1530                     MGE_TX_DESC_NUM;
 1531                 sc->tx_desc_used_count--;
 1532 
 1533                 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
 1534                     BUS_DMASYNC_POSTWRITE);
 1535                 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
 1536 
 1537                 m_freem(dw->buffer);
 1538                 dw->buffer = NULL;
 1539         }
 1540 
 1541         /* Wait for end of transmission */
 1542         count = 0x100000;
 1543         while (count--) {
 1544                 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
 1545                 if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
 1546                     (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
 1547                         break;
 1548                 DELAY(100);
 1549         }
 1550 
 1551         if (!count)
 1552                 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
 1553                     __FUNCTION__);
 1554 
 1555         reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
 1556         reg_val &= ~(PORT_SERIAL_ENABLE);
 1557         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
 1558 }
 1559 
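/* Suspend method stub: only logs the call. */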
 1560 static int
 1561 mge_suspend(device_t dev)
 1562 {
 1563 
 1564         device_printf(dev, "%s\n", __FUNCTION__);
 1565         return (0);
 1566 }
 1567 
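/*
 * Map RX descriptor status to mbuf checksum flags: a hardware-verified
 * IPv4 header yields CSUM_IP_CHECKED | CSUM_IP_VALID, and a verified
 * TCP/UDP checksum on a non-fragment yields CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR with csum_data forced to 0xFFFF.
 */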
 1568 static void
 1569 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
 1570     uint32_t status, uint16_t bufsize)
 1571 {
 1572         int csum_flags = 0;
 1573 
 1574         if (ifp->if_capenable & IFCAP_RXCSUM) {
 1575                 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
 1576                         csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
 1577 
 1578                 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
 1579                     (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
 1580                     (status & MGE_RX_L4_CSUM_OK)) {
 1581                         csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 1582                         frame->m_pkthdr.csum_data = 0xFFFF;
 1583                 }
 1584 
 1585                 frame->m_pkthdr.csum_flags = csum_flags;
 1586         }
 1587 }
 1588 
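/*
 * Set the TX offload bits in the first descriptor of a frame: record the
 * IP header size, flag an optional VLAN tag, and request hardware
 * generation of the IP and/or TCP/UDP checksums according to the mbuf's
 * csum_flags.  Only ETHERTYPE_IP frames are handled.
 */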
 1589 static void
 1590 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
 1591 {
 1592         struct mbuf *m0 = dw->buffer;
 1593         struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
 1594         int csum_flags = m0->m_pkthdr.csum_flags;
 1595         int cmd_status = 0;
 1596         struct ip *ip;
 1597         int ehlen, etype;
 1598 
 1599         if (csum_flags) {
 1600                 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1601                         etype = ntohs(eh->evl_proto);
 1602                         ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1603                         csum_flags |= MGE_TX_VLAN_TAGGED;
 1604                 } else {
 1605                         etype = ntohs(eh->evl_encap_proto);
 1606                         ehlen = ETHER_HDR_LEN;
 1607                 }
 1608 
 1609                 if (etype != ETHERTYPE_IP) {
 1610                         if_printf(sc->ifp,
 1611                             "TCP/IP Offload enabled for unsupported "
 1612                             "protocol!\n");
 1613                         return;
 1614                 }
 1615 
 1616                 ip = (struct ip *)(m0->m_data + ehlen);
 1617                 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
 1618 
 1619                 if ((m0->m_flags & M_FRAG) == 0)
 1620                         cmd_status |= MGE_TX_NOT_FRAGMENT;
 1621         }
 1622 
 1623         if (csum_flags & CSUM_IP)
 1624                 cmd_status |= MGE_TX_GEN_IP_CSUM;
 1625 
 1626         if (csum_flags & CSUM_TCP)
 1627                 cmd_status |= MGE_TX_GEN_L4_CSUM;
 1628 
 1629         if (csum_flags & CSUM_UDP)
 1630                 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
 1631 
 1632         dw->mge_desc->cmd_status |= cmd_status;
 1633 }
 1634 
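/*
 * Master interrupt switch: enable the RX and extended (TX and error)
 * interrupt sources, or clear and mask every cause/mask register when
 * disabling.
 */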
 1635 static void
 1636 mge_intrs_ctrl(struct mge_softc *sc, int enable)
 1637 {
 1638 
 1639         if (enable) {
 1640                 MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
 1641                     MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
 1642                 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
 1643                     MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
 1644                     MGE_PORT_INT_EXT_TXBUF0);
 1645         } else {
 1646                 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
 1647                 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
 1648 
 1649                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
 1650                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
 1651 
 1652                 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
 1653                 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
 1654         }
 1655 }
 1656 
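/*
 * Table-driven CRC-8 over 'size' bytes, used by mge_setup_multicast() to
 * hash multicast addresses.  The table encodes the polynomial 0x07
 * (x^8 + x^2 + x + 1), MSB first, with a zero initial value; each lookup
 * below is equivalent to this bit-at-a-time sketch:
 *
 *	crc ^= byte;
 *	for (bit = 0; bit < 8; bit++)
 *		crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
 */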
 1657 static uint8_t
 1658 mge_crc8(uint8_t *data, int size)
 1659 {
 1660         uint8_t crc = 0;
 1661         static const uint8_t ct[256] = {
 1662                 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
 1663                 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
 1664                 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
 1665                 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
 1666                 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
 1667                 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
 1668                 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
 1669                 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
 1670                 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
 1671                 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
 1672                 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
 1673                 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
 1674                 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
 1675                 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
 1676                 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
 1677                 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
 1678                 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
 1679                 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
 1680                 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
 1681                 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
 1682                 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
 1683                 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
 1684                 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
 1685                 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
 1686                 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
 1687                 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
 1688                 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
 1689                 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
 1690                 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
 1691                 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
 1692                 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
 1693                 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
 1694         };
 1695 
 1696         while (size--)
 1697                 crc = ct[crc ^ *(data++)];
 1698 
 1699         return (crc);
 1700 }
 1701 
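/*
 * Program the multicast filter tables.  Addresses matching the IANA
 * prefix 01:00:5e:00:00:xx index the "special" table directly by their
 * last octet; everything else is hashed into the "other" table with
 * mge_crc8().  Each 8-bit table entry packs an enable bit (bit 0) with
 * the target RX queue above it, which is what
 * v = (MGE_RX_DEFAULT_QUEUE << 1) | 1 encodes, and four entries fit in
 * each 32-bit filter register.
 */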
 1702 static void
 1703 mge_setup_multicast(struct mge_softc *sc)
 1704 {
 1705         uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
 1706         uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
 1707         uint32_t smt[MGE_MCAST_REG_NUMBER];
 1708         uint32_t omt[MGE_MCAST_REG_NUMBER];
 1709         struct ifnet *ifp = sc->ifp;
 1710         struct ifmultiaddr *ifma;
 1711         uint8_t *mac;
 1712         int i;
 1713 
 1714         if (ifp->if_flags & IFF_ALLMULTI) {
 1715                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
 1716                         smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
 1717         } else {
 1718                 memset(smt, 0, sizeof(smt));
 1719                 memset(omt, 0, sizeof(omt));
 1720 
 1721                 if_maddr_rlock(ifp);
 1722                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1723                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1724                                 continue;
 1725 
 1726                         mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
 1727                         if (memcmp(mac, special, sizeof(special)) == 0) {
 1728                                 i = mac[5];
 1729                                 smt[i >> 2] |= v << ((i & 0x03) << 3);
 1730                         } else {
 1731                                 i = mge_crc8(mac, ETHER_ADDR_LEN);
 1732                                 omt[i >> 2] |= v << ((i & 0x03) << 3);
 1733                         }
 1734                 }
 1735                 if_maddr_runlock(ifp);
 1736         }
 1737 
 1738         for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
 1739                 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
 1740                 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
 1741         }
 1742 }
 1743 
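/*
 * Apply the RX interrupt-coalescing time: clamp to the hardware maximum,
 * then replace only the IPG field of MGE_SDMA_CONFIG, using the encoding
 * of the maximum value as the field mask.  mge_set_txic() below does the
 * same for the TX FIFO urgent threshold.
 */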
 1744 static void
 1745 mge_set_rxic(struct mge_softc *sc)
 1746 {
 1747         uint32_t reg;
 1748 
 1749         if (sc->rx_ic_time > sc->mge_rx_ipg_max)
 1750                 sc->rx_ic_time = sc->mge_rx_ipg_max;
 1751 
 1752         reg = MGE_READ(sc, MGE_SDMA_CONFIG);
 1753         reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
 1754         reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
 1755         MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
 1756 }
 1757 
 1758 static void
 1759 mge_set_txic(struct mge_softc *sc)
 1760 {
 1761         uint32_t reg;
 1762 
 1763         if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
 1764                 sc->tx_ic_time = sc->mge_tfut_ipg_max;
 1765 
 1766         reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
 1767         reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
 1768         reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
 1769         MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
 1770 }
 1771 
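/*
 * Shared sysctl handler for both coalescing knobs; arg2 selects RX
 * (MGE_IC_RX) or TX.  The current value is reported first, and on a
 * successful write the new value is stored and applied under the global
 * lock.
 */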
 1772 static int
 1773 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
 1774 {
 1775         struct mge_softc *sc = (struct mge_softc *)arg1;
 1776         uint32_t time;
 1777         int error;
 1778 
 1779         time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
 1780         error = sysctl_handle_int(oidp, &time, 0, req);
 1781         if (error != 0)
 1782                 return (error);
 1783 
 1784         MGE_GLOBAL_LOCK(sc);
 1785         if (arg2 == MGE_IC_RX) {
 1786                 sc->rx_ic_time = time;
 1787                 mge_set_rxic(sc);
 1788         } else {
 1789                 sc->tx_ic_time = time;
 1790                 mge_set_txic(sc);
 1791         }
 1792         MGE_GLOBAL_UNLOCK(sc);
 1793 
 1794         return (0);
 1795 }
 1796 
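/*
 * Create the interrupt-coalescing sysctl tree under the device's node,
 * typically reachable as dev.mge.<unit>.int_coal.rx_time and
 * dev.mge.<unit>.int_coal.tx_time, both served by mge_sysctl_ic().
 */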
 1797 static void
 1798 mge_add_sysctls(struct mge_softc *sc)
 1799 {
 1800         struct sysctl_ctx_list *ctx;
 1801         struct sysctl_oid_list *children;
 1802         struct sysctl_oid *tree;
 1803 
 1804         ctx = device_get_sysctl_ctx(sc->dev);
 1805         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 1806         tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
 1807             CTLFLAG_RD, 0, "MGE interrupt coalescing");
 1808         children = SYSCTL_CHILDREN(tree);
 1809 
 1810         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
 1811             CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
 1812             "I", "IC RX time threshold");
 1813         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
 1814             CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
 1815             "I", "IC TX time threshold");
 1816 }
