FreeBSD/Linux Kernel Cross Reference
sys/dev/mge/if_mge.c

    1 /*-
    2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
    3  * All rights reserved.
    4  *
    5  * Developed by Semihalf.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. Neither the name of MARVELL nor the names of contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 #ifdef HAVE_KERNEL_OPTION_HEADERS
   33 #include "opt_device_polling.h"
   34 #endif
   35 
   36 #include <sys/cdefs.h>
   37 __FBSDID("$FreeBSD$");
   38 
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/endian.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/lock.h>
   44 #include <sys/mutex.h>
   45 #include <sys/kernel.h>
   46 #include <sys/module.h>
   47 #include <sys/socket.h>
   48 #include <sys/sysctl.h>
   49 
   50 #include <net/ethernet.h>
   51 #include <net/bpf.h>
   52 #include <net/if.h>
   53 #include <net/if_arp.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_types.h>
   57 #include <net/if_vlan_var.h>
   58 
   59 #include <netinet/in_systm.h>
   60 #include <netinet/in.h>
   61 #include <netinet/ip.h>
   62 
   63 #include <sys/sockio.h>
   64 #include <sys/bus.h>
   65 #include <machine/bus.h>
   66 #include <sys/rman.h>
   67 #include <machine/resource.h>
   68 
   69 #include <dev/mii/mii.h>
   70 #include <dev/mii/miivar.h>
   71 
   72 #include <dev/fdt/fdt_common.h>
   73 #include <dev/ofw/ofw_bus.h>
   74 #include <dev/ofw/ofw_bus_subr.h>
   75 
   76 #include <dev/mge/if_mgevar.h>
   77 #include <arm/mv/mvreg.h>
   78 #include <arm/mv/mvvar.h>
   79 
   80 #include "miibus_if.h"
   81 
   82 static int mge_probe(device_t dev);
   83 static int mge_attach(device_t dev);
   84 static int mge_detach(device_t dev);
   85 static int mge_shutdown(device_t dev);
   86 static int mge_suspend(device_t dev);
   87 static int mge_resume(device_t dev);
   88 
   89 static int mge_miibus_readreg(device_t dev, int phy, int reg);
   90 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
   91 
   92 static int mge_ifmedia_upd(struct ifnet *ifp);
   93 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
   94 
   95 static void mge_init(void *arg);
   96 static void mge_init_locked(void *arg);
   97 static void mge_start(struct ifnet *ifp);
   98 static void mge_start_locked(struct ifnet *ifp);
   99 static void mge_watchdog(struct mge_softc *sc);
  100 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
  101 
  102 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
  103 static uint32_t mge_rx_ipg(uint32_t val, int ver);
  104 static void mge_ver_params(struct mge_softc *sc);
  105 
  106 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
  107 static void mge_intr_rx(void *arg);
  108 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
  109 static void mge_intr_tx(void *arg);
  110 static void mge_intr_tx_locked(struct mge_softc *sc);
  111 static void mge_intr_misc(void *arg);
  112 static void mge_intr_sum(void *arg);
  113 static void mge_intr_err(void *arg);
  114 static void mge_stop(struct mge_softc *sc);
  115 static void mge_tick(void *msc);
  116 static uint32_t mge_set_port_serial_control(uint32_t media);
  117 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
  118 static void mge_set_mac_address(struct mge_softc *sc);
  119 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
  120     uint8_t queue);
  121 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
  122 static int mge_allocate_dma(struct mge_softc *sc);
  123 static int mge_alloc_desc_dma(struct mge_softc *sc,
  124     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
  125 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
  126     struct mbuf **mbufp, bus_addr_t *paddr);
  127 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
  128 static void mge_free_dma(struct mge_softc *sc);
  129 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
  130     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
  131 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
  132     uint32_t status, uint16_t bufsize);
  133 static void mge_offload_setup_descriptor(struct mge_softc *sc,
  134     struct mge_desc_wrapper *dw);
  135 static uint8_t mge_crc8(uint8_t *data, int size);
  136 static void mge_setup_multicast(struct mge_softc *sc);
  137 static void mge_set_rxic(struct mge_softc *sc);
  138 static void mge_set_txic(struct mge_softc *sc);
  139 static void mge_add_sysctls(struct mge_softc *sc);
  140 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
  141 
  142 static device_method_t mge_methods[] = {
  143         /* Device interface */
  144         DEVMETHOD(device_probe,         mge_probe),
  145         DEVMETHOD(device_attach,        mge_attach),
  146         DEVMETHOD(device_detach,        mge_detach),
  147         DEVMETHOD(device_shutdown,      mge_shutdown),
  148         DEVMETHOD(device_suspend,       mge_suspend),
  149         DEVMETHOD(device_resume,        mge_resume),
  150         /* MII interface */
  151         DEVMETHOD(miibus_readreg,       mge_miibus_readreg),
  152         DEVMETHOD(miibus_writereg,      mge_miibus_writereg),
  153         { 0, 0 }
  154 };
  155 
  156 static driver_t mge_driver = {
  157         "mge",
  158         mge_methods,
  159         sizeof(struct mge_softc),
  160 };
  161 
  162 static devclass_t mge_devclass;
  163 
  164 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
  165 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
  166 MODULE_DEPEND(mge, ether, 1, 1, 1);
  167 MODULE_DEPEND(mge, miibus, 1, 1, 1);
  168 
  169 static struct resource_spec res_spec[] = {
  170         { SYS_RES_MEMORY, 0, RF_ACTIVE },
  171         { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
  172         { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
  173         { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
  174         { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
  175         { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
  176         { -1, 0 }
  177 };
  178 
  179 static struct {
  180         driver_intr_t *handler;
   181         char *description;
  182 } mge_intrs[MGE_INTR_COUNT] = {
  183         { mge_intr_rx,  "GbE receive interrupt" },
  184         { mge_intr_tx,  "GbE transmit interrupt" },
  185         { mge_intr_misc,"GbE misc interrupt" },
  186         { mge_intr_sum, "GbE summary interrupt" },
  187         { mge_intr_err, "GbE error interrupt" },
  188 };
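
/*
 * A note inferred from res_spec[] above (not from the original sources):
 * sc->res[0] is the register window and sc->res[1..5] are the five IRQ
 * lines, in the same order as this table, which is why mge_attach() and
 * mge_detach() index the resources as sc->res[1 + i] when wiring up
 * mge_intrs[i].handler.
 */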
  189 
  190 static void
  191 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
  192 {
  193         uint32_t mac_l, mac_h;
  194         uint8_t lmac[6];
  195         int i, valid;
  196 
  197         /*
  198          * Retrieve hw address from the device tree.
  199          */
  200         i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
  201         if (i == 6) {
  202                 valid = 0;
  203                 for (i = 0; i < 6; i++)
  204                         if (lmac[i] != 0) {
  205                                 valid = 1;
  206                                 break;
  207                         }
  208 
  209                 if (valid) {
  210                         bcopy(lmac, addr, 6);
  211                         return;
  212                 }
  213         }
  214 
  215         /*
  216          * Fall back -- use the currently programmed address.
  217          */
  218         mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
  219         mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
  220 
  221         addr[0] = (mac_h & 0xff000000) >> 24;
  222         addr[1] = (mac_h & 0x00ff0000) >> 16;
  223         addr[2] = (mac_h & 0x0000ff00) >> 8;
  224         addr[3] = (mac_h & 0x000000ff);
  225         addr[4] = (mac_l & 0x0000ff00) >> 8;
  226         addr[5] = (mac_l & 0x000000ff);
  227 }
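
/*
 * Worked example of the register layout decoded above (a sketch derived
 * from the shifts, not from a datasheet): for the address
 * 00:11:22:33:44:55 the hardware holds
 *
 *	MGE_MAC_ADDR_H == 0x00112233	(bytes 0..3)
 *	MGE_MAC_ADDR_L == 0x00004455	(bytes 4..5)
 *
 * and mge_set_mac_address() performs the inverse packing.
 */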
  228 
  229 static uint32_t
  230 mge_tfut_ipg(uint32_t val, int ver)
  231 {
  232 
  233         switch (ver) {
  234         case 1:
  235                 return ((val & 0x3fff) << 4);
  236         case 2:
  237         default:
  238                 return ((val & 0xffff) << 4);
  239         }
  240 }
  241 
  242 static uint32_t
  243 mge_rx_ipg(uint32_t val, int ver)
  244 {
  245 
  246         switch (ver) {
  247         case 1:
  248                 return ((val & 0x3fff) << 8);
  249         case 2:
  250         default:
  251                 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
  252         }
  253 }
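
/*
 * Example of the version-2 encoding above (inferred from the shift
 * pattern): the IPG value is split, with bit 15 relocated to bit 25 and
 * bits 14:0 placed at bits 21:7, e.g.
 *
 *	mge_rx_ipg(0x8001, 2) == (0x8000 << 10) | (0x1 << 7) == 0x02000080
 */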
  254 
  255 static void
  256 mge_ver_params(struct mge_softc *sc)
  257 {
  258         uint32_t d, r;
  259 
  260         soc_id(&d, &r);
  261         if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
  262             d == MV_DEV_MV78100_Z0) {
  263                 sc->mge_ver = 2;
  264                 sc->mge_mtu = 0x4e8;
  265                 sc->mge_tfut_ipg_max = 0xFFFF;
  266                 sc->mge_rx_ipg_max = 0xFFFF;
  267                 sc->mge_tx_arb_cfg = 0xFC0000FF;
  268                 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
  269                 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
  270         } else {
  271                 sc->mge_ver = 1;
  272                 sc->mge_mtu = 0x458;
  273                 sc->mge_tfut_ipg_max = 0x3FFF;
  274                 sc->mge_rx_ipg_max = 0x3FFF;
  275                 sc->mge_tx_arb_cfg = 0x000000FF;
  276                 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
  277                 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
  278         }
  279 }
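
/*
 * Note (an inference from the code, not from the original sources):
 * sc->mge_mtu appears to hold the per-version offset of the port MTU
 * register (0x458 vs. 0x4e8) rather than an MTU value; mge_init_locked()
 * later issues MGE_WRITE(sc, sc->mge_mtu, 0) to select the default MTU.
 */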
  280 
  281 static void
  282 mge_set_mac_address(struct mge_softc *sc)
  283 {
  284         char *if_mac;
  285         uint32_t mac_l, mac_h;
  286 
  287         MGE_GLOBAL_LOCK_ASSERT(sc);
  288 
  289         if_mac = (char *)IF_LLADDR(sc->ifp);
  290 
  291         mac_l = (if_mac[4] << 8) | (if_mac[5]);
   292         mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
  293             (if_mac[2] << 8) | (if_mac[3] << 0);
  294 
  295         MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
  296         MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
  297 
  298         mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
  299 }
  300 
  301 static void
  302 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
  303 {
  304         uint32_t reg_idx, reg_off, reg_val, i;
  305 
  306         last_byte &= 0xf;
  307         reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
  308         reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
  309         reg_val = (1 | (queue << 1)) << reg_off;
  310 
  311         for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
   312                 if (i == reg_idx)
  313                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
  314                 else
  315                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
  316         }
  317 }
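
/*
 * Worked example of the unicast filter math above, assuming
 * MGE_UCAST_REG_NUMBER is 4 (an assumption; see if_mgevar.h): for a MAC
 * ending in 0x35 the nibble 0x5 selects reg_idx 1 and reg_off 8, so byte
 * lane 15:8 of MGE_DA_FILTER_UCAST(1) receives the "pass" bit plus the
 * RX queue number shifted left by one.
 */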
  318 
  319 static void
  320 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
  321 {
  322         uint32_t port_config;
  323         uint32_t reg_val, i;
  324 
  325         /* Enable or disable promiscuous mode as needed */
  326         if (sc->ifp->if_flags & IFF_PROMISC) {
  327                 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
  328                 port_config |= PORT_CONFIG_UPM;
  329                 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
  330 
  331                 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
  332                    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
  333 
  334                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
  335                         MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
  336                         MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
  337                 }
  338 
  339                 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
  340                         MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
  341 
  342         } else {
  343                 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
  344                 port_config &= ~PORT_CONFIG_UPM;
  345                 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
  346 
  347                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
  348                         MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
  349                         MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
  350                 }
  351 
  352                 mge_set_mac_address(sc);
  353         }
  354 }
  355 
  356 static void
  357 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  358 {
  359         u_int32_t *paddr;
  360 
  361         KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
  362         paddr = arg;
  363 
  364         *paddr = segs->ds_addr;
  365 }
  366 
  367 static int
  368 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
  369     bus_addr_t *paddr)
  370 {
  371         struct mbuf *new_mbuf;
  372         bus_dma_segment_t seg[1];
  373         int error;
  374         int nsegs;
  375 
  376         KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
  377 
  378         new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  379         if (new_mbuf == NULL)
  380                 return (ENOBUFS);
  381         new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
  382 
  383         if (*mbufp) {
  384                 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
  385                 bus_dmamap_unload(tag, map);
  386         }
  387 
  388         error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
  389             BUS_DMA_NOWAIT);
  390         KASSERT(nsegs == 1, ("Too many segments returned!"));
  391         if (nsegs != 1 || error)
  392                 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
  393 
  394         bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
  395 
  396         (*mbufp) = new_mbuf;
  397         (*paddr) = seg->ds_addr;
  398         return (0);
  399 }
  400 
  401 static int
  402 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
  403     uint32_t size, bus_dma_tag_t *buffer_tag)
  404 {
  405         struct mge_desc_wrapper *dw;
  406         bus_addr_t desc_paddr;
  407         int i, error;
  408 
  409         desc_paddr = 0;
  410         for (i = size - 1; i >= 0; i--) {
  411                 dw = &(tab[i]);
  412                 error = bus_dmamem_alloc(sc->mge_desc_dtag,
  413                     (void**)&(dw->mge_desc),
  414                     BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  415                     &(dw->desc_dmap));
  416 
  417                 if (error) {
  418                         if_printf(sc->ifp, "failed to allocate DMA memory\n");
  419                         dw->mge_desc = NULL;
  420                         return (ENXIO);
  421                 }
  422 
  423                 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
  424                     dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
  425                     &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
  426 
  427                 if (error) {
  428                         if_printf(sc->ifp, "can't load descriptor\n");
  429                         bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
  430                             dw->desc_dmap);
  431                         dw->mge_desc = NULL;
  432                         return (ENXIO);
  433                 }
  434 
  435                 /* Chain descriptors */
  436                 dw->mge_desc->next_desc = desc_paddr;
  437                 desc_paddr = dw->mge_desc_paddr;
  438         }
  439         tab[size - 1].mge_desc->next_desc = desc_paddr;
  440 
  441         /* Allocate a busdma tag for mbufs. */
  442         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
  443             8, 0,                               /* alignment, boundary */
  444             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  445             BUS_SPACE_MAXADDR,                  /* highaddr */
  446             NULL, NULL,                         /* filtfunc, filtfuncarg */
  447             MCLBYTES, 1,                        /* maxsize, nsegments */
  448             MCLBYTES, 0,                        /* maxsegsz, flags */
  449             NULL, NULL,                         /* lockfunc, lockfuncarg */
  450             buffer_tag);                        /* dmat */
  451         if (error) {
  452                 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
  453                 return (ENXIO);
  454         }
  455 
  456         /* Create TX busdma maps */
  457         for (i = 0; i < size; i++) {
  458                 dw = &(tab[i]);
  459                 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
  460                 if (error) {
  461                         if_printf(sc->ifp, "failed to create map for mbuf\n");
  462                         return (ENXIO);
  463                 }
  464 
  465                 dw->buffer = (struct mbuf*)NULL;
  466                 dw->mge_desc->buffer = (bus_addr_t)NULL;
  467         }
  468 
  469         return (0);
  470 }
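
/*
 * A note on the allocation loop above (inferred from the code):
 * descriptors are allocated from the highest index down so that each one
 * can link forward to the physical address of its successor; after the
 * loop desc_paddr holds the address of tab[0], and the final assignment
 * points tab[size - 1] back at it, closing the ring:
 *
 *	tab[0] -> tab[1] -> ... -> tab[size - 1] -> tab[0]
 */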
  471 
  472 static int
  473 mge_allocate_dma(struct mge_softc *sc)
  474 {
  475         int error;
  476         struct mge_desc_wrapper *dw;
  477         int i;
  478 
  479         /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
  480         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),    /* parent */
  481             16, 0,                              /* alignment, boundary */
  482             BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
  483             BUS_SPACE_MAXADDR,                  /* highaddr */
  484             NULL, NULL,                         /* filtfunc, filtfuncarg */
  485             sizeof(struct mge_desc), 1,         /* maxsize, nsegments */
  486             sizeof(struct mge_desc), 0,         /* maxsegsz, flags */
  487             NULL, NULL,                         /* lockfunc, lockfuncarg */
   488             &sc->mge_desc_dtag);                /* dmat */
               if (error) {
                       /* sc->ifp is not allocated yet, report via the device */
                       device_printf(sc->dev,
                           "failed to create descriptor DMA tag\n");
                       return (ENXIO);
               }
   489 
  491         mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
  492             &sc->mge_tx_dtag);
  493         mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
  494             &sc->mge_rx_dtag);
  495 
  496         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  497                 dw = &(sc->mge_rx_desc[i]);
  498                 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
  499                     &dw->mge_desc->buffer);
  500         }
  501 
  502         sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
  503         sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
  504 
  505         return (0);
  506 }
  507 
  508 static void
  509 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
  510     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
  511 {
  512         struct mge_desc_wrapper *dw;
  513         int i;
  514 
  515         for (i = 0; i < size; i++) {
  516                 /* Free RX mbuf */
  517                 dw = &(tab[i]);
  518 
  519                 if (dw->buffer_dmap) {
  520                         if (free_mbufs) {
  521                                 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
  522                                     BUS_DMASYNC_POSTREAD);
  523                                 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
  524                         }
  525                         bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
  526                         if (free_mbufs)
  527                                 m_freem(dw->buffer);
  528                 }
  529                 /* Free RX descriptors */
  530                 if (dw->desc_dmap) {
  531                         bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
  532                             BUS_DMASYNC_POSTREAD);
  533                         bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
  534                         bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
  535                             dw->desc_dmap);
  536                 }
  537         }
  538 }
  539 
  540 static void
  541 mge_free_dma(struct mge_softc *sc)
  542 {
   543         /* Free descriptors and mbufs */
  544         mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
  545         mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
  546 
  547         /* Destroy mbuf dma tag */
  548         bus_dma_tag_destroy(sc->mge_tx_dtag);
  549         bus_dma_tag_destroy(sc->mge_rx_dtag);
  550         /* Destroy descriptors tag */
  551         bus_dma_tag_destroy(sc->mge_desc_dtag);
  552 }
  553 
  554 static void
  555 mge_reinit_rx(struct mge_softc *sc)
  556 {
  557         struct mge_desc_wrapper *dw;
  558         int i;
  559 
  560         MGE_RECEIVE_LOCK_ASSERT(sc);
  561 
  562         mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
  563 
  564         mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
  565             &sc->mge_rx_dtag);
  566 
  567         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  568                 dw = &(sc->mge_rx_desc[i]);
   569                 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
   570                     &dw->mge_desc->buffer);
  571         }
  572 
  573         sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
  574         sc->rx_desc_curr = 0;
  575 
  576         MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
  577             sc->rx_desc_start);
  578 
  579         /* Enable RX queue */
  580         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
  581 }
  582 
  583 #ifdef DEVICE_POLLING
  584 static poll_handler_t mge_poll;
  585 
  586 static int
  587 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
  588 {
  589         struct mge_softc *sc = ifp->if_softc;
  590         uint32_t int_cause, int_cause_ext;
  591         int rx_npkts = 0;
  592 
  593         MGE_GLOBAL_LOCK(sc);
  594 
  595         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  596                 MGE_GLOBAL_UNLOCK(sc);
  597                 return (rx_npkts);
  598         }
  599 
  600         if (cmd == POLL_AND_CHECK_STATUS) {
  601                 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
  602                 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
  603 
  604                 /* Check for resource error */
  605                 if (int_cause & MGE_PORT_INT_RXERRQ0)
  606                         mge_reinit_rx(sc);
  607 
  608                 if (int_cause || int_cause_ext) {
  609                         MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
  610                         MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
  611                 }
  612         }
  613 
  614         mge_intr_tx_locked(sc);
  615         rx_npkts = mge_intr_rx_locked(sc, count);
  616 
  617         MGE_GLOBAL_UNLOCK(sc);
  618         return (rx_npkts);
  619 }
  620 #endif /* DEVICE_POLLING */
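
/*
 * A note on the polling path above (derived from the code): mge_poll()
 * mirrors the interrupt handlers with interrupts masked, processing at
 * most 'count' received frames per call, and on POLL_AND_CHECK_STATUS it
 * also picks up the RX resource-error condition that the RX interrupt
 * would otherwise handle via mge_reinit_rx().
 */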
  621 
  622 static int
  623 mge_attach(device_t dev)
  624 {
  625         struct mge_softc *sc;
  626         struct mii_softc *miisc;
  627         struct ifnet *ifp;
  628         uint8_t hwaddr[ETHER_ADDR_LEN];
  629         int i, error, phy;
  630 
  631         sc = device_get_softc(dev);
  632         sc->dev = dev;
  633         sc->node = ofw_bus_get_node(dev);
  634 
  635         /* Set chip version-dependent parameters */
  636         mge_ver_params(sc);
  637 
  638         /* Get phy address and used softc from fdt */
  639         if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
  640                 return (ENXIO);
  641 
  642         /* Initialize mutexes */
  643         mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
  644         mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
  645 
  646         /* Allocate IO and IRQ resources */
  647         error = bus_alloc_resources(dev, res_spec, sc->res);
  648         if (error) {
  649                 device_printf(dev, "could not allocate resources\n");
  650                 mge_detach(dev);
  651                 return (ENXIO);
  652         }
  653 
  654         /* Allocate DMA, buffers, buffer descriptors */
  655         error = mge_allocate_dma(sc);
  656         if (error) {
  657                 mge_detach(dev);
  658                 return (ENXIO);
  659         }
  660 
  661         sc->tx_desc_curr = 0;
  662         sc->rx_desc_curr = 0;
  663         sc->tx_desc_used_idx = 0;
  664         sc->tx_desc_used_count = 0;
  665 
  666         /* Configure defaults for interrupts coalescing */
  667         sc->rx_ic_time = 768;
  668         sc->tx_ic_time = 768;
  669         mge_add_sysctls(sc);
  670 
  671         /* Allocate network interface */
  672         ifp = sc->ifp = if_alloc(IFT_ETHER);
  673         if (ifp == NULL) {
  674                 device_printf(dev, "if_alloc() failed\n");
  675                 mge_detach(dev);
  676                 return (ENOMEM);
  677         }
  678 
  679         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  680         ifp->if_softc = sc;
  681         ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
  682         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
  683         ifp->if_capenable = ifp->if_capabilities;
  684         ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
  685 
  686 #ifdef DEVICE_POLLING
  687         /* Advertise that polling is supported */
  688         ifp->if_capabilities |= IFCAP_POLLING;
  689 #endif
  690 
  691         ifp->if_init = mge_init;
  692         ifp->if_start = mge_start;
  693         ifp->if_ioctl = mge_ioctl;
  694 
  695         ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
  696         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  697         IFQ_SET_READY(&ifp->if_snd);
  698 
  699         mge_get_mac_address(sc, hwaddr);
  700         ether_ifattach(ifp, hwaddr);
  701         callout_init(&sc->wd_callout, 0);
  702 
  703         /* Attach PHY(s) */
  704         error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
  705             mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
  706         if (error) {
  707                 device_printf(dev, "attaching PHYs failed\n");
  708                 mge_detach(dev);
  709                 return (error);
  710         }
  711         sc->mii = device_get_softc(sc->miibus);
  712 
  713         /* Tell the MAC where to find the PHY so autoneg works */
  714         miisc = LIST_FIRST(&sc->mii->mii_phys);
  715         MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
  716 
  717         /* Attach interrupt handlers */
  718         for (i = 0; i < 2; ++i) {
  719                 error = bus_setup_intr(dev, sc->res[1 + i],
  720                     INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
  721                     sc, &sc->ih_cookie[i]);
  722                 if (error) {
  723                         device_printf(dev, "could not setup %s\n",
  724                             mge_intrs[i].description);
  725                         mge_detach(dev);
  726                         return (error);
  727                 }
  728         }
  729 
  730         return (0);
  731 }
  732 
  733 static int
  734 mge_detach(device_t dev)
  735 {
  736         struct mge_softc *sc;
   737         int error, i;
  738 
  739         sc = device_get_softc(dev);
  740 
  741         /* Stop controller and free TX queue */
  742         if (sc->ifp)
  743                 mge_shutdown(dev);
  744 
  745         /* Wait for stopping ticks */
  746         callout_drain(&sc->wd_callout);
  747 
  748         /* Stop and release all interrupts */
  749         for (i = 0; i < 2; ++i) {
  750                 if (!sc->ih_cookie[i])
  751                         continue;
  752 
  753                 error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
  754                 if (error)
  755                         device_printf(dev, "could not release %s\n",
  756                             mge_intrs[i].description);
  757         }
  758 
  759         /* Detach network interface */
  760         if (sc->ifp) {
  761                 ether_ifdetach(sc->ifp);
  762                 if_free(sc->ifp);
  763         }
  764 
  765         /* Free DMA resources */
  766         mge_free_dma(sc);
  767 
  768         /* Free IO memory handler */
  769         bus_release_resources(dev, res_spec, sc->res);
  770 
  771         /* Destroy mutexes */
  772         mtx_destroy(&sc->receive_lock);
  773         mtx_destroy(&sc->transmit_lock);
  774 
  775         return (0);
  776 }
  777 
  778 static void
  779 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  780 {
  781         struct mge_softc *sc = ifp->if_softc;
  782         struct mii_data *mii;
  783 
  784         MGE_TRANSMIT_LOCK(sc);
  785 
  786         mii = sc->mii;
  787         mii_pollstat(mii);
  788 
  789         ifmr->ifm_active = mii->mii_media_active;
  790         ifmr->ifm_status = mii->mii_media_status;
  791 
  792         MGE_TRANSMIT_UNLOCK(sc);
  793 }
  794 
  795 static uint32_t
  796 mge_set_port_serial_control(uint32_t media)
  797 {
  798         uint32_t port_config;
  799 
  800         port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
  801             PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
  802 
  803         if (IFM_TYPE(media) == IFM_ETHER) {
  804                 switch(IFM_SUBTYPE(media)) {
  805                         case IFM_AUTO:
  806                                 break;
  807                         case IFM_1000_T:
  808                                 port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
  809                                     PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
  810                                     PORT_SERIAL_SPEED_AUTONEG);
  811                                 break;
  812                         case IFM_100_TX:
  813                                 port_config  |= (PORT_SERIAL_MII_SPEED_100 |
  814                                     PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
  815                                     PORT_SERIAL_SPEED_AUTONEG);
  816                                 break;
  817                         case IFM_10_T:
  818                                 port_config  |= (PORT_SERIAL_AUTONEG |
  819                                     PORT_SERIAL_AUTONEG_FC |
  820                                     PORT_SERIAL_SPEED_AUTONEG);
  821                                 break;
  822                 }
  823                 if (media & IFM_FDX)
  824                         port_config |= PORT_SERIAL_FULL_DUPLEX;
  825         }
  826         return (port_config);
  827 }
  828 
  829 static int
  830 mge_ifmedia_upd(struct ifnet *ifp)
  831 {
  832         struct mge_softc *sc = ifp->if_softc;
  833 
  834         if (ifp->if_flags & IFF_UP) {
  835                 MGE_GLOBAL_LOCK(sc);
  836 
  837                 sc->mge_media_status = sc->mii->mii_media.ifm_media;
  838                 mii_mediachg(sc->mii);
  839                 mge_init_locked(sc);
  840 
  841                 MGE_GLOBAL_UNLOCK(sc);
  842         }
  843 
  844         return (0);
  845 }
  846 
  847 static void
  848 mge_init(void *arg)
  849 {
  850         struct mge_softc *sc = arg;
  851 
  852         MGE_GLOBAL_LOCK(sc);
  853 
  854         mge_init_locked(arg);
  855 
  856         MGE_GLOBAL_UNLOCK(sc);
  857 }
  858 
  859 static void
  860 mge_init_locked(void *arg)
  861 {
  862         struct mge_softc *sc = arg;
  863         struct mge_desc_wrapper *dw;
  864         volatile uint32_t reg_val;
  865         int i, count;
   866 
  868         MGE_GLOBAL_LOCK_ASSERT(sc);
  869 
  870         /* Stop interface */
  871         mge_stop(sc);
  872 
  873         /* Disable interrupts */
  874         mge_intrs_ctrl(sc, 0);
  875 
  876         /* Set MAC address */
  877         mge_set_mac_address(sc);
  878 
  879         /* Setup multicast filters */
  880         mge_setup_multicast(sc);
  881 
  882         if (sc->mge_ver == 2) {
  883                 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
  884                 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
  885         }
  886 
  887         /* Initialize TX queue configuration registers */
  888         MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
  889         MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
  890         MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
  891 
  892         /* Clear TX queue configuration registers for unused queues */
  893         for (i = 1; i < 7; i++) {
  894                 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
  895                 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
  896                 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
  897         }
  898 
  899         /* Set default MTU */
  900         MGE_WRITE(sc, sc->mge_mtu, 0);
  901 
  902         /* Port configuration */
  903         MGE_WRITE(sc, MGE_PORT_CONFIG,
  904             PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
  905             PORT_CONFIG_ARO_RXQ(0));
   906         MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
  907 
  908         /* Setup port configuration */
  909         reg_val = mge_set_port_serial_control(sc->mge_media_status);
  910         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
  911 
  912         /* Setup SDMA configuration */
   913         MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
  914             MGE_SDMA_TX_BYTE_SWAP |
  915             MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
  916             MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
  917 
  918         MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
  919 
  920         MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
  921         MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
  922             sc->rx_desc_start);
  923 
  924         /* Reset descriptor indexes */
  925         sc->tx_desc_curr = 0;
  926         sc->rx_desc_curr = 0;
  927         sc->tx_desc_used_idx = 0;
  928         sc->tx_desc_used_count = 0;
  929 
  930         /* Enable RX descriptors */
  931         for (i = 0; i < MGE_RX_DESC_NUM; i++) {
  932                 dw = &sc->mge_rx_desc[i];
  933                 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
  934                 dw->mge_desc->buff_size = MCLBYTES;
  935                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
  936                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  937         }
  938 
  939         /* Enable RX queue */
  940         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
  941 
  942         /* Enable port */
  943         reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
  944         reg_val |= PORT_SERIAL_ENABLE;
  945         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
  946         count = 0x100000;
  947         for (;;) {
  948                 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
  949                 if (reg_val & MGE_STATUS_LINKUP)
  950                         break;
  951                 DELAY(100);
  952                 if (--count == 0) {
  953                         if_printf(sc->ifp, "Timeout on link-up\n");
  954                         break;
  955                 }
  956         }
  957 
  958         /* Setup interrupts coalescing */
  959         mge_set_rxic(sc);
  960         mge_set_txic(sc);
  961 
  962         /* Enable interrupts */
  963 #ifdef DEVICE_POLLING
  964         /*
   965          * ...only if polling is not turned on. Disable interrupts explicitly
  966          * if polling is enabled.
  967          */
  968         if (sc->ifp->if_capenable & IFCAP_POLLING)
  969                 mge_intrs_ctrl(sc, 0);
  970         else
  971 #endif /* DEVICE_POLLING */
  972         mge_intrs_ctrl(sc, 1);
  973 
  974         /* Activate network interface */
  975         sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
  976         sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  977         sc->wd_timer = 0;
  978 
  979         /* Schedule watchdog timeout */
  980         callout_reset(&sc->wd_callout, hz, mge_tick, sc);
  981 }
  982 
  983 static void
  984 mge_intr_err(void *arg)
  985 {
  986         struct mge_softc *sc = arg;
  987         struct ifnet *ifp;
  988 
  989         ifp = sc->ifp;
  990         if_printf(ifp, "%s\n", __FUNCTION__);
  991 }
  992 
  993 static void
  994 mge_intr_misc(void *arg)
  995 {
  996         struct mge_softc *sc = arg;
  997         struct ifnet *ifp;
  998 
  999         ifp = sc->ifp;
 1000         if_printf(ifp, "%s\n", __FUNCTION__);
 1001 }
 1002 
 1003 static void
  1004 mge_intr_rx(void *arg)
{
 1005         struct mge_softc *sc = arg;
 1006         uint32_t int_cause, int_cause_ext;
 1007 
 1008         MGE_RECEIVE_LOCK(sc);
 1009 
 1010 #ifdef DEVICE_POLLING
 1011         if (sc->ifp->if_capenable & IFCAP_POLLING) {
 1012                 MGE_RECEIVE_UNLOCK(sc);
 1013                 return;
 1014         }
 1015 #endif
 1016 
 1017         /* Get interrupt cause */
 1018         int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
 1019         int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 1020 
 1021         /* Check for resource error */
 1022         if (int_cause & MGE_PORT_INT_RXERRQ0) {
 1023                 mge_reinit_rx(sc);
 1024                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
 1025                     int_cause & ~MGE_PORT_INT_RXERRQ0);
 1026         }
 1027 
 1028         int_cause &= MGE_PORT_INT_RXQ0;
 1029         int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
 1030 
 1031         if (int_cause || int_cause_ext) {
 1032                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
 1033                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
 1034                 mge_intr_rx_locked(sc, -1);
 1035         }
 1036 
 1037         MGE_RECEIVE_UNLOCK(sc);
 1038 }
  1039 
 1041 static int
 1042 mge_intr_rx_locked(struct mge_softc *sc, int count)
 1043 {
 1044         struct ifnet *ifp = sc->ifp;
 1045         uint32_t status;
 1046         uint16_t bufsize;
 1047         struct mge_desc_wrapper* dw;
 1048         struct mbuf *mb;
 1049         int rx_npkts = 0;
 1050 
 1051         MGE_RECEIVE_LOCK_ASSERT(sc);
 1052 
 1053         while (count != 0) {
 1054                 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
 1055                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1056                     BUS_DMASYNC_POSTREAD);
 1057 
 1058                 /* Get status */
 1059                 status = dw->mge_desc->cmd_status;
 1060                 bufsize = dw->mge_desc->buff_size;
 1061                 if ((status & MGE_DMA_OWNED) != 0)
 1062                         break;
 1063 
                       /* Pass the frame up only if it holds data and no error. */
  1064                 if (dw->mge_desc->byte_count &&
  1065                     !(status & MGE_ERR_SUMMARY)) {
 1066 
 1067                         bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
 1068                             BUS_DMASYNC_POSTREAD);
 1069 
 1070                         mb = m_devget(dw->buffer->m_data,
 1071                             dw->mge_desc->byte_count - ETHER_CRC_LEN,
 1072                             0, ifp, NULL);
 1073 
 1074                         if (mb == NULL)
 1075                                 /* Give up if no mbufs */
 1076                                 break;
 1077 
                               /*
                                * Strip the 2-byte padding the controller
                                * prepends to align the IP header.
                                */
  1078                         mb->m_len -= 2;
 1079                         mb->m_pkthdr.len -= 2;
 1080                         mb->m_data += 2;
 1081 
 1082                         mge_offload_process_frame(ifp, mb, status,
 1083                             bufsize);
 1084 
 1085                         MGE_RECEIVE_UNLOCK(sc);
 1086                         (*ifp->if_input)(ifp, mb);
 1087                         MGE_RECEIVE_LOCK(sc);
 1088                         rx_npkts++;
 1089                 }
 1090 
 1091                 dw->mge_desc->byte_count = 0;
 1092                 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
  1093                 sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
 1094                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1095                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1096 
 1097                 if (count > 0)
 1098                         count -= 1;
 1099         }
 1100 
 1101         return (rx_npkts);
 1102 }
 1103 
 1104 static void
 1105 mge_intr_sum(void *arg)
 1106 {
 1107         struct mge_softc *sc = arg;
 1108         struct ifnet *ifp;
 1109 
 1110         ifp = sc->ifp;
 1111         if_printf(ifp, "%s\n", __FUNCTION__);
 1112 }
 1113 
 1114 static void
 1115 mge_intr_tx(void *arg)
 1116 {
 1117         struct mge_softc *sc = arg;
 1118         uint32_t int_cause_ext;
 1119 
 1120         MGE_TRANSMIT_LOCK(sc);
 1121 
 1122 #ifdef DEVICE_POLLING
 1123         if (sc->ifp->if_capenable & IFCAP_POLLING) {
 1124                 MGE_TRANSMIT_UNLOCK(sc);
 1125                 return;
 1126         }
 1127 #endif
 1128 
 1129         /* Ack the interrupt */
 1130         int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
 1131         MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
 1132             int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
 1133 
 1134         mge_intr_tx_locked(sc);
 1135 
 1136         MGE_TRANSMIT_UNLOCK(sc);
 1137 }
  1138 
 1140 static void
 1141 mge_intr_tx_locked(struct mge_softc *sc)
 1142 {
 1143         struct ifnet *ifp = sc->ifp;
 1144         struct mge_desc_wrapper *dw;
 1145         struct mge_desc *desc;
 1146         uint32_t status;
 1147         int send = 0;
 1148 
 1149         MGE_TRANSMIT_LOCK_ASSERT(sc);
 1150 
 1151         /* Disable watchdog */
 1152         sc->wd_timer = 0;
 1153 
 1154         while (sc->tx_desc_used_count) {
 1155                 /* Get the descriptor */
 1156                 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
 1157                 desc = dw->mge_desc;
 1158                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1159                     BUS_DMASYNC_POSTREAD);
 1160 
 1161                 /* Get descriptor status */
 1162                 status = desc->cmd_status;
 1163 
 1164                 if (status & MGE_DMA_OWNED)
 1165                         break;
 1166 
  1167                 sc->tx_desc_used_idx =
  1168                     (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
 1169                 sc->tx_desc_used_count--;
 1170 
 1171                 /* Update collision statistics */
 1172                 if (status & MGE_ERR_SUMMARY) {
 1173                         if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
 1174                                 ifp->if_collisions++;
 1175                         if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
 1176                                 ifp->if_collisions += 16;
 1177                 }
 1178 
 1179                 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
 1180                     BUS_DMASYNC_POSTWRITE);
 1181                 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
 1182                 m_freem(dw->buffer);
 1183                 dw->buffer = (struct mbuf*)NULL;
 1184                 send++;
 1185 
 1186                 ifp->if_opackets++;
 1187         }
 1188 
 1189         if (send) {
 1190                 /* Now send anything that was pending */
 1191                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1192                 mge_start_locked(ifp);
 1193         }
 1194 }
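
/*
 * A note on the collision accounting above (presumed from the error code
 * names, not from a datasheet): MGE_TX_ERROR_LC is taken to mean a late
 * collision (counted once), while MGE_TX_ERROR_RL is taken to mean the
 * retransmit limit was reached, i.e. 16 consecutive collisions, hence
 * the += 16.
 */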
 1195 
 1196 static int
 1197 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1198 {
 1199         struct mge_softc *sc = ifp->if_softc;
 1200         struct ifreq *ifr = (struct ifreq *)data;
 1201         int mask, error;
 1202         uint32_t flags;
 1203 
 1204         error = 0;
 1205 
 1206         switch (command) {
 1207         case SIOCSIFFLAGS:
 1208                 MGE_GLOBAL_LOCK(sc);
 1209 
 1210                 if (ifp->if_flags & IFF_UP) {
 1211                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1212                                 flags = ifp->if_flags ^ sc->mge_if_flags;
 1213                                 if (flags & IFF_PROMISC)
 1214                                         mge_set_prom_mode(sc,
 1215                                             MGE_RX_DEFAULT_QUEUE);
 1216 
 1217                                 if (flags & IFF_ALLMULTI)
 1218                                         mge_setup_multicast(sc);
 1219                         } else
 1220                                 mge_init_locked(sc);
 1221                 }
 1222                 else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1223                         mge_stop(sc);
 1224 
 1225                 sc->mge_if_flags = ifp->if_flags;
 1226                 MGE_GLOBAL_UNLOCK(sc);
 1227                 break;
 1228         case SIOCADDMULTI:
 1229         case SIOCDELMULTI:
 1230                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1231                         MGE_GLOBAL_LOCK(sc);
 1232                         mge_setup_multicast(sc);
 1233                         MGE_GLOBAL_UNLOCK(sc);
 1234                 }
 1235                 break;
 1236         case SIOCSIFCAP:
 1237                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
 1238                 if (mask & IFCAP_HWCSUM) {
 1239                         ifp->if_capenable &= ~IFCAP_HWCSUM;
 1240                         ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
 1241                         if (ifp->if_capenable & IFCAP_TXCSUM)
 1242                                 ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
 1243                         else
 1244                                 ifp->if_hwassist = 0;
 1245                 }
 1246 #ifdef DEVICE_POLLING
 1247                 if (mask & IFCAP_POLLING) {
 1248                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
 1249                                 error = ether_poll_register(mge_poll, ifp);
 1250                                 if (error)
 1251                                         return(error);
 1252 
 1253                                 MGE_GLOBAL_LOCK(sc);
 1254                                 mge_intrs_ctrl(sc, 0);
 1255                                 ifp->if_capenable |= IFCAP_POLLING;
 1256                                 MGE_GLOBAL_UNLOCK(sc);
 1257                         } else {
 1258                                 error = ether_poll_deregister(ifp);
 1259                                 MGE_GLOBAL_LOCK(sc);
 1260                                 mge_intrs_ctrl(sc, 1);
 1261                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1262                                 MGE_GLOBAL_UNLOCK(sc);
 1263                         }
 1264                 }
 1265 #endif
 1266                 break;
 1267         case SIOCGIFMEDIA: /* fall through */
 1268         case SIOCSIFMEDIA:
  1269                 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
  1270                     !(ifr->ifr_media & IFM_FDX)) {
  1271                         device_printf(sc->dev,
  1272                             "1000baseTX half-duplex unsupported\n");
  1273                         return (0);
  1274                 }
 1275                 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
 1276                 break;
 1277         default:
 1278                 error = ether_ioctl(ifp, command, data);
 1279         }
 1280         return (error);
 1281 }
 1282 
 1283 static int
 1284 mge_miibus_readreg(device_t dev, int phy, int reg)
 1285 {
 1286         struct mge_softc *sc;
 1287         uint32_t retries;
 1288 
 1289         sc = device_get_softc(dev);
 1290 
 1291         MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
 1292             (MGE_SMI_READ | (reg << 21) | (phy << 16)));
 1293 
 1294         retries = MGE_SMI_READ_RETRIES;
 1295         while (--retries &&
 1296             !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
 1297                 DELAY(MGE_SMI_READ_DELAY);
 1298 
 1299         if (retries == 0)
 1300                 device_printf(dev, "Timeout while reading from PHY\n");
 1301 
 1302         return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
 1303 }
 1304 
 1305 static int
 1306 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
 1307 {
 1308         struct mge_softc *sc;
 1309         uint32_t retries;
 1310 
 1311         sc = device_get_softc(dev);
 1312 
 1313         MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
 1314             (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
 1315 
 1316         retries = MGE_SMI_WRITE_RETRIES;
 1317         while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
 1318                 DELAY(MGE_SMI_WRITE_DELAY);
 1319 
 1320         if (retries == 0)
 1321                 device_printf(dev, "Timeout while writing to PHY\n");
 1322         return (0);
 1323 }
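
/*
 * Layout of the SMI command word used by the two accessors above (a
 * sketch inferred from the shifts, not from a datasheet): bits 25:21
 * carry the PHY register number, bits 20:16 the PHY address, and bits
 * 15:0 the data. For example, reading register 1 (BMSR) of the PHY at
 * address 8 writes
 *
 *	MGE_SMI_READ | (1 << 21) | (8 << 16)
 *
 * and then polls for MGE_SMI_READVALID before fetching the low 16 bits.
 */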
 1324 
 1325 static int
 1326 mge_probe(device_t dev)
 1327 {
 1328 
 1329         if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
 1330                 return (ENXIO);
 1331 
 1332         device_set_desc(dev, "Marvell Gigabit Ethernet controller");
 1333         return (BUS_PROBE_DEFAULT);
 1334 }
 1335 
 1336 static int
 1337 mge_resume(device_t dev)
 1338 {
 1339 
 1340         device_printf(dev, "%s\n", __FUNCTION__);
 1341         return (0);
 1342 }
 1343 
 1344 static int
 1345 mge_shutdown(device_t dev)
 1346 {
 1347         struct mge_softc *sc = device_get_softc(dev);
 1348 
 1349         MGE_GLOBAL_LOCK(sc);
 1350 
 1351 #ifdef DEVICE_POLLING
 1352         if (sc->ifp->if_capenable & IFCAP_POLLING)
 1353                 ether_poll_deregister(sc->ifp);
 1354 #endif
 1355 
 1356         mge_stop(sc);
 1357 
 1358         MGE_GLOBAL_UNLOCK(sc);
 1359 
 1360         return (0);
 1361 }
 1362 
 1363 static int
 1364 mge_encap(struct mge_softc *sc, struct mbuf *m0)
 1365 {
 1366         struct mge_desc_wrapper *dw = NULL;
 1367         struct ifnet *ifp;
 1368         bus_dma_segment_t segs[MGE_TX_DESC_NUM];
 1369         bus_dmamap_t mapp;
 1370         int error;
 1371         int seg, nsegs;
 1372         int desc_no;
 1373 
 1374         ifp = sc->ifp;
 1375 
 1376         /* Check for free descriptors */
 1377         if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
 1378                 /* No free descriptors */
 1379                 return (-1);
 1380         }
 1381 
 1382         /* Fetch unused map */
 1383         desc_no = sc->tx_desc_curr;
 1384         dw = &sc->mge_tx_desc[desc_no];
 1385         mapp = dw->buffer_dmap;
 1386 
 1387         /* Create mapping in DMA memory */
 1388         error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
 1389             BUS_DMA_NOWAIT);
  1390         if (error != 0 || nsegs != 1) {
 1391                 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
 1392                 return ((error != 0) ? error : -1);
 1393         }
 1394 
 1395         bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
 1396 
 1397         /* Everything is ok, now we can send buffers */
 1398         for (seg = 0; seg < nsegs; seg++) {
 1399                 dw->mge_desc->byte_count = segs[seg].ds_len;
 1400                 dw->mge_desc->buffer = segs[seg].ds_addr;
 1401                 dw->buffer = m0;
 1402                 dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
 1403                     MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
 1404                     MGE_DMA_OWNED;
 1405 
 1406                 if (seg == 0)
 1407                         mge_offload_setup_descriptor(sc, dw);
 1408         }
 1409 
 1410         bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1411             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1412 
  1413         sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
 1414         sc->tx_desc_used_count++;
 1415         return (0);
 1416 }
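
/*
 * A note on the single-segment requirement above (inferred from the
 * code): each TX descriptor describes exactly one buffer, so mge_encap()
 * rejects mappings with nsegs != 1; mge_start_locked() compensates by
 * running every packet through m_defrag() first, which collapses mbuf
 * chains into a single cluster whenever memory allows.
 */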
 1417 
 1418 static void
 1419 mge_tick(void *msc)
 1420 {
 1421         struct mge_softc *sc = msc;
 1422 
 1423         /* Check for TX timeout */
 1424         mge_watchdog(sc);
 1425 
 1426         mii_tick(sc->mii);
 1427 
 1428         /* Check for media type change */
  1429         if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
 1430                 mge_ifmedia_upd(sc->ifp);
 1431 
 1432         /* Schedule another timeout one second from now */
 1433         callout_reset(&sc->wd_callout, hz, mge_tick, sc);
 1434 }
 1435 
 1436 static void
 1437 mge_watchdog(struct mge_softc *sc)
 1438 {
 1439         struct ifnet *ifp;
 1440 
 1441         ifp = sc->ifp;
 1442 
 1443         MGE_GLOBAL_LOCK(sc);
 1444 
 1445         if (sc->wd_timer == 0 || --sc->wd_timer) {
 1446                 MGE_GLOBAL_UNLOCK(sc);
 1447                 return;
 1448         }
 1449 
 1450         ifp->if_oerrors++;
 1451         if_printf(ifp, "watchdog timeout\n");
 1452 
 1453         mge_stop(sc);
 1454         mge_init_locked(sc);
 1455 
 1456         MGE_GLOBAL_UNLOCK(sc);
 1457 }
 1458 
 1459 static void
 1460 mge_start(struct ifnet *ifp)
 1461 {
 1462         struct mge_softc *sc = ifp->if_softc;
 1463 
 1464         MGE_TRANSMIT_LOCK(sc);
 1465 
 1466         mge_start_locked(ifp);
 1467 
 1468         MGE_TRANSMIT_UNLOCK(sc);
 1469 }
 1470 
 1471 static void
 1472 mge_start_locked(struct ifnet *ifp)
 1473 {
 1474         struct mge_softc *sc;
 1475         struct mbuf *m0, *mtmp;
 1476         uint32_t reg_val, queued = 0;
 1477 
 1478         sc = ifp->if_softc;
 1479 
 1480         MGE_TRANSMIT_LOCK_ASSERT(sc);
 1481 
 1482         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1483             IFF_DRV_RUNNING)
 1484                 return;
 1485 
 1486         for (;;) {
 1487                 /* Get packet from the queue */
 1488                 IF_DEQUEUE(&ifp->if_snd, m0);
 1489                 if (m0 == NULL)
 1490                         break;
 1491 
 1492                 mtmp = m_defrag(m0, M_NOWAIT);
 1493                 if (mtmp)
 1494                         m0 = mtmp;
 1495 
 1496                 if (mge_encap(sc, m0)) {
 1497                         IF_PREPEND(&ifp->if_snd, m0);
 1498                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1499                         break;
 1500                 }
 1501                 queued++;
 1502                 BPF_MTAP(ifp, m0);
 1503         }
 1504 
 1505         if (queued) {
 1506                 /* Enable transmitter and watchdog timer */
 1507                 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
 1508                 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
 1509                 sc->wd_timer = 5;
 1510         }
 1511 }
 1512 
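      /*
       * mge_stop: quiesce the controller.  Stops the callout, masks
       * interrupts, disables the RX/TX queues, reclaims descriptors not
       * yet owned by the DMA engine and, after a bounded busy-wait for
       * the TX FIFO to drain, disables the port.
       */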
 1513 static void
 1514 mge_stop(struct mge_softc *sc)
 1515 {
 1516         struct ifnet *ifp;
 1517         volatile uint32_t reg_val, status;
 1518         struct mge_desc_wrapper *dw;
 1519         struct mge_desc *desc;
 1520         int count;
 1521 
 1522         ifp = sc->ifp;
 1523 
 1524         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 1525                 return;
 1526 
 1527         /* Stop tick engine */
 1528         callout_stop(&sc->wd_callout);
 1529 
 1530         /* Disable interface */
 1531         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1532         sc->wd_timer = 0;
 1533 
 1534         /* Disable interrupts */
 1535         mge_intrs_ctrl(sc, 0);
 1536 
 1537         /* Disable Rx and Tx */
 1538         reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
 1539         MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
 1540         MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
 1541 
 1542         /* Remove pending data from TX queue */
 1543         while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
 1544             sc->tx_desc_used_count) {
 1545                 /* Get the descriptor */
 1546                 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
 1547                 desc = dw->mge_desc;
 1548                 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 1549                     BUS_DMASYNC_POSTREAD);
 1550 
 1551                 /* Get descriptor status */
 1552                 status = desc->cmd_status;
 1553 
 1554                 if (status & MGE_DMA_OWNED)
 1555                         break;
 1556 
 1557                 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
 1558                     MGE_TX_DESC_NUM;
 1559                 sc->tx_desc_used_count--;
 1560 
 1561                 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
 1562                     BUS_DMASYNC_POSTWRITE);
 1563                 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
 1564 
 1565                 m_freem(dw->buffer);
 1566                 dw->buffer = NULL;
 1567         }
 1568 
 1569         /* Wait for end of transmission */
 1570         count = 0x100000;
 1571         while (count--) {
 1572                 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
 1573                 if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
 1574                     (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
 1575                         break;
 1576                 DELAY(100);
 1577         }
 1578 
 1579         if (!count)
 1580                 if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
 1581                     __func__);
 1582 
 1583         reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
 1584         reg_val &= ~(PORT_SERIAL_ENABLE);
 1585         MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
 1586 }
 1587 
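      /*
       * mge_suspend: power-management stub; suspend handling is not
       * implemented.
       */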
 1588 static int
 1589 mge_suspend(device_t dev)
 1590 {
 1591 
 1592         device_printf(dev, "%s\n", __func__);
 1593         return (0);
 1594 }
 1595 
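      /*
       * mge_offload_process_frame: translate hardware RX status bits into
       * mbuf checksum flags (IP header validity, TCP/UDP pseudo-header
       * checksum) when RX checksum offload is enabled.
       */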
 1596 static void
 1597 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
 1598     uint32_t status, uint16_t bufsize)
 1599 {
 1600         int csum_flags = 0;
 1601 
 1602         if (ifp->if_capenable & IFCAP_RXCSUM) {
 1603                 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
 1604                         csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
 1605 
 1606                 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
 1607                     (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
 1608                     (status & MGE_RX_L4_CSUM_OK)) {
 1609                         csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 1610                         frame->m_pkthdr.csum_data = 0xFFFF;
 1611                 }
 1612 
 1613                 frame->m_pkthdr.csum_flags = csum_flags;
 1614         }
 1615 }
 1616 
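      /*
       * mge_offload_setup_descriptor: encode TX checksum-offload requests
       * (IPv4 header, TCP/UDP payload) into the descriptor command/status
       * word.  Only ETHERTYPE_IP frames are supported.
       */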
 1617 static void
 1618 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
 1619 {
 1620         struct mbuf *m0 = dw->buffer;
 1621         struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
 1622         int csum_flags = m0->m_pkthdr.csum_flags;
 1623         int cmd_status = 0;
 1624         struct ip *ip;
 1625         int ehlen, etype;
 1626 
 1627         if (csum_flags) {
 1628                 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 1629                         etype = ntohs(eh->evl_proto);
 1630                         ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 1631                         cmd_status |= MGE_TX_VLAN_TAGGED;
 1632                 } else {
 1633                         etype = ntohs(eh->evl_encap_proto);
 1634                         ehlen = ETHER_HDR_LEN;
 1635                 }
 1636 
 1637                 if (etype != ETHERTYPE_IP) {
 1638                         if_printf(sc->ifp,
 1639                             "TCP/IP Offload enabled for unsupported "
 1640                             "protocol!\n");
 1641                         return;
 1642                 }
 1643 
 1644                 ip = (struct ip *)(m0->m_data + ehlen);
 1645                 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
 1646 
 1647                 if ((m0->m_flags & M_FRAG) == 0)
 1648                         cmd_status |= MGE_TX_NOT_FRAGMENT;
 1649         }
 1650 
 1651         if (csum_flags & CSUM_IP)
 1652                 cmd_status |= MGE_TX_GEN_IP_CSUM;
 1653 
 1654         if (csum_flags & CSUM_TCP)
 1655                 cmd_status |= MGE_TX_GEN_L4_CSUM;
 1656 
 1657         if (csum_flags & CSUM_UDP)
 1658                 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
 1659 
 1660         dw->mge_desc->cmd_status |= cmd_status;
 1661 }
 1662 
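      /*
       * mge_intrs_ctrl: unmask the RX/TX interrupt sources used by the
       * driver, or mask and acknowledge all interrupts when disabling.
       */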
 1663 static void
 1664 mge_intrs_ctrl(struct mge_softc *sc, int enable)
 1665 {
 1666 
 1667         if (enable) {
 1668                 MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
 1669                     MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
 1670                 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
 1671                     MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
 1672                     MGE_PORT_INT_EXT_TXBUF0);
 1673         } else {
 1674                 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
 1675                 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
 1676 
 1677                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
 1678                 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
 1679 
 1680                 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
 1681                 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
 1682         }
 1683 }
 1684 
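      /*
       * mge_crc8: table-driven CRC-8 (polynomial 0x07) over 'size' bytes,
       * used to hash multicast addresses into the Other Multicast Table.
       */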
 1685 static uint8_t
 1686 mge_crc8(uint8_t *data, int size)
 1687 {
 1688         uint8_t crc = 0;
 1689         static const uint8_t ct[256] = {
 1690                 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
 1691                 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
 1692                 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
 1693                 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
 1694                 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
 1695                 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
 1696                 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
 1697                 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
 1698                 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
 1699                 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
 1700                 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
 1701                 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
 1702                 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
 1703                 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
 1704                 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
 1705                 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
 1706                 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
 1707                 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
 1708                 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
 1709                 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
 1710                 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
 1711                 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
 1712                 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
 1713                 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
 1714                 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
 1715                 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
 1716                 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
 1717                 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
 1718                 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
 1719                 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
 1720                 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
 1721                 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
 1722         };
 1723 
 1724         while (size--)
 1725                 crc = ct[crc ^ *(data++)];
 1726 
 1727         return (crc);
 1728 }
 1729 
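      /*
       * mge_setup_multicast: program the hardware multicast filters.  In
       * ALLMULTI mode every slot of both tables is opened.  Otherwise,
       * addresses of the form 01:00:5E:00:00:xx index the Special
       * Multicast Table directly by their last byte, while all others are
       * hashed with mge_crc8() into the Other Multicast Table.
       */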
 1730 static void
 1731 mge_setup_multicast(struct mge_softc *sc)
 1732 {
 1733         uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
 1734         uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
 1735         uint32_t smt[MGE_MCAST_REG_NUMBER];
 1736         uint32_t omt[MGE_MCAST_REG_NUMBER];
 1737         struct ifnet *ifp = sc->ifp;
 1738         struct ifmultiaddr *ifma;
 1739         uint8_t *mac;
 1740         int i;
 1741 
 1742         if (ifp->if_flags & IFF_ALLMULTI) {
 1743                 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
 1744                         smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
 1745         } else {
 1746                 memset(smt, 0, sizeof(smt));
 1747                 memset(omt, 0, sizeof(omt));
 1748 
 1749                 if_maddr_rlock(ifp);
 1750                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 1751                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1752                                 continue;
 1753 
 1754                         mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
 1755                         if (memcmp(mac, special, sizeof(special)) == 0) {
 1756                                 i = mac[5];
 1757                                 smt[i >> 2] |= v << ((i & 0x03) << 3);
 1758                         } else {
 1759                                 i = mge_crc8(mac, ETHER_ADDR_LEN);
 1760                                 omt[i >> 2] |= v << ((i & 0x03) << 3);
 1761                         }
 1762                 }
 1763                 if_maddr_runlock(ifp);
 1764         }
 1765 
 1766         for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
 1767                 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
 1768                 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
 1769         }
 1770 }
 1771 
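      /*
       * mge_set_rxic: clamp and program the RX interrupt coalescing time
       * in the SDMA configuration register.
       */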
 1772 static void
 1773 mge_set_rxic(struct mge_softc *sc)
 1774 {
 1775         uint32_t reg;
 1776 
 1777         if (sc->rx_ic_time > sc->mge_rx_ipg_max)
 1778                 sc->rx_ic_time = sc->mge_rx_ipg_max;
 1779 
 1780         reg = MGE_READ(sc, MGE_SDMA_CONFIG);
 1781         reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
 1782         reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
 1783         MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
 1784 }
 1785 
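      /*
       * mge_set_txic: clamp and program the TX interrupt coalescing time
       * via the TX FIFO urgent threshold register.
       */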
 1786 static void
 1787 mge_set_txic(struct mge_softc *sc)
 1788 {
 1789         uint32_t reg;
 1790 
 1791         if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
 1792                 sc->tx_ic_time = sc->mge_tfut_ipg_max;
 1793 
 1794         reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
 1795         reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
 1796         reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
 1797         MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
 1798 }
 1799 
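      /*
       * mge_sysctl_ic: sysctl handler shared by the RX and TX coalescing
       * knobs; arg2 selects the direction (MGE_IC_RX or MGE_IC_TX).
       */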
 1800 static int
 1801 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
 1802 {
 1803         struct mge_softc *sc = (struct mge_softc *)arg1;
 1804         uint32_t time;
 1805         int error;
 1806 
 1807         time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
 1808         error = sysctl_handle_int(oidp, &time, 0, req);
 1809         if (error != 0)
 1810                 return (error);
 1811 
 1812         MGE_GLOBAL_LOCK(sc);
 1813         if (arg2 == MGE_IC_RX) {
 1814                 sc->rx_ic_time = time;
 1815                 mge_set_rxic(sc);
 1816         } else {
 1817                 sc->tx_ic_time = time;
 1818                 mge_set_txic(sc);
 1819         }
 1820         MGE_GLOBAL_UNLOCK(sc);
 1821 
 1822         return (0);
 1823 }
 1824 
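      /*
       * mge_add_sysctls: attach the interrupt-coalescing sysctl subtree
       * under the device node (e.g. dev.mge.0.int_coal.rx_time for the
       * first unit).
       */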
 1825 static void
 1826 mge_add_sysctls(struct mge_softc *sc)
 1827 {
 1828         struct sysctl_ctx_list *ctx;
 1829         struct sysctl_oid_list *children;
 1830         struct sysctl_oid *tree;
 1831 
 1832         ctx = device_get_sysctl_ctx(sc->dev);
 1833         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 1834         tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
 1835             CTLFLAG_RD, 0, "MGE interrupt coalescing");
 1836         children = SYSCTL_CHILDREN(tree);
 1837 
 1838         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
 1839             CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
 1840             "I", "IC RX time threshold");
 1841         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
 1842             CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
 1843             "I", "IC TX time threshold");
 1844 }
