FreeBSD/Linux Kernel Cross Reference
sys/powerpc/ps3/if_glc.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int      glc_probe(device_t);
static int      glc_attach(device_t);
static void     glc_init(void *xsc);
static void     glc_start(struct ifnet *ifp);
static int      glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void     glc_set_multicast(struct glc_softc *sc);
static int      glc_add_rxbuf(struct glc_softc *sc, int idx);
static int      glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int      glc_encap(struct glc_softc *sc, struct mbuf **m_head,
                    bus_addr_t *pktdesc);
static int      glc_intr_filter(void *xsc);
static void     glc_intr(void *xsc);
static void     glc_tick(void *xsc);
static void     glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static int      glc_media_change(struct ifnet *ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         glc_probe),
        DEVMETHOD(device_attach,        glc_attach),
        { 0, 0 }
};

static driver_t glc_driver = {
        "glc",
        glc_methods,
        sizeof(struct glc_softc)
};

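/*
 * Register the driver on ps3bus.  No detach method is provided, presumably
 * because the GELIC is a fixed onboard device that cannot be unplugged.
 */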
DRIVER_MODULE(glc, ps3bus, glc_driver, 0, 0);

static int
glc_probe(device_t dev)
{

        if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
            ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
                return (ENXIO);

        device_set_desc(dev, "Playstation 3 GELIC Network Controller");
        return (BUS_PROBE_SPECIFIC);
}

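/*
 * Callback for bus_dmamap_load(): saves the physical address of the first
 * (and, for the tags used here, only) DMA segment into the caller-supplied
 * bus_addr_t.
 */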
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{
        if (error != 0)
                return;

        *(bus_addr_t *)xaddr = segs[0].ds_addr;
}

static int
glc_attach(device_t dev)
{
        struct glc_softc *sc;
        struct glc_txsoft *txs;
        uint64_t mac64, val, junk;
        int i, err;

        sc = device_get_softc(dev);

        sc->sc_bus = ps3bus_get_bus(dev);
        sc->sc_dev = ps3bus_get_device(dev);
        sc->sc_self = dev;

        mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
        sc->next_txdma_slot = 0;
        sc->bsy_txdma_slots = 0;
        sc->sc_next_rxdma_slot = 0;
        sc->first_used_txdma_slot = -1;

        /*
         * Shut down existing tasks.
         */

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

        sc->sc_ifp = if_alloc(IFT_ETHER);
        sc->sc_ifp->if_softc = sc;

        /*
         * Get MAC address and VLAN id
         */

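        /*
         * The hypervisor returns the 6-byte MAC address right-justified in
         * a 64-bit big-endian word, which is what the copy from byte
         * offset 2 below assumes.
         */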
        lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
            0, 0, 0, &mac64, &junk);
        memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
        sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
        err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
            GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
        if (err == 0)
                sc->sc_tx_vlan = val;
        err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
            GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
        if (err == 0)
                sc->sc_rx_vlan = val;

        /*
         * Set up interrupt handler
         */
        sc->sc_irqid = 0;
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
            RF_ACTIVE);
        if (sc->sc_irq == NULL) {
                device_printf(dev, "Could not allocate IRQ!\n");
                if_free(sc->sc_ifp);
                mtx_destroy(&sc->sc_mtx);
                return (ENXIO);
        }

        bus_setup_intr(dev, sc->sc_irq,
            INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
            glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
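        /*
         * The hypervisor reports interrupt causes by writing a 64-bit
         * status word into guest memory, so allocate one aligned
         * doubleword below 4 GB and register its physical address.
         */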
        sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
            BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
        lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
            vtophys(sc->sc_hwirq_status), 0);
        lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
            GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
            GELIC_INT_TX_CHAIN_END, 0);

        /*
         * Set up DMA.
         */

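        /*
         * One tag covers both descriptor rings (128 glc_dmadesc entries
         * each); the maxsize of 129 entries apparently leaves slack for
         * the 32-byte alignment constraint.
         */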
        err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
            0, NULL, NULL, &sc->sc_dmadesc_tag);

        err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_txdmadesc_map);
        err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
            sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
            &sc->sc_txdmadesc_phys, 0);
        err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc->sc_rxdmadesc_map);
        err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
            &sc->sc_rxdmadesc_phys, 0);

        err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_rxdma_tag);
        err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_txdma_tag);

        /* init transmit descriptors */
        STAILQ_INIT(&sc->sc_txfreeq);
        STAILQ_INIT(&sc->sc_txdirtyq);

        /* create TX DMA maps */
        err = ENOMEM;
        for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
                txs = &sc->sc_txsoft[i];
                txs->txs_mbuf = NULL;
                err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
                if (err) {
                        device_printf(dev,
                            "unable to create TX DMA map %d, error = %d\n",
                            i, err);
                }
                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }

        /* Create the receive buffer DMA maps. */
        for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
                err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
                    &sc->sc_rxsoft[i].rxs_dmamap);
                if (err) {
                        device_printf(dev,
                            "unable to create RX DMA map %d, error = %d\n",
                            i, err);
                }
                sc->sc_rxsoft[i].rxs_mbuf = NULL;
        }

        /*
         * Attach to network stack
         */

        if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
        sc->sc_ifp->if_mtu = ETHERMTU;
        sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
        sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM;
        sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM;
        sc->sc_ifp->if_start = glc_start;
        sc->sc_ifp->if_ioctl = glc_ioctl;
        sc->sc_ifp->if_init = glc_init;

        ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
            glc_media_status);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

        IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS);
        sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS;
        IFQ_SET_READY(&sc->sc_ifp->if_snd);

        ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
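        /*
         * Note: if_hwassist is zeroed again here, apparently leaving TX
         * checksum offload disabled by default even though the capability
         * is advertised above.
         */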
        sc->sc_ifp->if_hwassist = 0;

        return (0);
}

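/*
 * Bring the interface up with the lock held: quiesce both DMA engines,
 * reprogram the multicast filter, prime the RX ring, reclaim anything
 * left on the TX dirty queue, restart RX DMA, and arm the watchdog tick.
 */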
static void
glc_init_locked(struct glc_softc *sc)
{
        int i, error;
        struct glc_rxsoft *rxs;
        struct glc_txsoft *txs;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

        glc_set_multicast(sc);

        for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
                rxs = &sc->sc_rxsoft[i];
                rxs->rxs_desc_slot = i;

                if (rxs->rxs_mbuf == NULL) {
                        glc_add_rxbuf(sc, i);

                        if (rxs->rxs_mbuf == NULL) {
                                rxs->rxs_desc_slot = -1;
                                break;
                        }
                }

                glc_add_rxbuf_dma(sc, i);
                bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
                    BUS_DMASYNC_PREREAD);
        }

        /* Clear TX dirty queue */
        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

                if (txs->txs_mbuf != NULL) {
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }

                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
        }
        sc->first_used_txdma_slot = -1;
        sc->bsy_txdma_slots = 0;

        error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
            sc->sc_rxsoft[0].rxs_desc, 0);
        if (error != 0)
                device_printf(sc->sc_self,
                    "lv1_net_start_rx_dma error: %d\n", error);

        sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
        sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        sc->sc_ifpflags = sc->sc_ifp->if_flags;

        sc->sc_wdog_timer = 0;
        callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

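/*
 * Stop only quiesces the DMA engines; buffers and descriptors are
 * reclaimed on the next glc_init_locked().
 */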
static void
glc_stop(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
        lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_lock(&sc->sc_mtx);
        glc_init_locked(sc);
        mtx_unlock(&sc->sc_mtx);
}

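/*
 * Once-a-second tick: pokes the RX engine (see the XXX below) and runs
 * the TX watchdog.  sc_wdog_timer is set to 5 when a transmission starts
 * and cleared when the dirty queue drains; if it counts down to zero
 * here, the device is deemed wedged and reinitialized.
 */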
static void
glc_tick(void *xsc)
{
        struct glc_softc *sc = xsc;

        mtx_assert(&sc->sc_mtx, MA_OWNED);

        /*
         * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
         * we figure out why. This will fail harmlessly if the RX queue is
         * already running.
         */
        lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
            sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);

        if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
                callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
                return;
        }

        /* Problems */
        device_printf(sc->sc_self, "device timeout\n");

        glc_init_locked(sc);
}

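/*
 * Drain the interface send queue into the TX descriptor ring.  If the
 * dirty queue was empty on entry, the DMA engine is idle and must be
 * kicked at the first newly queued descriptor ("kickstart"); otherwise
 * the running chain will reach the new packets on its own.
 */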
static void
glc_start_locked(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;
        bus_addr_t first, pktdesc;
        int kickstart = 0;
        int error;
        struct mbuf *mb_head;

        mtx_assert(&sc->sc_mtx, MA_OWNED);
        first = 0;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;

        if (STAILQ_EMPTY(&sc->sc_txdirtyq))
                kickstart = 1;

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

                if (mb_head == NULL)
                        break;

                /* Check if the ring buffer is full */
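                /*
                 * (128 slots total; a few are presumably kept spare since
                 * a single packet can consume several descriptors.)
                 */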
                if (sc->bsy_txdma_slots > 125) {
                        /* Put the packet back and stop */
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
                        break;
                }

                BPF_MTAP(ifp, mb_head);

                if (sc->sc_tx_vlan >= 0)
                        mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

                if (glc_encap(sc, &mb_head, &pktdesc)) {
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }

                if (first == 0)
                        first = pktdesc;
        }

        if (kickstart && first != 0) {
                error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_tx_dma error: %d\n", error);
                sc->sc_wdog_timer = 5;
        }
}

static void
glc_start(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;

        mtx_lock(&sc->sc_mtx);
        glc_start_locked(ifp);
        mtx_unlock(&sc->sc_mtx);
}

static int
glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct glc_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int err = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                mtx_lock(&sc->sc_mtx);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                           ((ifp->if_flags ^ sc->sc_ifpflags) &
                            (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                                glc_set_multicast(sc);
                        else
                                glc_init_locked(sc);
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        glc_stop(sc);
                sc->sc_ifpflags = ifp->if_flags;
                mtx_unlock(&sc->sc_mtx);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                mtx_lock(&sc->sc_mtx);
                glc_set_multicast(sc);
                mtx_unlock(&sc->sc_mtx);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
                break;
        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (err);
}

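/*
 * if_foreach_llmaddr() callback: program one link-level multicast address
 * into the hardware filter and return 1 so the caller can count how many
 * entries were added.
 */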
static u_int
glc_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        struct glc_softc *sc = arg;
        uint64_t addr;

        /*
         * Filter can only hold 32 addresses, so fall back to
         * the IFF_ALLMULTI case if we have too many. +1 is for
         * broadcast.
         */
        if (cnt + 1 == 32)
                return (0);

        addr = 0;
        memcpy(&((uint8_t *)(&addr))[2], LLADDR(sdl), ETHER_ADDR_LEN);
        lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, addr, 0);

        return (1);
}

static void
glc_set_multicast(struct glc_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        int naddrs;

        /* Clear multicast filter */
        lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

        /* Add broadcast */
        lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
            0xffffffffffffL, 0);

        if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
        } else {
                naddrs = if_foreach_llmaddr(ifp, glc_add_maddr, sc);
                if (naddrs + 1 == 32)
                        lv1_net_add_multicast_address(sc->sc_bus,
                            sc->sc_dev, 0, 1);
        }
}

static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
        struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        int error, nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

        if (rxs->rxs_mbuf != NULL) {
                bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
        }

        error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_self,
                    "cannot load RX DMA map %d, error = %d\n", idx, error);
                m_freem(m);
                return (error);
        }
        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1,
            ("%s: too many DMA segments (%d)", __func__, nsegs));
        rxs->rxs_mbuf = m;
        rxs->segment = segs[0];

        bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

        return (0);
}

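/*
 * Fill in the hardware RX descriptor for slot idx, link it circularly to
 * the following slot, and hand it to the device by setting
 * GELIC_DESCR_OWNED.
 */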
static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
        struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

        bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
        sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
        sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
        sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
            ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
        sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

        rxs->rxs_desc_slot = idx;
        rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

        return (0);
}

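/*
 * Map an outgoing mbuf chain into up to 16 TX descriptors (collapsing the
 * chain first if it has more fragments than free slots), link them into
 * the ring, and return the physical address of the first descriptor
 * through *pktdesc.
 */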
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
        bus_dma_segment_t segs[16];
        struct glc_txsoft *txs;
        struct mbuf *m;
        bus_addr_t firstslotphys;
        int i, idx, nsegs, nsegs_max;
        int err = 0;

        /* Max number of segments is the number of free DMA slots */
        nsegs_max = 128 - sc->bsy_txdma_slots;

        if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
                nsegs_max = 16;

        /* Get a work queue entry. */
        if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
                /* Ran out of descriptors. */
                return (ENOBUFS);
        }

        nsegs = 0;
        for (m = *m_head; m != NULL; m = m->m_next)
                nsegs++;

        if (nsegs > nsegs_max) {
                m = m_collapse(*m_head, M_NOWAIT, nsegs_max);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
        }

        err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
            *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
        if (err != 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (err);
        }

        KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
            ("GLC: Mapped too many (%d) DMA segments with %d available",
            nsegs, 128 - sc->bsy_txdma_slots));

        if (nsegs == 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (EIO);
        }

        txs->txs_ndescs = nsegs;
        txs->txs_firstdesc = sc->next_txdma_slot;

        idx = txs->txs_firstdesc;
        firstslotphys = sc->sc_txdmadesc_phys +
            txs->txs_firstdesc*sizeof(struct glc_dmadesc);

        for (i = 0; i < nsegs; i++) {
                bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
                sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
                sc->sc_txdmadesc[idx].len = segs[i].ds_len;
                sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
                    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
                sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

                if (i + 1 == nsegs) {
                        txs->txs_lastdesc = idx;
                        sc->sc_txdmadesc[idx].next = 0;
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
                }

                if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
                if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
                        sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
                sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

                idx = (idx + 1) % GLC_MAX_TX_PACKETS;
        }
        sc->next_txdma_slot = idx;
        sc->bsy_txdma_slots += nsegs;
        if (txs->txs_firstdesc != 0)
                idx = txs->txs_firstdesc - 1;
        else
                idx = GLC_MAX_TX_PACKETS - 1;

        if (sc->first_used_txdma_slot < 0)
                sc->first_used_txdma_slot = txs->txs_firstdesc;

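        /*
         * idx now points at the slot just before this packet's first
         * descriptor; pointing its next field at us appends this packet to
         * the previous chain, presumably so a still-running TX engine
         * walks straight into the new descriptors.
         */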
        bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
            BUS_DMASYNC_PREWRITE);
        sc->sc_txdmadesc[idx].next = firstslotphys;

        STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
        STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
        txs->txs_mbuf = *m_head;
        *pktdesc = firstslotphys;

        return (0);
}

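/*
 * RX interrupt work: walk the ring draining every descriptor whose
 * ownership bit the hardware has cleared, pass good packets up the stack
 * with checksum results attached, requeue each slot, and restart the RX
 * engine if it reported hitting the end of the chain.
 */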
static void
glc_rxintr(struct glc_softc *sc)
{
        int i, restart_rxdma, error;
        struct mbuf *m;
        struct ifnet *ifp = sc->sc_ifp;

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            BUS_DMASYNC_POSTREAD);

        restart_rxdma = 0;
        while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
           GELIC_DESCR_OWNED) == 0) {
                i = sc->sc_next_rxdma_slot;
                sc->sc_next_rxdma_slot++;
                if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
                        sc->sc_next_rxdma_slot = 0;

                if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
                        restart_rxdma = 1;

                if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        goto requeue;
                }

                m = sc->sc_rxsoft[i].rxs_mbuf;
                if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
                        m->m_pkthdr.csum_flags |=
                            CSUM_IP_CHECKED | CSUM_IP_VALID;
                }
                if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
                        m->m_pkthdr.csum_flags |=
                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        m->m_pkthdr.csum_data = 0xffff;
                }

                if (glc_add_rxbuf(sc, i)) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        goto requeue;
                }

                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
                m->m_pkthdr.rcvif = ifp;
                m->m_len = sc->sc_rxdmadesc[i].valid_size;
                m->m_pkthdr.len = m->m_len;

                /*
                 * Remove VLAN tag. Even on early firmwares that do not allow
                 * multiple VLANs, the VLAN tag is still in place here.
                 */
                m_adj(m, 2);

                mtx_unlock(&sc->sc_mtx);
                (*ifp->if_input)(ifp, m);
                mtx_lock(&sc->sc_mtx);

            requeue:
                glc_add_rxbuf_dma(sc, i);
        }

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
            BUS_DMASYNC_PREWRITE);

        if (restart_rxdma) {
                error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
                    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_rx_dma error: %d\n", error);
        }
}

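/*
 * TX completion: reclaim every descriptor chain the hardware has finished
 * with, count errors (any bit in the high nibble of cmd_stat appears to
 * indicate a TX error), restart the engine if a chain ended or an error
 * stopped it, and resume transmission if slots were freed.
 */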
static void
glc_txintr(struct glc_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct glc_txsoft *txs;
        int progress = 0, kickstart = 0, error;

        bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
            BUS_DMASYNC_POSTREAD);

        while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
                if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
                    & GELIC_DESCR_OWNED)
                        break;

                STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
                bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
                sc->bsy_txdma_slots -= txs->txs_ndescs;

                if (txs->txs_mbuf != NULL) {
                        m_freem(txs->txs_mbuf);
                        txs->txs_mbuf = NULL;
                }

                if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
                    != 0) {
                        lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
                        kickstart = 1;
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                }

                if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
                    GELIC_CMDSTAT_CHAIN_END)
                        kickstart = 1;

                STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                progress = 1;
        }

        if (txs != NULL)
                sc->first_used_txdma_slot = txs->txs_firstdesc;
        else
                sc->first_used_txdma_slot = -1;

        if (kickstart || txs != NULL) {
                /* Speculatively (or necessarily) start the TX queue again */
                error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
                    sc->sc_txdmadesc_phys +
                    ((txs == NULL) ? 0 : txs->txs_firstdesc)*
                     sizeof(struct glc_dmadesc), 0);
                if (error != 0)
                        device_printf(sc->sc_self,
                            "lv1_net_start_tx_dma error: %d\n", error);
        }

        if (progress) {
                /*
                 * We freed some descriptors, so reset IFF_DRV_OACTIVE
                 * and restart.
                 */
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
                    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        glc_start_locked(ifp);
        }
}

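/*
 * Fast interrupt filter: runs in primary interrupt context, so it only
 * latches the hypervisor-written status word into sc_interrupt_status
 * (atomically, since the ithread clears it) and schedules glc_intr().
 */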
static int
glc_intr_filter(void *xsc)
{
        struct glc_softc *sc = xsc;

        powerpc_sync();
        atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
        return (FILTER_SCHEDULE_THREAD);
}

static void
glc_intr(void *xsc)
{
        struct glc_softc *sc = xsc;
        uint64_t status, linkstat, junk;

        mtx_lock(&sc->sc_mtx);

        status = atomic_readandclear_64(&sc->sc_interrupt_status);

        if (status == 0) {
                mtx_unlock(&sc->sc_mtx);
                return;
        }

        if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
                glc_rxintr(sc);

        if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
                glc_txintr(sc);

        if (status & GELIC_INT_PHY) {
                lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
                    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

                linkstat = (linkstat & GELIC_LINK_UP) ?
                    LINK_STATE_UP : LINK_STATE_DOWN;
                if (linkstat != sc->sc_ifp->if_link_state)
                        if_link_state_change(sc->sc_ifp, linkstat);
        }

        mtx_unlock(&sc->sc_mtx);
}

static void
glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct glc_softc *sc = ifp->if_softc;
        uint64_t status, junk;

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
            GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

        if (status & GELIC_LINK_UP)
                ifmr->ifm_status |= IFM_ACTIVE;

        if (status & GELIC_SPEED_10)
                ifmr->ifm_active |= IFM_10_T;
        else if (status & GELIC_SPEED_100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (status & GELIC_SPEED_1000)
                ifmr->ifm_active |= IFM_1000_T;

        if (status & GELIC_FULL_DUPLEX)
                ifmr->ifm_active |= IFM_FDX;
        else
                ifmr->ifm_active |= IFM_HDX;
}

static int
glc_media_change(struct ifnet *ifp)
{
        struct glc_softc *sc = ifp->if_softc;
        uint64_t mode, junk;
        int result;

        if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
                return (EINVAL);

        switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
        case IFM_AUTO:
                mode = GELIC_AUTO_NEG;
                break;
        case IFM_10_T:
                mode = GELIC_SPEED_10;
                break;
        case IFM_100_TX:
                mode = GELIC_SPEED_100;
                break;
        case IFM_1000_T:
                mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
                break;
        default:
                return (EINVAL);
        }

        if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
                mode |= GELIC_FULL_DUPLEX;

        result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
            GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

        return (result ? EIO : 0);
}
