FreeBSD/Linux Kernel Cross Reference
sys/mips/adm5120/if_admsw.c


    1 /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or
    8  * without modification, are permitted provided that the following
    9  * conditions are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above
   13  *    copyright notice, this list of conditions and the following
   14  *    disclaimer in the documentation and/or other materials provided
   15  *    with the distribution.
   16  * 3. The names of the authors may not be used to endorse or promote
   17  *    products derived from this software without specific prior
   18  *    written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
   21  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   23  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
   25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   26  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
   27  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   29  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
   31  * OF SUCH DAMAGE.
   32  */
   33 /*
   34  * Copyright (c) 2001 Wasabi Systems, Inc.
   35  * All rights reserved.
   36  *
   37  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
   38  *
   39  * Redistribution and use in source and binary forms, with or without
   40  * modification, are permitted provided that the following conditions
   41  * are met:
   42  * 1. Redistributions of source code must retain the above copyright
   43  *    notice, this list of conditions and the following disclaimer.
   44  * 2. Redistributions in binary form must reproduce the above copyright
   45  *    notice, this list of conditions and the following disclaimer in the
   46  *    documentation and/or other materials provided with the distribution.
   47  * 3. All advertising materials mentioning features or use of this software
   48  *    must display the following acknowledgement:
   49  *      This product includes software developed for the NetBSD Project by
   50  *      Wasabi Systems, Inc.
   51  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   52  *    or promote products derived from this software without specific prior
   53  *    written permission.
   54  *
   55  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   57  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   58  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   59  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   60  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   61  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   62  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   63  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   64  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   65  * POSSIBILITY OF SUCH DAMAGE.
   66  */
   67 
   68 /*
    69  * Device driver for the Ethernet switch engine built into the
    70  * ADM5120 system-on-chip (one network interface per switch port).
   71  *
   72  * TODO:
   73  *
   74  *      Better Rx buffer management; we want to get new Rx buffers
   75  *      to the chip more quickly than we currently do.
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __FBSDID("$FreeBSD: releng/8.2/sys/mips/adm5120/if_admsw.c 215938 2010-11-27 12:26:40Z jchandra $");
   80 
   81 #include <sys/param.h>
   82 #include <sys/systm.h>
   83 #include <sys/bus.h>
   84 #include <sys/kernel.h>
   85 #include <sys/mbuf.h>
   86 #include <sys/malloc.h>
   87 #include <sys/module.h>
   88 #include <sys/rman.h>
   89 #include <sys/socket.h>
   90 #include <sys/sockio.h>
   91 #include <sys/sysctl.h>
   92 #include <machine/bus.h>
   93 
   94 #include <net/ethernet.h>
   95 #include <net/if.h>
   96 #include <net/if_arp.h>
   97 #include <net/if_dl.h>
   98 #include <net/if_media.h>
   99 #include <net/if_mib.h>
  100 #include <net/if_types.h>
  101 
  102 #ifdef INET
  103 #include <netinet/in.h>
  104 #include <netinet/in_systm.h>
  105 #include <netinet/in_var.h>
  106 #include <netinet/ip.h>
  107 #endif
  108 
  109 #include <net/bpf.h>
  110 #include <net/bpfdesc.h>
  111 
  112 #include <mips/adm5120/adm5120reg.h>
  113 #include <mips/adm5120/if_admswreg.h>
  114 #include <mips/adm5120/if_admswvar.h>
  115 
  116 /* TODO: add locking */
   117 #define ADMSW_LOCK(sc) do {} while (0)
   118 #define ADMSW_UNLOCK(sc) do {} while (0)
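/*
 * A possible shape for the missing locking, sketched here for reference
 * only: it assumes a "struct mtx sc_mtx" member added to struct
 * admsw_softc (not present in if_admswvar.h as shipped), so it is not
 * part of the driver as committed.
 */
#if 0
#include <sys/lock.h>
#include <sys/mutex.h>

#define ADMSW_LOCK(sc)          mtx_lock(&(sc)->sc_mtx)
#define ADMSW_UNLOCK(sc)        mtx_unlock(&(sc)->sc_mtx)
#define ADMSW_LOCK_ASSERT(sc)   mtx_assert(&(sc)->sc_mtx, MA_OWNED)

/* In admsw_attach(), before the interrupt handler is hooked up: */
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
/* In admsw_detach(), once the handler has been torn down: */
mtx_destroy(&sc->sc_mtx);
#endif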
  119 
  120 static uint8_t vlan_matrix[SW_DEVS] = {
  121         (1 << 6) | (1 << 0),            /* CPU + port0 */
  122         (1 << 6) | (1 << 1),            /* CPU + port1 */
  123         (1 << 6) | (1 << 2),            /* CPU + port2 */
  124         (1 << 6) | (1 << 3),            /* CPU + port3 */
  125         (1 << 6) | (1 << 4),            /* CPU + port4 */
  126         (1 << 6) | (1 << 5),            /* CPU + port5 */
  127 };
  128 
  129 /* ifnet entry points */
  130 static void     admsw_start(struct ifnet *);
  131 static void     admsw_watchdog(void *);
  132 static int      admsw_ioctl(struct ifnet *, u_long, caddr_t);
  133 static void     admsw_init(void *);
  134 static void     admsw_stop(struct ifnet *, int);
  135 
  136 static void     admsw_reset(struct admsw_softc *);
  137 static void     admsw_set_filter(struct admsw_softc *);
  138 
  139 static void     admsw_txintr(struct admsw_softc *, int);
  140 static void     admsw_rxintr(struct admsw_softc *, int);
  141 static int      admsw_add_rxbuf(struct admsw_softc *, int, int);
  142 #define admsw_add_rxhbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 1)
  143 #define admsw_add_rxlbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 0)
  144 
  145 static int      admsw_mediachange(struct ifnet *);
  146 static void     admsw_mediastatus(struct ifnet *, struct ifmediareq *);
  147 
  148 static int      admsw_intr(void *);
  149 
  150 /* bus entry points */
  151 static int      admsw_probe(device_t dev);
  152 static int      admsw_attach(device_t dev);
  153 static int      admsw_detach(device_t dev);
  154 static int      admsw_shutdown(device_t dev);
  155 
  156 static void
  157 admsw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  158 {
  159         uint32_t *addr;
  160 
  161         if (error)
  162                 return;
  163 
  164         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  165         addr = arg;
  166         *addr = segs->ds_addr;
  167 }
  168 
  169 static void
  170 admsw_rxbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  171 {
  172         struct admsw_descsoft *ds;
  173 
  174         if (error)
  175                 return;
  176 
  177         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  178 
  179         ds = arg;
  180         ds->ds_nsegs = nseg;
  181         ds->ds_addr[0] = segs[0].ds_addr;
  182         ds->ds_len[0] = segs[0].ds_len;
  183 
  184 }
  185 
  186 static void
  187 admsw_mbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, 
  188     bus_size_t mapsize, int error)
  189 {
  190         struct admsw_descsoft *ds;
  191 
  192         if (error)
  193                 return;
  194 
  195         ds = arg;
  196 
  197         if((nseg != 1) && (nseg != 2))
  198                 panic("%s: nseg == %d\n", __func__, nseg);
  199 
  200         ds->ds_nsegs = nseg;
  201         ds->ds_addr[0] = segs[0].ds_addr;
  202         ds->ds_len[0] = segs[0].ds_len;
  203 
  204         if(nseg > 1) {
  205                 ds->ds_addr[1] = segs[1].ds_addr;
  206                 ds->ds_len[1] = segs[1].ds_len;
  207         }
  208 }
  209 
  210 
  211 
  212 static int
  213 admsw_probe(device_t dev)
  214 {
  215 
  216         device_set_desc(dev, "ADM5120 Switch Engine");
  217         return (0);
  218 }
  219 
  220 #define REG_READ(o)     bus_read_4((sc)->mem_res, (o))
  221 #define REG_WRITE(o,v)  bus_write_4((sc)->mem_res, (o),(v))
  222 
  223 static void
  224 admsw_init_bufs(struct admsw_softc *sc)
  225 {
  226         int i;
  227         struct admsw_desc *desc;
  228 
  229         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  230                 if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
  231                         m_freem(sc->sc_txhsoft[i].ds_mbuf);
  232                         sc->sc_txhsoft[i].ds_mbuf = NULL;
  233                 }
  234                 desc = &sc->sc_txhdescs[i];
  235                 desc->data = 0;
  236                 desc->cntl = 0;
  237                 desc->len = MAC_BUFLEN;
  238                 desc->status = 0;
  239                 ADMSW_CDTXHSYNC(sc, i,
  240                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  241         }
  242         sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
  243         ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
  244             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  245 
  246         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  247                 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
  248                         if (admsw_add_rxhbuf(sc, i) != 0)
  249                                 panic("admsw_init_bufs\n");
  250                 } else
  251                         ADMSW_INIT_RXHDESC(sc, i);
  252         }
  253 
  254         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  255                 if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
  256                         m_freem(sc->sc_txlsoft[i].ds_mbuf);
  257                         sc->sc_txlsoft[i].ds_mbuf = NULL;
  258                 }
  259                 desc = &sc->sc_txldescs[i];
  260                 desc->data = 0;
  261                 desc->cntl = 0;
  262                 desc->len = MAC_BUFLEN;
  263                 desc->status = 0;
  264                 ADMSW_CDTXLSYNC(sc, i,
  265                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  266         }
  267         sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
  268         ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
  269             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  270 
  271         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  272                 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
  273                         if (admsw_add_rxlbuf(sc, i) != 0)
  274                                 panic("admsw_init_bufs\n");
  275                 } else
  276                         ADMSW_INIT_RXLDESC(sc, i);
  277         }
  278 
  279         REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
  280         REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
  281         REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
  282         REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));
  283 
  284         sc->sc_txfree = ADMSW_NTXLDESC;
  285         sc->sc_txnext = 0;
  286         sc->sc_txdirty = 0;
  287         sc->sc_rxptr = 0;
  288 }
  289 
  290 static void
  291 admsw_setvlan(struct admsw_softc *sc, char matrix[6])
  292 {
  293         uint32_t i;
  294 
  295         i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
  296         REG_WRITE(VLAN_G1_REG, i);
  297         i = matrix[4] + (matrix[5] << 8);
  298         REG_WRITE(VLAN_G2_REG, i);
  299 }
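/*
 * Worked example: with the default vlan_matrix above (port N plus the
 * CPU port, bit 6), matrix[0..5] = 0x41, 0x42, 0x44, 0x48, 0x50, 0x60,
 * so admsw_setvlan() writes 0x48444241 to VLAN_G1_REG and 0x00006050
 * to VLAN_G2_REG.
 */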
  300 
  301 static void
  302 admsw_reset(struct admsw_softc *sc)
  303 {
  304         uint32_t wdog1;
  305         int i;
  306 
  307         REG_WRITE(PORT_CONF0_REG,
  308             REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
  309         REG_WRITE(CPUP_CONF_REG,
  310             REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);
  311 
  312         /* Wait for DMA to complete.  Overkill.  In 3ms, we can
  313          * send at least two entire 1500-byte packets at 10 Mb/s.
  314          */
  315         DELAY(3000);
  316 
  317         /* The datasheet recommends that we move all PHYs to reset
  318          * state prior to software reset.
  319          */
  320         REG_WRITE(PHY_CNTL2_REG,
  321             REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);
  322 
  323         /* Reset the switch. */
  324         REG_WRITE(ADMSW_SW_RES, 0x1);
  325 
  326         DELAY(100 * 1000);
  327 
  328         REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);
  329 
  330         /* begin old code */
  331         REG_WRITE(CPUP_CONF_REG,
  332             CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
  333             CPUP_CONF_DMCP_MASK);
  334 
  335         REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);
  336 
  337         REG_WRITE(PHY_CNTL2_REG,
  338             REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
  339             PHY_CNTL2_AMDIX_MASK);
  340 
  341         REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);
  342 
  343         REG_WRITE(ADMSW_INT_MASK, INT_MASK);
  344         REG_WRITE(ADMSW_INT_ST, INT_MASK);
  345 
   346         /*
   347          * While we sit in DDB, interrupt servicing stops, the RX
   348          * ring fills up, and once the free block counter falls
   349          * below the FC threshold the switch starts to emit 802.3x
   350          * PAUSE frames.  This can upset peer switches.
   351          *
   352          * Prevent this from happening by disabling the FC and D2
   353          * thresholds.
   354          */
  355         REG_WRITE(FC_TH_REG,
  356             REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));
  357 
  358         admsw_setvlan(sc, vlan_matrix);
  359 
  360         for (i = 0; i < SW_DEVS; i++) {
  361                 REG_WRITE(MAC_WT1_REG,
  362                     sc->sc_enaddr[2] |
  363                     (sc->sc_enaddr[3]<<8) |
  364                     (sc->sc_enaddr[4]<<16) |
  365                     ((sc->sc_enaddr[5]+i)<<24));
  366                 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
  367                     (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
  368                     MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
  369 
  370                 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
  371         }
  372 
  373         wdog1 = REG_READ(ADM5120_WDOG1);
  374         REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
  375 }
  376 
  377 static int
  378 admsw_attach(device_t dev)
  379 {
  380         uint8_t enaddr[ETHER_ADDR_LEN];
  381         struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
  382         struct ifnet *ifp;
  383         int error, i, rid;
  384 
  385         sc->sc_dev = dev;
  386         device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
  387         sc->ndevs = 0;
  388 
  389         /* XXXMIPS: fix it */
  390         enaddr[0] = 0x00;
  391         enaddr[1] = 0x0C;
  392         enaddr[2] = 0x42;
  393         enaddr[3] = 0x07;
  394         enaddr[4] = 0xB2;
  395         enaddr[5] = 0x4E;
  396 
  397         memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));
  398 
  399         device_printf(sc->sc_dev, "base Ethernet address %s\n",
  400             ether_sprintf(enaddr));
  401         callout_init(&sc->sc_watchdog, 1);
  402 
  403         rid = 0;
  404         if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 
  405             RF_ACTIVE)) == NULL) {
  406                 device_printf(dev, "unable to allocate memory resource\n");
  407                 return (ENXIO);
  408         }
  409 
  410         /* Hook up the interrupt handler. */
  411         rid = 0;
  412         if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
  413             RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  414                 device_printf(dev, "unable to allocate IRQ resource\n");
  415                 return (ENXIO);
  416         }
  417 
  418         if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, 
  419             admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
  420                 device_printf(dev, 
  421                     "WARNING: unable to register interrupt handler\n");
  422                 return (error);
  423         }
  424 
  425         /*
  426          * Allocate the control data structures, and create and load the
  427          * DMA map for it.
  428          */
  429         if ((error = bus_dma_tag_create(NULL, 4, 0, 
  430             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  431             NULL, NULL, sizeof(struct admsw_control_data), 1,
  432             sizeof(struct admsw_control_data), 0, NULL, NULL, 
  433             &sc->sc_control_dmat)) != 0) {
  434                 device_printf(sc->sc_dev, 
  435                     "unable to create control data DMA map, error = %d\n", 
  436                     error);
  437                 return (error);
  438         }
  439 
  440         if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
  441             (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, 
  442             &sc->sc_cddmamap)) != 0) {
  443                 device_printf(sc->sc_dev, 
  444                     "unable to allocate control data, error = %d\n", error);
  445                 return (error);
  446         }
  447 
  448         if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
  449             sc->sc_control_data, sizeof(struct admsw_control_data), 
  450             admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
  451                 device_printf(sc->sc_dev, 
  452                     "unable to load control data DMA map, error = %d\n", error);
  453                 return (error);
  454         }
  455 
  456         /*
  457          * Create the transmit buffer DMA maps.
  458          */
  459         if ((error = bus_dma_tag_create(NULL, 1, 0, 
  460             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  461             NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, 
  462             &sc->sc_bufs_dmat)) != 0) {
  463                 device_printf(sc->sc_dev, 
   464                     "unable to create buffer DMA tag, error = %d\n", 
  465                     error);
  466                 return (error);
  467         }
  468 
  469         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  470                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  471                     &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
  472                         device_printf(sc->sc_dev, 
  473                             "unable to create txh DMA map %d, error = %d\n", 
  474                             i, error);
  475                         return (error);
  476                 }
  477                 sc->sc_txhsoft[i].ds_mbuf = NULL;
  478         }
  479 
  480         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  481                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  482                     &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
  483                         device_printf(sc->sc_dev, 
  484                             "unable to create txl DMA map %d, error = %d\n", 
  485                             i, error);
  486                         return (error);
  487                 }
  488                 sc->sc_txlsoft[i].ds_mbuf = NULL;
  489         }
  490 
  491         /*
  492          * Create the receive buffer DMA maps.
  493          */
  494         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  495                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, 
  496                      &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
  497                         device_printf(sc->sc_dev, 
  498                             "unable to create rxh DMA map %d, error = %d\n", 
  499                             i, error);
  500                         return (error);
  501                 }
  502                 sc->sc_rxhsoft[i].ds_mbuf = NULL;
  503         }
  504 
  505         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  506                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  507                     &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
  508                         device_printf(sc->sc_dev, 
  509                             "unable to create rxl DMA map %d, error = %d\n",
  510                             i, error);
  511                         return (error);
  512                 }
  513                 sc->sc_rxlsoft[i].ds_mbuf = NULL;
  514         }
  515 
  516         admsw_init_bufs(sc);
  517         admsw_reset(sc);
  518 
  519         for (i = 0; i < SW_DEVS; i++) {
  520                 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, 
  521                     admsw_mediastatus);
  522                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
  523                 ifmedia_add(&sc->sc_ifmedia[i], 
  524                     IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
  525                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
  526                 ifmedia_add(&sc->sc_ifmedia[i], 
  527                     IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
  528                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
  529                 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);
  530 
  531                 ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);
  532 
  533                 /* Setup interface parameters */
  534                 ifp->if_softc = sc;
  535                 if_initname(ifp, device_get_name(dev), i);
  536                 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  537                 ifp->if_ioctl = admsw_ioctl;
  538                 ifp->if_output = ether_output;
  539                 ifp->if_start = admsw_start;
  540                 ifp->if_init = admsw_init;
  541                 ifp->if_mtu = ETHERMTU;
  542                 ifp->if_baudrate = IF_Mbps(100);
  543                 IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen));
  544                 ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen);
  545                 IFQ_SET_READY(&ifp->if_snd);
  546                 ifp->if_capabilities |= IFCAP_VLAN_MTU;
  547 
  548                 /* Attach the interface. */
  549                 ether_ifattach(ifp, enaddr);
  550                 enaddr[5]++;
  551         }
  552 
  553         /* XXX: admwdog_attach(sc); */
  554 
  555         /* leave interrupts and cpu port disabled */
  556         return (0);
  557 }
  558 
  559 static int
  560 admsw_detach(device_t dev)
  561 {
  562 
  563         printf("TODO: DETACH\n");
  564         return (0);
  565 }
  566 
  567 /*
  568  * admsw_shutdown:
  569  *
  570  *      Make sure the interface is stopped at reboot time.
  571  */
  572 static int
  573 admsw_shutdown(device_t dev)
  574 {
  575         struct admsw_softc *sc;
  576         int i;
  577 
  578         sc = device_get_softc(dev);
  579         for (i = 0; i < SW_DEVS; i++)
  580                 admsw_stop(sc->sc_ifnet[i], 1);
  581 
  582         return (0);
  583 }
  584 
  585 /*
  586  * admsw_start:         [ifnet interface function]
  587  *
  588  *      Start packet transmission on the interface.
  589  */
  590 static void
  591 admsw_start(struct ifnet *ifp)
  592 {
  593         struct admsw_softc *sc = ifp->if_softc;
  594         struct mbuf *m0, *m;
  595         struct admsw_descsoft *ds;
  596         struct admsw_desc *desc;
  597         bus_dmamap_t dmamap;
  598         struct ether_header *eh;
  599         int error, nexttx, len, i;
  600         static int vlan = 0;
  601 
  602         /*
  603          * Loop through the send queues, setting up transmit descriptors
   604          * until we drain the queues, or use up all available transmit
  605          * descriptors.
  606          */
  607         for (;;) {
  608                 vlan++;
  609                 if (vlan == SW_DEVS)
  610                         vlan = 0;
  611                 i = vlan;
  612                 for (;;) {
  613                         ifp = sc->sc_ifnet[i];
  614                         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) 
  615                             == IFF_DRV_RUNNING) {
  616                                 /* Grab a packet off the queue. */
  617                                 IF_DEQUEUE(&ifp->if_snd, m0);
  618                                 if (m0 != NULL)
  619                                         break;
  620                         }
  621                         i++;
  622                         if (i == SW_DEVS)
  623                                 i = 0;
  624                         if (i == vlan)
  625                                 return;
  626                 }
  627                 vlan = i;
  628                 m = NULL;
  629 
  630                 /* Get a spare descriptor. */
  631                 if (sc->sc_txfree == 0) {
  632                         /* No more slots left; notify upper layer. */
  633                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  634                         break;
  635                 }
  636                 nexttx = sc->sc_txnext;
  637                 desc = &sc->sc_txldescs[nexttx];
  638                 ds = &sc->sc_txlsoft[nexttx];
  639                 dmamap = ds->ds_dmamap;
  640 
  641                 /*
  642                  * Load the DMA map.  If this fails, the packet either
   643                  * didn't fit in the allotted number of segments, or we
  644                  * were short on resources.  In this case, we'll copy
  645                  * and try again.
  646                  */
  647                 if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
  648                     bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
  649                     admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
  650                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  651                         if (m == NULL) {
  652                                 device_printf(sc->sc_dev, 
  653                                     "unable to allocate Tx mbuf\n");
  654                                 break;
  655                         }
  656                         if (m0->m_pkthdr.len > MHLEN) {
  657                                 MCLGET(m, M_DONTWAIT);
  658                                 if ((m->m_flags & M_EXT) == 0) {
  659                                         device_printf(sc->sc_dev, 
  660                                             "unable to allocate Tx cluster\n");
  661                                         m_freem(m);
  662                                         break;
  663                                 }
  664                         }
  665                         m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
  666                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
  667                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  668                         if (m->m_pkthdr.len < ETHER_MIN_LEN) {
  669                                 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
  670                                         panic("admsw_start: M_TRAILINGSPACE\n");
  671                                 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
  672                                     ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
  673                                 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
  674                         }
  675                         error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, 
  676                             dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
  677                         if (error) {
  678                                 device_printf(sc->sc_dev, 
  679                                     "unable to load Tx buffer, error = %d\n", 
  680                                     error);
  681                                 break;
  682                         }
  683                 }
  684 
  685                 if (m != NULL) {
  686                         m_freem(m0);
  687                         m0 = m;
  688                 }
  689 
  690                 /*
  691                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  692                  */
  693 
  694                 /* Sync the DMA map. */
  695                 bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);
  696 
  697                 if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
  698                         panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
  699                 desc->data = ds->ds_addr[0];
  700                 desc->len = len = ds->ds_len[0];
  701                 if (ds->ds_nsegs > 1) {
  702                         len += ds->ds_len[1];
  703                         desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
  704                 } else
  705                         desc->cntl = 0;
  706                 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
  707                 eh = mtod(m0, struct ether_header *);
  708                 if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
  709                     m0->m_pkthdr.csum_flags & CSUM_IP)
  710                         desc->status |= ADM5120_DMA_CSUM;
  711                 if (nexttx == ADMSW_NTXLDESC - 1)
  712                         desc->data |= ADM5120_DMA_RINGEND;
  713                 desc->data |= ADM5120_DMA_OWN;
  714 
  715                 /* Sync the descriptor. */
  716                 ADMSW_CDTXLSYNC(sc, nexttx,
  717                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  718 
  719                 REG_WRITE(SEND_TRIG_REG, 1);
  720                 /* printf("send slot %d\n",nexttx); */
  721 
  722                 /*
  723                  * Store a pointer to the packet so we can free it later.
  724                  */
  725                 ds->ds_mbuf = m0;
  726 
  727                 /* Advance the Tx pointer. */
  728                 sc->sc_txfree--;
  729                 sc->sc_txnext = ADMSW_NEXTTXL(nexttx);
  730 
  731                 /* Pass the packet to any BPF listeners. */
  732                 BPF_MTAP(ifp, m0);
  733 
  734                 /* Set a watchdog timer in case the chip flakes out. */
  735                 sc->sc_timer = 5;
  736         }
  737 }
  738 
  739 /*
  740  * admsw_watchdog:      [ifnet interface function]
  741  *
  742  *      Watchdog timer handler.
  743  */
  744 static void
  745 admsw_watchdog(void *arg)
  746 {
  747         struct admsw_softc *sc = arg;
  748         struct ifnet *ifp;
  749         int vlan;
  750 
  751         callout_reset(&sc->sc_watchdog, hz, admsw_watchdog, sc);
  752         if (sc->sc_timer == 0 || --sc->sc_timer > 0)
  753                 return;
  754 
  755         /* Check if an interrupt was lost. */
  756         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  757                 device_printf(sc->sc_dev, "watchdog false alarm\n");
  758                 return;
  759         }
  760         if (sc->sc_timer != 0)
  761                 device_printf(sc->sc_dev, "watchdog timer is %d!\n",  
  762                     sc->sc_timer);
  763         admsw_txintr(sc, 0);
  764         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  765                 device_printf(sc->sc_dev, "tx IRQ lost (queue empty)\n");
  766                 return;
  767         }
  768         if (sc->sc_timer != 0) {
  769                 device_printf(sc->sc_dev, "tx IRQ lost (timer recharged)\n");
  770                 return;
  771         }
  772 
  773         device_printf(sc->sc_dev, "device timeout, txfree = %d\n",  
  774             sc->sc_txfree);
  775         for (vlan = 0; vlan < SW_DEVS; vlan++)
  776                 admsw_stop(sc->sc_ifnet[vlan], 0);
  777         admsw_init(sc);
  778 
  779         ifp = sc->sc_ifnet[0];
  780 
  781         /* Try to get more packets going. */
  782         admsw_start(ifp);
  783 }
  784 
  785 /*
  786  * admsw_ioctl:         [ifnet interface function]
  787  *
  788  *      Handle control requests from the operator.
  789  */
  790 static int
  791 admsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  792 {
  793         struct admsw_softc *sc = ifp->if_softc;
  794         struct ifdrv *ifd;
  795         int error, port;
  796 
  797         ADMSW_LOCK(sc);
  798 
  799         switch (cmd) {
  800         case SIOCSIFMEDIA:
  801         case SIOCGIFMEDIA:
  802                 port = 0;
  803                 while(port < SW_DEVS)
  804                         if(ifp == sc->sc_ifnet[port])
  805                                  break;
  806                         else 
  807                                 port++;
  808                 if (port >= SW_DEVS)
  809                         error = EOPNOTSUPP;
  810                 else
  811                         error = ifmedia_ioctl(ifp, (struct ifreq *)data,
  812                             &sc->sc_ifmedia[port], cmd);
  813                 break;
  814 
  815         case SIOCGDRVSPEC:
  816         case SIOCSDRVSPEC:
  817                 ifd = (struct ifdrv *) data;
  818                 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
  819                         error = EINVAL;
  820                         break;
  821                 }
  822                 if (cmd == SIOCGDRVSPEC) {
  823                         error = copyout(vlan_matrix, ifd->ifd_data,
  824                             sizeof(vlan_matrix));
  825                 } else {
  826                         error = copyin(ifd->ifd_data, vlan_matrix,
  827                             sizeof(vlan_matrix));
  828                         admsw_setvlan(sc, vlan_matrix);
  829                 }
  830                 break;
  831 
  832         default:
  833                 error = ether_ioctl(ifp, cmd, data);
  834                 if (error == ENETRESET) {
  835                         /*
  836                          * Multicast list has changed; set the hardware filter
  837                          * accordingly.
  838                          */
  839                         admsw_set_filter(sc);
  840                         error = 0;
  841                 }
  842                 break;
  843         }
  844 
  845         /* Try to get more packets going. */
  846         admsw_start(ifp);
  847 
  848         ADMSW_UNLOCK(sc);
  849         return (error);
  850 }
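/*
 * For reference, a minimal userland sketch of exercising the
 * SIOCGDRVSPEC case above to read the 6-byte VLAN matrix.  It assumes
 * the first port attached as "admsw0"; the handler only accepts
 * ifd_cmd 0 with ifd_len equal to 6.  Illustration only, not part of
 * the driver.
 *
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <sys/sockio.h>
 *      #include <net/if.h>
 *      #include <err.h>
 *      #include <stdint.h>
 *      #include <string.h>
 *
 *      uint8_t matrix[6];
 *      struct ifdrv ifd;
 *      int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifd, 0, sizeof(ifd));
 *      strlcpy(ifd.ifd_name, "admsw0", sizeof(ifd.ifd_name));
 *      ifd.ifd_cmd = 0;
 *      ifd.ifd_len = sizeof(matrix);
 *      ifd.ifd_data = matrix;
 *      if (ioctl(s, SIOCGDRVSPEC, &ifd) == -1)
 *              err(1, "SIOCGDRVSPEC");
 */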
  851 
  852 
  853 /*
  854  * admsw_intr:
  855  *
  856  *      Interrupt service routine.
  857  */
  858 static int
  859 admsw_intr(void *arg)
  860 {
  861         struct admsw_softc *sc = arg;
  862         uint32_t pending;
  863 
  864         pending = REG_READ(ADMSW_INT_ST);
  865         REG_WRITE(ADMSW_INT_ST, pending);
  866 
  867         if (sc->ndevs == 0)
  868                 return (FILTER_STRAY);
  869 
  870         if ((pending & ADMSW_INTR_RHD) != 0)
  871                 admsw_rxintr(sc, 1);
  872 
  873         if ((pending & ADMSW_INTR_RLD) != 0)
  874                 admsw_rxintr(sc, 0);
  875 
  876         if ((pending & ADMSW_INTR_SHD) != 0)
  877                 admsw_txintr(sc, 1);
  878 
  879         if ((pending & ADMSW_INTR_SLD) != 0)
  880                 admsw_txintr(sc, 0);
  881 
  882         return (FILTER_HANDLED);
  883 }
  884 
  885 /*
  886  * admsw_txintr:
  887  *
  888  *      Helper; handle transmit interrupts.
  889  */
  890 static void
  891 admsw_txintr(struct admsw_softc *sc, int prio)
  892 {
  893         struct ifnet *ifp;
  894         struct admsw_desc *desc;
  895         struct admsw_descsoft *ds;
  896         int i, vlan;
  897         int gotone = 0;
  898 
  899         /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  900         for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
  901             i = ADMSW_NEXTTXL(i)) {
  902 
  903                 ADMSW_CDTXLSYNC(sc, i,
  904                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  905 
  906                 desc = &sc->sc_txldescs[i];
  907                 ds = &sc->sc_txlsoft[i];
  908                 if (desc->data & ADM5120_DMA_OWN) {
  909                         ADMSW_CDTXLSYNC(sc, i,
  910                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  911                         break;
  912                 }
  913 
  914                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, 
  915                     BUS_DMASYNC_POSTWRITE);
  916                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
  917                 m_freem(ds->ds_mbuf);
  918                 ds->ds_mbuf = NULL;
  919 
  920                 vlan = ffs(desc->status & 0x3f) - 1;
  921                 if (vlan < 0 || vlan >= SW_DEVS)
  922                         panic("admsw_txintr: bad vlan\n");
  923                 ifp = sc->sc_ifnet[vlan];
  924                 gotone = 1;
  925                 /* printf("clear tx slot %d\n",i); */
  926 
  927                 ifp->if_opackets++;
  928 
  929                 sc->sc_txfree++;
  930         }
  931 
  932         if (gotone) {
  933                 sc->sc_txdirty = i;
  934                 for (vlan = 0; vlan < SW_DEVS; vlan++)
  935                         sc->sc_ifnet[vlan]->if_drv_flags &= ~IFF_DRV_OACTIVE;
  936 
  937                 ifp = sc->sc_ifnet[0];
  938 
  939                 /* Try to queue more packets. */
  940                 admsw_start(ifp);
  941 
  942                 /*
  943                  * If there are no more pending transmissions,
  944                  * cancel the watchdog timer.
  945                  */
  946                 if (sc->sc_txfree == ADMSW_NTXLDESC)
  947                         sc->sc_timer = 0;
  948 
  949         }
  950 
  951         /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  952 }
  953 
  954 /*
  955  * admsw_rxintr:
  956  *
  957  *      Helper; handle receive interrupts.
  958  */
  959 static void
  960 admsw_rxintr(struct admsw_softc *sc, int high)
  961 {
  962         struct ifnet *ifp;
  963         struct admsw_descsoft *ds;
  964         struct mbuf *m;
  965         uint32_t stat;
  966         int i, len, port, vlan;
  967 
  968         /* printf("rxintr\n"); */
  969 
  970         if (high)
  971                 panic("admsw_rxintr: high priority packet\n");
  972 
  973 #if 1
  974         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  975             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  976         if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  977                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  978                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  979         else {
  980                 i = sc->sc_rxptr;
  981                 do {
  982                         ADMSW_CDRXLSYNC(sc, i, 
  983                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  984                         i = ADMSW_NEXTRXL(i);
  985                         /* the ring is empty, just return. */
  986                         if (i == sc->sc_rxptr)
  987                                 return;
  988                         ADMSW_CDRXLSYNC(sc, i, 
  989                             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  990                 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
  991 
  992                 ADMSW_CDRXLSYNC(sc, i, 
  993                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  994 
  995                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  996                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  997 
  998                 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  999                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
 1000                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1001                 else {
 1002                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
 1003                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1004                         /* We've fallen behind the chip: catch it. */
 1005 #if 0
 1006                         device_printf(sc->sc_dev, 
 1007                            "RX ring resync, base=%x, work=%x, %d -> %d\n",
 1008                             REG_READ(RECV_LBADDR_REG),
 1009                             REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
 1010 #endif
 1011                         sc->sc_rxptr = i;
 1012                         /* ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); */
 1013                 }
 1014         }
 1015 #endif
 1016         for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
 1017                 ds = &sc->sc_rxlsoft[i];
 1018 
 1019                 ADMSW_CDRXLSYNC(sc, i, 
 1020                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1021 
 1022                 if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
 1023                         ADMSW_CDRXLSYNC(sc, i, 
 1024                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1025                         break;
 1026                 }
 1027 
 1028                 /* printf("process slot %d\n",i); */
 1029 
 1030                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1031                     BUS_DMASYNC_POSTREAD);
 1032 
 1033                 stat = sc->sc_rxldescs[i].status;
 1034                 len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
 1035                 len -= ETHER_CRC_LEN;
 1036                 port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
 1037 
 1038                 for (vlan = 0; vlan < SW_DEVS; vlan++)
 1039                         if ((1 << port) & vlan_matrix[vlan])
 1040                                 break;
 1041 
 1042                 if (vlan == SW_DEVS)
 1043                         vlan = 0;
 1044 
 1045                 ifp = sc->sc_ifnet[vlan];
 1046 
 1047                 m = ds->ds_mbuf;
 1048                 if (admsw_add_rxlbuf(sc, i) != 0) {
 1049                         ifp->if_ierrors++;
 1050                         ADMSW_INIT_RXLDESC(sc, i);
 1051                         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1052                             BUS_DMASYNC_PREREAD);
 1053                         continue;
 1054                 }
 1055 
 1056                 m->m_pkthdr.rcvif = ifp;
 1057                 m->m_pkthdr.len = m->m_len = len;
 1058                 if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
 1059                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1060                         if (!(stat & ADM5120_DMA_CSUMFAIL))
 1061                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1062                 }
 1063 
 1064                 BPF_MTAP(ifp, m);
 1065 
 1066                 /* Pass it on. */
 1067                 (*ifp->if_input)(ifp, m);
 1068                 ifp->if_ipackets++;
 1069         }
 1070 
 1071         /* Update the receive pointer. */
 1072         sc->sc_rxptr = i;
 1073 }
 1074 
 1075 /*
 1076  * admsw_init:          [ifnet interface function]
 1077  *
 1078  *      Initialize the interface.
 1079  */
 1080 static void
 1081 admsw_init(void *xsc)
 1082 {
 1083         struct admsw_softc *sc = xsc;
 1084         struct ifnet *ifp;
 1085         int i;
 1086 
 1087         for (i = 0; i < SW_DEVS; i++) {
 1088                 ifp = sc->sc_ifnet[i];
 1089                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1090                         if (sc->ndevs == 0) {
 1091                                 admsw_init_bufs(sc);
 1092                                 admsw_reset(sc);
 1093                                 REG_WRITE(CPUP_CONF_REG,
 1094                                     CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
 1095                                     CPUP_CONF_DMCP_MASK);
 1096                                 /* clear all pending interrupts */
 1097                                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1098 
 1099                                 /* enable needed interrupts */
 1100                                 REG_WRITE(ADMSW_INT_MASK, 
 1101                                     REG_READ(ADMSW_INT_MASK) & 
 1102                                     ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | 
 1103                                         ADMSW_INTR_RHD | ADMSW_INTR_RLD | 
 1104                                         ADMSW_INTR_HDF | ADMSW_INTR_LDF));
 1105 
 1106                                 callout_reset(&sc->sc_watchdog, hz,
 1107                                     admsw_watchdog, sc);
 1108                         }
 1109                         sc->ndevs++;
 1110                 }
 1111 
 1112 
 1113                 /* mark iface as running */
 1114                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1115                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1116         }
 1117 
 1118         /* Set the receive filter. */
 1119         admsw_set_filter(sc);
 1120 }
 1121 
 1122 /*
 1123  * admsw_stop:          [ifnet interface function]
 1124  *
 1125  *      Stop transmission on the interface.
 1126  */
 1127 static void
 1128 admsw_stop(struct ifnet *ifp, int disable)
 1129 {
 1130         struct admsw_softc *sc = ifp->if_softc;
 1131 
 1132         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1133                 return;
 1134 
 1135         if (--sc->ndevs == 0) {
 1136                 /* printf("debug: de-initializing hardware\n"); */
 1137 
 1138                 /* disable cpu port */
 1139                 REG_WRITE(CPUP_CONF_REG,
 1140                                 CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
 1141                                 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);
 1142 
 1143                 /* XXX We should disable, then clear? --dyoung */
 1144                 /* clear all pending interrupts */
 1145                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1146 
 1147                 /* disable interrupts */
 1148                 REG_WRITE(ADMSW_INT_MASK, INT_MASK);
 1149 
 1150                 /* Cancel the watchdog timer. */
 1151                 sc->sc_timer = 0;
 1152                 callout_stop(&sc->sc_watchdog);
 1153         }
 1154 
 1155         /* Mark the interface as down. */
 1156         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1157 
 1158         return;
 1159 }
 1160 
 1161 /*
 1162  * admsw_set_filter:
 1163  *
 1164  *      Set up the receive filter.
 1165  */
 1166 static void
 1167 admsw_set_filter(struct admsw_softc *sc)
 1168 {
 1169         int i;
 1170         uint32_t allmc, anymc, conf, promisc;
 1171         struct ifnet *ifp;
 1172         struct ifmultiaddr *ifma;
 1173 
 1174         /* Find which ports should be operated in promisc mode. */
 1175         allmc = anymc = promisc = 0;
 1176         for (i = 0; i < SW_DEVS; i++) {
 1177                 ifp = sc->sc_ifnet[i];
 1178                 if (ifp->if_flags & IFF_PROMISC)
 1179                         promisc |= vlan_matrix[i];
 1180 
 1181                 ifp->if_flags &= ~IFF_ALLMULTI;
 1182 
 1183                 if_maddr_rlock(ifp);
 1184                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
 1185                 {
 1186                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1187                                 continue;
 1188 
 1189                         anymc |= vlan_matrix[i];
 1190                 }
 1191                 if_maddr_runlock(ifp);
 1192         }
 1193 
 1194         conf = REG_READ(CPUP_CONF_REG);
 1195         /* 1 Disable forwarding of unknown & multicast packets to
 1196          *   CPU on all ports.
 1197          * 2 Enable forwarding of unknown & multicast packets to
 1198          *   CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
 1199          */
 1200         conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
 1201         /* Enable forwarding of unknown packets to CPU on selected ports. */
 1202         conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
 1203         conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1204         conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1205         REG_WRITE(CPUP_CONF_REG, conf);
 1206 }
 1207 
 1208 /*
 1209  * admsw_add_rxbuf:
 1210  *
 1211  *      Add a receive buffer to the indicated descriptor.
 1212  */
 1213 int
 1214 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
 1215 {
 1216         struct admsw_descsoft *ds;
 1217         struct mbuf *m;
 1218         int error;
 1219 
 1220         if (high)
 1221                 ds = &sc->sc_rxhsoft[idx];
 1222         else
 1223                 ds = &sc->sc_rxlsoft[idx];
 1224 
 1225         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1226         if (m == NULL)
 1227                 return (ENOBUFS);
 1228 
 1229         MCLGET(m, M_DONTWAIT);
 1230         if ((m->m_flags & M_EXT) == 0) {
 1231                 m_freem(m);
 1232                 return (ENOBUFS);
 1233         }
 1234 
 1235         if (ds->ds_mbuf != NULL)
 1236                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
 1237 
 1238         ds->ds_mbuf = m;
 1239 
 1240         error = bus_dmamap_load(sc->sc_bufs_dmat, ds->ds_dmamap,
 1241             m->m_ext.ext_buf, m->m_ext.ext_size, admsw_rxbuf_map_addr, 
 1242             ds, BUS_DMA_NOWAIT);
 1243         if (error) {
 1244                 device_printf(sc->sc_dev, 
 1245                     "can't load rx DMA map %d, error = %d\n", idx, error);
 1246                 panic("admsw_add_rxbuf");       /* XXX */
 1247         }
 1248 
 1249         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD);
 1250 
 1251         if (high)
 1252                 ADMSW_INIT_RXHDESC(sc, idx);
 1253         else
 1254                 ADMSW_INIT_RXLDESC(sc, idx);
 1255 
 1256         return (0);
 1257 }
 1258 
 1259 int
 1260 admsw_mediachange(struct ifnet *ifp)
 1261 {
 1262         struct admsw_softc *sc = ifp->if_softc;
 1263         int port = 0;
 1264         struct ifmedia *ifm;
 1265         int old, new, val;
 1266 
 1267         while(port < SW_DEVS) {
 1268                 if(ifp == sc->sc_ifnet[port])
 1269                         break;
 1270                 else
 1271                         port++;
 1272         }
 1273 
 1274         ifm = &sc->sc_ifmedia[port];
 1275 
 1276         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1277                 return (EINVAL);
 1278 
 1279         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
 1280                 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1281         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
 1282                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1283                         val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1284                 else
 1285                         val = PHY_CNTL2_100M;
 1286         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
 1287                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1288                         val = PHY_CNTL2_FDX;
 1289                 else
 1290                         val = 0;
 1291         } else
 1292                 return (EINVAL);
 1293 
 1294         old = REG_READ(PHY_CNTL2_REG);
 1295         new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
 1296         new |= (val << port);
 1297 
 1298         if (new != old)
 1299                 REG_WRITE(PHY_CNTL2_REG, new);
 1300 
 1301         return (0);
 1302 }
 1303 
 1304 void
 1305 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1306 {
 1307         struct admsw_softc *sc = ifp->if_softc;
 1308         int port = 0;
 1309         int status;
 1310 
 1311         while(port < SW_DEVS) {
 1312                 if(ifp == sc->sc_ifnet[port])
 1313                         break;
 1314                 else
 1315                         port++;
 1316         }
 1317 
 1318         ifmr->ifm_status = IFM_AVALID;
 1319         ifmr->ifm_active = IFM_ETHER;
 1320 
 1321         status = REG_READ(PHY_ST_REG) >> port;
 1322 
 1323         if ((status & PHY_ST_LINKUP) == 0) {
 1324                 ifmr->ifm_active |= IFM_NONE;
 1325                 return;
 1326         }
 1327 
 1328         ifmr->ifm_status |= IFM_ACTIVE;
 1329         ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
 1330         if (status & PHY_ST_FDX)
 1331                 ifmr->ifm_active |= IFM_FDX;
 1332 }
 1333 
 1334 static device_method_t admsw_methods[] = {
 1335         /* Device interface */
 1336         DEVMETHOD(device_probe,         admsw_probe),
 1337         DEVMETHOD(device_attach,        admsw_attach),
 1338         DEVMETHOD(device_detach,        admsw_detach),
 1339         DEVMETHOD(device_shutdown,      admsw_shutdown),
 1340 
 1341         { 0, 0 }
 1342 };
 1343 
 1344 static devclass_t admsw_devclass;
 1345 
 1346 static driver_t admsw_driver = {
 1347         "admsw",
 1348         admsw_methods,
 1349         sizeof(struct admsw_softc),
 1350 };
 1351 
 1352 DRIVER_MODULE(admsw, obio, admsw_driver, admsw_devclass, 0, 0);
 1353 MODULE_DEPEND(admsw, ether, 1, 1, 1);


This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.