FreeBSD/Linux Kernel Cross Reference
sys/mips/adm5120/if_admsw.c

    1 /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or
    8  * without modification, are permitted provided that the following
    9  * conditions are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above
   13  *    copyright notice, this list of conditions and the following
   14  *    disclaimer in the documentation and/or other materials provided
   15  *    with the distribution.
   16  * 3. The names of the authors may not be used to endorse or promote
   17  *    products derived from this software without specific prior
   18  *    written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
   21  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   23  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
   25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   26  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
   27  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   29  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
   31  * OF SUCH DAMAGE.
   32  */
   33 /*
   34  * Copyright (c) 2001 Wasabi Systems, Inc.
   35  * All rights reserved.
   36  *
   37  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
   38  *
   39  * Redistribution and use in source and binary forms, with or without
   40  * modification, are permitted provided that the following conditions
   41  * are met:
   42  * 1. Redistributions of source code must retain the above copyright
   43  *    notice, this list of conditions and the following disclaimer.
   44  * 2. Redistributions in binary form must reproduce the above copyright
   45  *    notice, this list of conditions and the following disclaimer in the
   46  *    documentation and/or other materials provided with the distribution.
   47  * 3. All advertising materials mentioning features or use of this software
   48  *    must display the following acknowledgement:
   49  *      This product includes software developed for the NetBSD Project by
   50  *      Wasabi Systems, Inc.
   51  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   52  *    or promote products derived from this software without specific prior
   53  *    written permission.
   54  *
   55  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   57  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   58  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   59  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   60  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   61  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   62  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   63  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   64  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   65  * POSSIBILITY OF SUCH DAMAGE.
   66  */
   67 
   68 /*
   69  * Device driver for the ADM5120 Switch Engine (the Ethernet switch
   70  * built into the ADM5120 SoC).
   71  *
   72  * TODO:
   73  *
   74  *      Better Rx buffer management; we want to get new Rx buffers
   75  *      to the chip more quickly than we currently do.
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __FBSDID("$FreeBSD: releng/8.0/sys/mips/adm5120/if_admsw.c 195049 2009-06-26 11:45:06Z rwatson $");
   80 
   81 #include <sys/param.h>
   82 #include <sys/systm.h>
   83 #include <sys/bus.h>
   84 #include <sys/kernel.h>
   85 #include <sys/mbuf.h>
   86 #include <sys/malloc.h>
   87 #include <sys/module.h>
   88 #include <sys/rman.h>
   89 #include <sys/socket.h>
   90 #include <sys/sockio.h>
   91 #include <sys/sysctl.h>
   92 #include <machine/bus.h>
   93 
   94 #include <net/ethernet.h>
   95 #include <net/if.h>
   96 #include <net/if_arp.h>
   97 #include <net/if_dl.h>
   98 #include <net/if_media.h>
   99 #include <net/if_mib.h>
  100 #include <net/if_types.h>
  101 
  102 #ifdef INET
  103 #include <netinet/in.h>
  104 #include <netinet/in_systm.h>
  105 #include <netinet/in_var.h>
  106 #include <netinet/ip.h>
  107 #endif
  108 
  109 #include <net/bpf.h>
  110 #include <net/bpfdesc.h>
  111 
  112 #include <mips/adm5120/adm5120reg.h>
  113 #include <mips/adm5120/if_admswreg.h>
  114 #include <mips/adm5120/if_admswvar.h>
  115 
  116 /* TODO: add locking */
  117 #define ADMSW_LOCK(sc) do {} while (0)
  118 #define ADMSW_UNLOCK(sc) do {} while (0)
  119 
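      /*
       * Per-port VLAN membership programmed into the switch's VLAN group
       * registers.  Bit 6 is the CPU port, so each entry bridges exactly one
       * front-panel port with the CPU and every port appears as its own ifnet.
       */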
  120 static uint8_t vlan_matrix[SW_DEVS] = {
  121         (1 << 6) | (1 << 0),            /* CPU + port0 */
  122         (1 << 6) | (1 << 1),            /* CPU + port1 */
  123         (1 << 6) | (1 << 2),            /* CPU + port2 */
  124         (1 << 6) | (1 << 3),            /* CPU + port3 */
  125         (1 << 6) | (1 << 4),            /* CPU + port4 */
  126         (1 << 6) | (1 << 5),            /* CPU + port5 */
  127 };
  128 
  129 /* ifnet entry points */
  130 static void     admsw_start(struct ifnet *);
  131 static void     admsw_watchdog(struct ifnet *);
  132 static int      admsw_ioctl(struct ifnet *, u_long, caddr_t);
  133 static void     admsw_init(void *);
  134 static void     admsw_stop(struct ifnet *, int);
  135 
  136 static void     admsw_reset(struct admsw_softc *);
  137 static void     admsw_set_filter(struct admsw_softc *);
  138 
  139 static void     admsw_txintr(struct admsw_softc *, int);
  140 static void     admsw_rxintr(struct admsw_softc *, int);
  141 static int      admsw_add_rxbuf(struct admsw_softc *, int, int);
  142 #define admsw_add_rxhbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 1)
  143 #define admsw_add_rxlbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 0)
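      /*
       * The switch provides separate high- and low-priority descriptor rings;
       * the "h"/"l" variants select between them.  Both rings are initialized,
       * but all normal traffic moves over the low-priority rings, and a
       * high-priority receive is treated as a fatal error in admsw_rxintr().
       */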
  144 
  145 static int      admsw_mediachange(struct ifnet *);
  146 static void     admsw_mediastatus(struct ifnet *, struct ifmediareq *);
  147 
  148 static int      admsw_intr(void *);
  149 
  150 /* bus entry points */
  151 static int      admsw_probe(device_t dev);
  152 static int      admsw_attach(device_t dev);
  153 static int      admsw_detach(device_t dev);
  154 static int      admsw_shutdown(device_t dev);
  155 
  156 static void
  157 admsw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  158 {
  159         uint32_t *addr;
  160 
  161         if (error)
  162                 return;
  163 
  164         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  165         addr = arg;
  166         *addr = segs->ds_addr;
  167 }
  168 
  169 static void
  170 admsw_rxbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  171 {
  172         struct admsw_descsoft *ds;
  173 
  174         if (error)
  175                 return;
  176 
  177         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  178 
  179         ds = arg;
  180         ds->ds_nsegs = nseg;
  181         ds->ds_addr[0] = segs[0].ds_addr;
  182         ds->ds_len[0] = segs[0].ds_len;
  183 
  184 }
  185 
  186 static void
  187 admsw_mbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, 
  188     bus_size_t mapsize, int error)
  189 {
  190         struct admsw_descsoft *ds;
  191 
  192         if (error)
  193                 return;
  194 
  195         ds = arg;
  196 
  197         if((nseg != 1) && (nseg != 2))
  198                 panic("%s: nseg == %d\n", __func__, nseg);
  199 
  200         ds->ds_nsegs = nseg;
  201         ds->ds_addr[0] = segs[0].ds_addr;
  202         ds->ds_len[0] = segs[0].ds_len;
  203 
  204         if(nseg > 1) {
  205                 ds->ds_addr[1] = segs[1].ds_addr;
  206                 ds->ds_len[1] = segs[1].ds_len;
  207         }
  208 }
  209 
  210 
  211 
  212 static int
  213 admsw_probe(device_t dev)
  214 {
  215 
  216         device_set_desc(dev, "ADM5120 Switch Engine");
  217         return (0);
  218 }
  219 
  220 #define REG_READ(o)     bus_read_4((sc)->mem_res, (o))
  221 #define REG_WRITE(o,v)  bus_write_4((sc)->mem_res, (o),(v))
  222 
  223 static void
  224 admsw_init_bufs(struct admsw_softc *sc)
  225 {
  226         int i;
  227         struct admsw_desc *desc;
  228 
  229         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  230                 if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
  231                         m_freem(sc->sc_txhsoft[i].ds_mbuf);
  232                         sc->sc_txhsoft[i].ds_mbuf = NULL;
  233                 }
  234                 desc = &sc->sc_txhdescs[i];
  235                 desc->data = 0;
  236                 desc->cntl = 0;
  237                 desc->len = MAC_BUFLEN;
  238                 desc->status = 0;
  239                 ADMSW_CDTXHSYNC(sc, i,
  240                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  241         }
  242         sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
  243         ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
  244             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  245 
  246         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  247                 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
  248                         if (admsw_add_rxhbuf(sc, i) != 0)
  249                                 panic("admsw_init_bufs\n");
  250                 } else
  251                         ADMSW_INIT_RXHDESC(sc, i);
  252         }
  253 
  254         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  255                 if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
  256                         m_freem(sc->sc_txlsoft[i].ds_mbuf);
  257                         sc->sc_txlsoft[i].ds_mbuf = NULL;
  258                 }
  259                 desc = &sc->sc_txldescs[i];
  260                 desc->data = 0;
  261                 desc->cntl = 0;
  262                 desc->len = MAC_BUFLEN;
  263                 desc->status = 0;
  264                 ADMSW_CDTXLSYNC(sc, i,
  265                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  266         }
  267         sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
  268         ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
  269             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  270 
  271         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  272                 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
  273                         if (admsw_add_rxlbuf(sc, i) != 0)
  274                                 panic("admsw_init_bufs\n");
  275                 } else
  276                         ADMSW_INIT_RXLDESC(sc, i);
  277         }
  278 
  279         REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
  280         REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
  281         REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
  282         REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));
  283 
  284         sc->sc_txfree = ADMSW_NTXLDESC;
  285         sc->sc_txnext = 0;
  286         sc->sc_txdirty = 0;
  287         sc->sc_rxptr = 0;
  288 }
  289 
  290 static void
  291 admsw_setvlan(struct admsw_softc *sc, char matrix[6])
  292 {
  293         uint32_t i;
  294 
  295         i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
  296         REG_WRITE(VLAN_G1_REG, i);
  297         i = matrix[4] + (matrix[5] << 8);
  298         REG_WRITE(VLAN_G2_REG, i);
  299 }
  300 
  301 static void
  302 admsw_reset(struct admsw_softc *sc)
  303 {
  304         uint32_t wdog1;
  305         int i;
  306 
  307         REG_WRITE(PORT_CONF0_REG,
  308             REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
  309         REG_WRITE(CPUP_CONF_REG,
  310             REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);
  311 
  312         /* Wait for DMA to complete.  Overkill.  In 3ms, we can
  313          * send at least two entire 1500-byte packets at 10 Mb/s.
  314          */
  315         DELAY(3000);
  316 
  317         /* The datasheet recommends that we move all PHYs to reset
  318          * state prior to software reset.
  319          */
  320         REG_WRITE(PHY_CNTL2_REG,
  321             REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);
  322 
  323         /* Reset the switch. */
  324         REG_WRITE(ADMSW_SW_RES, 0x1);
  325 
  326         DELAY(100 * 1000);
  327 
  328         REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);
  329 
  330         /* begin old code */
  331         REG_WRITE(CPUP_CONF_REG,
  332             CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
  333             CPUP_CONF_DMCP_MASK);
  334 
  335         REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);
  336 
  337         REG_WRITE(PHY_CNTL2_REG,
  338             REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
  339             PHY_CNTL2_AMDIX_MASK);
  340 
  341         REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);
  342 
  343         REG_WRITE(ADMSW_INT_MASK, INT_MASK);
  344         REG_WRITE(ADMSW_INT_ST, INT_MASK);
  345 
  346         /*
  347          * While in DDB we stop servicing interrupts; the RX ring
  348          * fills up, and once the free block counter falls below the
  349          * FC threshold the switch starts to emit 802.3x PAUSE
  350          * frames.  This can upset peer switches.
  351          *
  352          * Stop this from happening by disabling FC and D2
  353          * thresholds.
  354          */
  355         REG_WRITE(FC_TH_REG,
  356             REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));
  357 
  358         admsw_setvlan(sc, vlan_matrix);
  359 
  360         for (i = 0; i < SW_DEVS; i++) {
  361                 REG_WRITE(MAC_WT1_REG,
  362                     sc->sc_enaddr[2] |
  363                     (sc->sc_enaddr[3]<<8) |
  364                     (sc->sc_enaddr[4]<<16) |
  365                     ((sc->sc_enaddr[5]+i)<<24));
  366                 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
  367                     (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
  368                     MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
  369 
  370                 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
  371         }
  372 
  373         wdog1 = REG_READ(ADM5120_WDOG1);
  374         REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
  375 }
  376 
  377 static int
  378 admsw_attach(device_t dev)
  379 {
  380         uint8_t enaddr[ETHER_ADDR_LEN];
  381         struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
  382         struct ifnet *ifp;
  383         int error, i, rid;
  384 
  385         sc->sc_dev = dev;
  386         device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
  387         sc->ndevs = 0;
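              /*
               * ndevs counts interfaces currently marked running: the switch
               * hardware is (re)initialized when the count goes from 0 to 1
               * in admsw_init() and quiesced again when admsw_stop() drops
               * it back to 0.
               */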
  388 
  389         /* XXXMIPS: fix it */
  390         enaddr[0] = 0x00;
  391         enaddr[1] = 0x0C;
  392         enaddr[2] = 0x42;
  393         enaddr[3] = 0x07;
  394         enaddr[4] = 0xB2;
  395         enaddr[5] = 0x4E;
  396 
  397         memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));
  398 
  399         device_printf(sc->sc_dev, "base Ethernet address %s\n",
  400             ether_sprintf(enaddr));
  401 
  402         rid = 0;
  403         if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 
  404             RF_ACTIVE)) == NULL) {
  405                 device_printf(dev, "unable to allocate memory resource\n");
  406                 return (ENXIO);
  407         }
  408 
  409         /* Hook up the interrupt handler. */
  410         rid = 0;
  411         if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
  412             RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  413                 device_printf(dev, "unable to allocate IRQ resource\n");
  414                 return (ENXIO);
  415         }
  416 
  417         if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, 
  418             admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
  419                 device_printf(dev, 
  420                     "WARNING: unable to register interrupt handler\n");
  421                 return (error);
  422         }
  423 
  424         /*
  425          * Allocate the control data structures, and create and load the
  426          * DMA map for it.
  427          */
  428         if ((error = bus_dma_tag_create(NULL, 4, 0, 
  429             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  430             NULL, NULL, sizeof(struct admsw_control_data), 1,
  431             sizeof(struct admsw_control_data), 0, NULL, NULL, 
  432             &sc->sc_control_dmat)) != 0) {
  433                 device_printf(sc->sc_dev, 
  434                     "unable to create control data DMA map, error = %d\n", 
  435                     error);
  436                 return (error);
  437         }
  438 
  439         if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
  440             (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, 
  441             &sc->sc_cddmamap)) != 0) {
  442                 device_printf(sc->sc_dev, 
  443                     "unable to allocate control data, error = %d\n", error);
  444                 return (error);
  445         }
  446 
  447         if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
  448             sc->sc_control_data, sizeof(struct admsw_control_data), 
  449             admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
  450                 device_printf(sc->sc_dev, 
  451                     "unable to load control data DMA map, error = %d\n", error);
  452                 return (error);
  453         }
  454 
  455         /*
  456          * Create the transmit buffer DMA maps.
  457          */
  458         if ((error = bus_dma_tag_create(NULL, 1, 0, 
  459             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  460             NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, 
  461             &sc->sc_bufs_dmat)) != 0) {
  462                 device_printf(sc->sc_dev, 
  463                     "unable to create Tx/Rx buffer DMA tag, error = %d\n", 
  464                     error);
  465                 return (error);
  466         }
  467 
  468         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  469                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  470                     &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
  471                         device_printf(sc->sc_dev, 
  472                             "unable to create txh DMA map %d, error = %d\n", 
  473                             i, error);
  474                         return (error);
  475                 }
  476                 sc->sc_txhsoft[i].ds_mbuf = NULL;
  477         }
  478 
  479         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  480                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  481                     &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
  482                         device_printf(sc->sc_dev, 
  483                             "unable to create txl DMA map %d, error = %d\n", 
  484                             i, error);
  485                         return (error);
  486                 }
  487                 sc->sc_txlsoft[i].ds_mbuf = NULL;
  488         }
  489 
  490         /*
  491          * Create the receive buffer DMA maps.
  492          */
  493         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  494                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, 
  495                      &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
  496                         device_printf(sc->sc_dev, 
  497                             "unable to create rxh DMA map %d, error = %d\n", 
  498                             i, error);
  499                         return (error);
  500                 }
  501                 sc->sc_rxhsoft[i].ds_mbuf = NULL;
  502         }
  503 
  504         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  505                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  506                     &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
  507                         device_printf(sc->sc_dev, 
  508                             "unable to create rxl DMA map %d, error = %d\n",
  509                             i, error);
  510                         return (error);
  511                 }
  512                 sc->sc_rxlsoft[i].ds_mbuf = NULL;
  513         }
  514 
  515         admsw_init_bufs(sc);
  516         admsw_reset(sc);
  517 
  518         for (i = 0; i < SW_DEVS; i++) {
  519                 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, 
  520                     admsw_mediastatus);
  521                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
  522                 ifmedia_add(&sc->sc_ifmedia[i], 
  523                     IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
  524                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
  525                 ifmedia_add(&sc->sc_ifmedia[i], 
  526                     IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
  527                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
  528                 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);
  529 
  530                 ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);
  531 
  532                 /* Setup interface parameters */
  533                 ifp->if_softc = sc;
  534                 if_initname(ifp, device_get_name(dev), i);
  535                 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  536                 ifp->if_ioctl = admsw_ioctl;
  537                 ifp->if_output = ether_output;
  538                 ifp->if_start = admsw_start;
  539                 ifp->if_watchdog = admsw_watchdog;
  540                 ifp->if_timer = 0;
  541                 ifp->if_init = admsw_init;
  542                 ifp->if_mtu = ETHERMTU;
  543                 ifp->if_baudrate = IF_Mbps(100);
  544                 IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
  545                 ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, IFQ_MAXLEN);
  546                 IFQ_SET_READY(&ifp->if_snd);
  547                 ifp->if_capabilities |= IFCAP_VLAN_MTU;
  548 
  549                 /* Attach the interface. */
  550                 ether_ifattach(ifp, enaddr);
  551                 enaddr[5]++;
  552         }
  553 
  554         /* XXX: admwdog_attach(sc); */
  555 
  556         /* leave interrupts and cpu port disabled */
  557         return (0);
  558 }
  559 
  560 static int
  561 admsw_detach(device_t dev)
  562 {
  563 
  564         printf("TODO: DETACH\n");
  565         return (0);
  566 }
  567 
  568 /*
  569  * admsw_shutdown:
  570  *
  571  *      Make sure the interface is stopped at reboot time.
  572  */
  573 static int
  574 admsw_shutdown(device_t dev)
  575 {
  576         struct admsw_softc *sc;
  577         int i;
  578 
  579         sc = device_get_softc(dev);
  580         for (i = 0; i < SW_DEVS; i++)
  581                 admsw_stop(sc->sc_ifnet[i], 1);
  582 
  583         return (0);
  584 }
  585 
  586 /*
  587  * admsw_start:         [ifnet interface function]
  588  *
  589  *      Start packet transmission on the interface.
  590  */
  591 static void
  592 admsw_start(struct ifnet *ifp)
  593 {
  594         struct admsw_softc *sc = ifp->if_softc;
  595         struct mbuf *m0, *m;
  596         struct admsw_descsoft *ds;
  597         struct admsw_desc *desc;
  598         bus_dmamap_t dmamap;
  599         struct ether_header *eh;
  600         int error, nexttx, len, i;
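              /*
               * Round-robin index of the last port queue serviced; static so
               * that successive calls pick up where the previous one stopped
               * and all ports get a fair share of the transmit ring.
               */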
  601         static int vlan = 0;
  602 
  603         /*
  604          * Loop through the send queues, setting up transmit descriptors
  605          * until we drain the queues, or use up all available transmit
  606          * descriptors.
  607          */
  608         for (;;) {
  609                 vlan++;
  610                 if (vlan == SW_DEVS)
  611                         vlan = 0;
  612                 i = vlan;
  613                 for (;;) {
  614                         ifp = sc->sc_ifnet[i];
  615                         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) 
  616                             == IFF_DRV_RUNNING) {
  617                                 /* Grab a packet off the queue. */
  618                                 IF_DEQUEUE(&ifp->if_snd, m0);
  619                                 if (m0 != NULL)
  620                                         break;
  621                         }
  622                         i++;
  623                         if (i == SW_DEVS)
  624                                 i = 0;
  625                         if (i == vlan)
  626                                 return;
  627                 }
  628                 vlan = i;
  629                 m = NULL;
  630 
  631                 /* Get a spare descriptor. */
  632                 if (sc->sc_txfree == 0) {
  633                         /* No more slots left; notify upper layer. */
  634                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  635                         break;
  636                 }
  637                 nexttx = sc->sc_txnext;
  638                 desc = &sc->sc_txldescs[nexttx];
  639                 ds = &sc->sc_txlsoft[nexttx];
  640                 dmamap = ds->ds_dmamap;
  641 
  642                 /*
  643                  * Load the DMA map.  If this fails, the packet either
  644                  * didn't fit in the allotted number of segments, or we
  645                  * were short on resources.  In this case, we'll copy
  646                  * and try again.
  647                  */
  648                 if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
  649                     bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
  650                     admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
  651                         MGETHDR(m, M_DONTWAIT, MT_DATA);
  652                         if (m == NULL) {
  653                                 device_printf(sc->sc_dev, 
  654                                     "unable to allocate Tx mbuf\n");
  655                                 break;
  656                         }
  657                         if (m0->m_pkthdr.len > MHLEN) {
  658                                 MCLGET(m, M_DONTWAIT);
  659                                 if ((m->m_flags & M_EXT) == 0) {
  660                                         device_printf(sc->sc_dev, 
  661                                             "unable to allocate Tx cluster\n");
  662                                         m_freem(m);
  663                                         break;
  664                                 }
  665                         }
  666                         m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
  667                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
  668                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  669                         if (m->m_pkthdr.len < ETHER_MIN_LEN) {
  670                                 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
  671                                         panic("admsw_start: M_TRAILINGSPACE\n");
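                                /*
                                 * Note: only ETHER_MIN_LEN - ETHER_CRC_LEN - len
                                 * bytes are zeroed here, yet m_len is set to
                                 * ETHER_MIN_LEN below, so the final ETHER_CRC_LEN
                                 * bytes of the padded frame are left
                                 * uninitialized.
                                 */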
  672                                 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
  673                                     ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
  674                                 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
  675                         }
  676                         error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, 
  677                             dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
  678                         if (error) {
  679                                 device_printf(sc->sc_dev, 
  680                                     "unable to load Tx buffer, error = %d\n", 
  681                                     error);
  682                                 break;
  683                         }
  684                 }
  685 
  686                 if (m != NULL) {
  687                         m_freem(m0);
  688                         m0 = m;
  689                 }
  690 
  691                 /*
  692                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  693                  */
  694 
  695                 /* Sync the DMA map. */
  696                 bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);
  697 
  698                 if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
  699                         panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
  700                 desc->data = ds->ds_addr[0];
  701                 desc->len = len = ds->ds_len[0];
  702                 if (ds->ds_nsegs > 1) {
  703                         len += ds->ds_len[1];
  704                         desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
  705                 } else
  706                         desc->cntl = 0;
  707                 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
  708                 eh = mtod(m0, struct ether_header *);
  709                 if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
  710                     m0->m_pkthdr.csum_flags & CSUM_IP)
  711                         desc->status |= ADM5120_DMA_CSUM;
  712                 if (nexttx == ADMSW_NTXLDESC - 1)
  713                         desc->data |= ADM5120_DMA_RINGEND;
  714                 desc->data |= ADM5120_DMA_OWN;
  715 
  716                 /* Sync the descriptor. */
  717                 ADMSW_CDTXLSYNC(sc, nexttx,
  718                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  719 
  720                 REG_WRITE(SEND_TRIG_REG, 1);
  721                 /* printf("send slot %d\n",nexttx); */
  722 
  723                 /*
  724                  * Store a pointer to the packet so we can free it later.
  725                  */
  726                 ds->ds_mbuf = m0;
  727 
  728                 /* Advance the Tx pointer. */
  729                 sc->sc_txfree--;
  730                 sc->sc_txnext = ADMSW_NEXTTXL(nexttx);
  731 
  732                 /* Pass the packet to any BPF listeners. */
  733                 BPF_MTAP(ifp, m0);
  734 
  735                 /* Set a watchdog timer in case the chip flakes out. */
  736                 sc->sc_ifnet[0]->if_timer = 5;
  737         }
  738 }
  739 
  740 /*
  741  * admsw_watchdog:      [ifnet interface function]
  742  *
  743  *      Watchdog timer handler.
  744  */
  745 static void
  746 admsw_watchdog(struct ifnet *ifp)
  747 {
  748         struct admsw_softc *sc = ifp->if_softc;
  749         int vlan;
  750 
  751         /* Check if an interrupt was lost. */
  752         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  753                 device_printf(sc->sc_dev, "watchdog false alarm\n");
  754                 return;
  755         }
  756         if (sc->sc_ifnet[0]->if_timer != 0)
  757                 device_printf(sc->sc_dev, "watchdog timer is %d!\n",  
  758                     sc->sc_ifnet[0]->if_timer);
  759         admsw_txintr(sc, 0);
  760         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  761                 device_printf(sc->sc_dev, "tx IRQ lost (queue empty)\n");
  762                 return;
  763         }
  764         if (sc->sc_ifnet[0]->if_timer != 0) {
  765                 device_printf(sc->sc_dev, "tx IRQ lost (timer recharged)\n");
  766                 return;
  767         }
  768 
  769         device_printf(sc->sc_dev, "device timeout, txfree = %d\n",  
  770             sc->sc_txfree);
  771         for (vlan = 0; vlan < SW_DEVS; vlan++)
  772                 admsw_stop(sc->sc_ifnet[vlan], 0);
  773         admsw_init(sc);
  774 
  775         /* Try to get more packets going. */
  776         admsw_start(ifp);
  777 }
  778 
  779 /*
  780  * admsw_ioctl:         [ifnet interface function]
  781  *
  782  *      Handle control requests from the operator.
  783  */
  784 static int
  785 admsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  786 {
  787         struct admsw_softc *sc = ifp->if_softc;
  788         struct ifdrv *ifd;
  789         int error, port;
  790 
  791         ADMSW_LOCK(sc);
  792 
  793         switch (cmd) {
  794         case SIOCSIFMEDIA:
  795         case SIOCGIFMEDIA:
  796                 port = 0;
  797                 while(port < SW_DEVS)
  798                         if(ifp == sc->sc_ifnet[port])
  799                                  break;
  800                         else 
  801                                 port++;
  802                 if (port >= SW_DEVS)
  803                         error = EOPNOTSUPP;
  804                 else
  805                         error = ifmedia_ioctl(ifp, (struct ifreq *)data,
  806                             &sc->sc_ifmedia[port], cmd);
  807                 break;
  808 
  809         case SIOCGDRVSPEC:
  810         case SIOCSDRVSPEC:
  811                 ifd = (struct ifdrv *) data;
  812                 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
  813                         error = EINVAL;
  814                         break;
  815                 }
  816                 if (cmd == SIOCGDRVSPEC) {
  817                         error = copyout(vlan_matrix, ifd->ifd_data,
  818                             sizeof(vlan_matrix));
  819                 } else {
  820                         error = copyin(ifd->ifd_data, vlan_matrix,
  821                             sizeof(vlan_matrix));
  822                         admsw_setvlan(sc, vlan_matrix);
  823                 }
  824                 break;
  825 
  826         default:
  827                 error = ether_ioctl(ifp, cmd, data);
  828                 if (error == ENETRESET) {
  829                         /*
  830                          * Multicast list has changed; set the hardware filter
  831                          * accordingly.
  832                          */
  833                         admsw_set_filter(sc);
  834                         error = 0;
  835                 }
  836                 break;
  837         }
  838 
  839         /* Try to get more packets going. */
  840         admsw_start(ifp);
  841 
  842         ADMSW_UNLOCK(sc);
  843         return (error);
  844 }
  845 
  846 
  847 /*
  848  * admsw_intr:
  849  *
  850  *      Interrupt service routine.
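       *
       *      Registered via bus_setup_intr() as a filter with no ithread
       *      handler; the pending interrupt bits are acknowledged and RX/TX
       *      completion is processed directly in interrupt context.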
  851  */
  852 static int
  853 admsw_intr(void *arg)
  854 {
  855         struct admsw_softc *sc = arg;
  856         uint32_t pending;
  857 
  858         pending = REG_READ(ADMSW_INT_ST);
  859         REG_WRITE(ADMSW_INT_ST, pending);
  860 
  861         if (sc->ndevs == 0)
  862                 return (FILTER_STRAY);
  863 
  864         if ((pending & ADMSW_INTR_RHD) != 0)
  865                 admsw_rxintr(sc, 1);
  866 
  867         if ((pending & ADMSW_INTR_RLD) != 0)
  868                 admsw_rxintr(sc, 0);
  869 
  870         if ((pending & ADMSW_INTR_SHD) != 0)
  871                 admsw_txintr(sc, 1);
  872 
  873         if ((pending & ADMSW_INTR_SLD) != 0)
  874                 admsw_txintr(sc, 0);
  875 
  876         return (FILTER_HANDLED);
  877 }
  878 
  879 /*
  880  * admsw_txintr:
  881  *
  882  *      Helper; handle transmit interrupts.
  883  */
  884 static void
  885 admsw_txintr(struct admsw_softc *sc, int prio)
  886 {
  887         struct ifnet *ifp;
  888         struct admsw_desc *desc;
  889         struct admsw_descsoft *ds;
  890         int i, vlan;
  891         int gotone = 0;
  892 
  893         /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  894         for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
  895             i = ADMSW_NEXTTXL(i)) {
  896 
  897                 ADMSW_CDTXLSYNC(sc, i,
  898                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  899 
  900                 desc = &sc->sc_txldescs[i];
  901                 ds = &sc->sc_txlsoft[i];
  902                 if (desc->data & ADM5120_DMA_OWN) {
  903                         ADMSW_CDTXLSYNC(sc, i,
  904                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  905                         break;
  906                 }
  907 
  908                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, 
  909                     BUS_DMASYNC_POSTWRITE);
  910                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
  911                 m_freem(ds->ds_mbuf);
  912                 ds->ds_mbuf = NULL;
  913 
  914                 vlan = ffs(desc->status & 0x3f) - 1;
  915                 if (vlan < 0 || vlan >= SW_DEVS)
  916                         panic("admsw_txintr: bad vlan\n");
  917                 ifp = sc->sc_ifnet[vlan];
  918                 gotone = 1;
  919                 /* printf("clear tx slot %d\n",i); */
  920 
  921                 ifp->if_opackets++;
  922 
  923                 sc->sc_txfree++;
  924         }
  925 
  926         if (gotone) {
  927                 sc->sc_txdirty = i;
  928                 for (vlan = 0; vlan < SW_DEVS; vlan++)
  929                         sc->sc_ifnet[vlan]->if_drv_flags &= ~IFF_DRV_OACTIVE;
  930 
  931                 ifp = sc->sc_ifnet[0];
  932 
  933                 /* Try to queue more packets. */
  934                 admsw_start(ifp);
  935 
  936                 /*
  937                  * If there are no more pending transmissions,
  938                  * cancel the watchdog timer.
  939                  */
  940                 if (sc->sc_txfree == ADMSW_NTXLDESC)
  941                         ifp->if_timer = 0;
  942 
  943         }
  944 
  945         /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  946 }
  947 
  948 /*
  949  * admsw_rxintr:
  950  *
  951  *      Helper; handle receive interrupts.
  952  */
  953 static void
  954 admsw_rxintr(struct admsw_softc *sc, int high)
  955 {
  956         struct ifnet *ifp;
  957         struct admsw_descsoft *ds;
  958         struct mbuf *m;
  959         uint32_t stat;
  960         int i, len, port, vlan;
  961 
  962         /* printf("rxintr\n"); */
  963 
  964         if (high)
  965                 panic("admsw_rxintr: high priority packet\n");
  966 
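              /*
               * Resynchronize with the chip before walking the ring: if the
               * descriptor at sc_rxptr is still owned by the hardware, scan
               * forward for the first completed descriptor and, when one is
               * found, advance sc_rxptr to it.
               */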
  967 #if 1
  968         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  969             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  970         if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  971                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  972                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  973         else {
  974                 i = sc->sc_rxptr;
  975                 do {
  976                         ADMSW_CDRXLSYNC(sc, i, 
  977                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  978                         i = ADMSW_NEXTRXL(i);
  979                         /* the ring is empty, just return. */
  980                         if (i == sc->sc_rxptr)
  981                                 return;
  982                         ADMSW_CDRXLSYNC(sc, i, 
  983                             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  984                 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
  985 
  986                 ADMSW_CDRXLSYNC(sc, i, 
  987                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  988 
  989                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  990                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  991 
  992                 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  993                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  994                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  995                 else {
  996                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  997                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  998                         /* We've fallen behind the chip: catch it. */
  999 #if 0
 1000                         device_printf(sc->sc_dev, 
 1001                            "RX ring resync, base=%x, work=%x, %d -> %d\n",
 1002                             REG_READ(RECV_LBADDR_REG),
 1003                             REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
 1004 #endif
 1005                         sc->sc_rxptr = i;
 1006                         /* ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); */
 1007                 }
 1008         }
 1009 #endif
 1010         for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
 1011                 ds = &sc->sc_rxlsoft[i];
 1012 
 1013                 ADMSW_CDRXLSYNC(sc, i, 
 1014                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1015 
 1016                 if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
 1017                         ADMSW_CDRXLSYNC(sc, i, 
 1018                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1019                         break;
 1020                 }
 1021 
 1022                 /* printf("process slot %d\n",i); */
 1023 
 1024                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1025                     BUS_DMASYNC_POSTREAD);
 1026 
 1027                 stat = sc->sc_rxldescs[i].status;
 1028                 len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
 1029                 len -= ETHER_CRC_LEN;
 1030                 port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
 1031 
 1032                 for (vlan = 0; vlan < SW_DEVS; vlan++)
 1033                         if ((1 << port) & vlan_matrix[vlan])
 1034                                 break;
 1035 
 1036                 if (vlan == SW_DEVS)
 1037                         vlan = 0;
 1038 
 1039                 ifp = sc->sc_ifnet[vlan];
 1040 
 1041                 m = ds->ds_mbuf;
 1042                 if (admsw_add_rxlbuf(sc, i) != 0) {
 1043                         ifp->if_ierrors++;
 1044                         ADMSW_INIT_RXLDESC(sc, i);
 1045                         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1046                             BUS_DMASYNC_PREREAD);
 1047                         continue;
 1048                 }
 1049 
 1050                 m->m_pkthdr.rcvif = ifp;
 1051                 m->m_pkthdr.len = m->m_len = len;
 1052                 if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
 1053                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1054                         if (!(stat & ADM5120_DMA_CSUMFAIL))
 1055                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1056                 }
 1057 
 1058                 BPF_MTAP(ifp, m);
 1059 
 1060                 /* Pass it on. */
 1061                 (*ifp->if_input)(ifp, m);
 1062                 ifp->if_ipackets++;
 1063         }
 1064 
 1065         /* Update the receive pointer. */
 1066         sc->sc_rxptr = i;
 1067 }
 1068 
 1069 /*
 1070  * admsw_init:          [ifnet interface function]
 1071  *
 1072  *      Initialize the interface.
 1073  */
 1074 static void
 1075 admsw_init(void *xsc)
 1076 {
 1077         struct admsw_softc *sc = xsc;
 1078         struct ifnet *ifp;
 1079         int i;
 1080 
 1081         for (i = 0; i < SW_DEVS; i++) {
 1082                 ifp = sc->sc_ifnet[i];
 1083                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1084                         if (sc->ndevs == 0) {
 1085                                 admsw_init_bufs(sc);
 1086                                 admsw_reset(sc);
 1087                                 REG_WRITE(CPUP_CONF_REG,
 1088                                     CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
 1089                                     CPUP_CONF_DMCP_MASK);
 1090                                 /* clear all pending interrupts */
 1091                                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1092 
 1093                                 /* enable needed interrupts */
 1094                                 REG_WRITE(ADMSW_INT_MASK, 
 1095                                     REG_READ(ADMSW_INT_MASK) & 
 1096                                     ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | 
 1097                                         ADMSW_INTR_RHD | ADMSW_INTR_RLD | 
 1098                                         ADMSW_INTR_HDF | ADMSW_INTR_LDF));
 1099                         }
 1100                         sc->ndevs++;
 1101                 }
 1102 
 1103 
 1104                 /* mark iface as running */
 1105                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1106                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1107         }
 1108 
 1109         /* Set the receive filter. */
 1110         admsw_set_filter(sc);
 1111 }
 1112 
 1113 /*
 1114  * admsw_stop:          [ifnet interface function]
 1115  *
 1116  *      Stop transmission on the interface.
 1117  */
 1118 static void
 1119 admsw_stop(struct ifnet *ifp, int disable)
 1120 {
 1121         struct admsw_softc *sc = ifp->if_softc;
 1122 
 1123         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1124                 return;
 1125 
 1126         if (--sc->ndevs == 0) {
 1127                 /* printf("debug: de-initializing hardware\n"); */
 1128 
 1129                 /* disable cpu port */
 1130                 REG_WRITE(CPUP_CONF_REG,
 1131                                 CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
 1132                                 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);
 1133 
 1134                 /* XXX We should disable, then clear? --dyoung */
 1135                 /* clear all pending interrupts */
 1136                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1137 
 1138                 /* disable interrupts */
 1139                 REG_WRITE(ADMSW_INT_MASK, INT_MASK);
 1140         }
 1141 
 1142         /* Mark the interface as down and cancel the watchdog timer. */
 1143         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1144         ifp->if_timer = 0;
 1145 
 1146         return;
 1147 }
 1148 
 1149 /*
 1150  * admsw_set_filter:
 1151  *
 1152  *      Set up the receive filter.
 1153  */
 1154 static void
 1155 admsw_set_filter(struct admsw_softc *sc)
 1156 {
 1157         int i;
 1158         uint32_t allmc, anymc, conf, promisc;
 1159         struct ifnet *ifp;
 1160         struct ifmultiaddr *ifma;
 1161 
 1162         /* Find which ports should be operated in promisc mode. */
 1163         allmc = anymc = promisc = 0;
 1164         for (i = 0; i < SW_DEVS; i++) {
 1165                 ifp = sc->sc_ifnet[i];
 1166                 if (ifp->if_flags & IFF_PROMISC)
 1167                         promisc |= vlan_matrix[i];
 1168 
 1169                 ifp->if_flags &= ~IFF_ALLMULTI;
 1170 
 1171                 if_maddr_rlock(ifp);
 1172                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
 1173                 {
 1174                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1175                                 continue;
 1176 
 1177                         anymc |= vlan_matrix[i];
 1178                 }
 1179                 if_maddr_runlock(ifp);
 1180         }
 1181 
 1182         conf = REG_READ(CPUP_CONF_REG);
 1183         /* 1 Disable forwarding of unknown & multicast packets to
 1184          *   CPU on all ports.
 1185          * 2 Enable forwarding of unknown & multicast packets to
 1186          *   CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
 1187          */
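              /*
               * DUNP/DMCP are "disable forwarding to the CPU" bits: first set
               * them for every port, then XOR in the selected port masks to
               * clear the disable bit (i.e. enable forwarding) only on those
               * ports.  allmc is never set above, so its term is a no-op here.
               */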
 1188         conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
 1189         /* Enable forwarding of unknown packets to CPU on selected ports. */
 1190         conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
 1191         conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1192         conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1193         REG_WRITE(CPUP_CONF_REG, conf);
 1194 }
 1195 
 1196 /*
 1197  * admsw_add_rxbuf:
 1198  *
 1199  *      Add a receive buffer to the indicated descriptor.
 1200  */
 1201 int
 1202 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
 1203 {
 1204         struct admsw_descsoft *ds;
 1205         struct mbuf *m;
 1206         int error;
 1207 
 1208         if (high)
 1209                 ds = &sc->sc_rxhsoft[idx];
 1210         else
 1211                 ds = &sc->sc_rxlsoft[idx];
 1212 
 1213         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1214         if (m == NULL)
 1215                 return (ENOBUFS);
 1216 
 1217         MCLGET(m, M_DONTWAIT);
 1218         if ((m->m_flags & M_EXT) == 0) {
 1219                 m_freem(m);
 1220                 return (ENOBUFS);
 1221         }
 1222 
 1223         if (ds->ds_mbuf != NULL)
 1224                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
 1225 
 1226         ds->ds_mbuf = m;
 1227 
 1228         error = bus_dmamap_load(sc->sc_bufs_dmat, ds->ds_dmamap,
 1229             m->m_ext.ext_buf, m->m_ext.ext_size, admsw_rxbuf_map_addr, 
 1230             ds, BUS_DMA_NOWAIT);
 1231         if (error) {
 1232                 device_printf(sc->sc_dev, 
 1233                     "can't load rx DMA map %d, error = %d\n", idx, error);
 1234                 panic("admsw_add_rxbuf");       /* XXX */
 1235         }
 1236 
 1237         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD);
 1238 
 1239         if (high)
 1240                 ADMSW_INIT_RXHDESC(sc, idx);
 1241         else
 1242                 ADMSW_INIT_RXLDESC(sc, idx);
 1243 
 1244         return (0);
 1245 }
 1246 
 1247 int
 1248 admsw_mediachange(struct ifnet *ifp)
 1249 {
 1250         struct admsw_softc *sc = ifp->if_softc;
 1251         int port = 0;
 1252         struct ifmedia *ifm;
 1253         int old, new, val;
 1254 
 1255         while(port < SW_DEVS) {
 1256                 if(ifp == sc->sc_ifnet[port])
 1257                         break;
 1258                 else
 1259                         port++;
 1260         }
 1261 
 1262         ifm = &sc->sc_ifmedia[port];
 1263 
 1264         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1265                 return (EINVAL);
 1266 
 1267         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
 1268                 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1269         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
 1270                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1271                         val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1272                 else
 1273                         val = PHY_CNTL2_100M;
 1274         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
 1275                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1276                         val = PHY_CNTL2_FDX;
 1277                 else
 1278                         val = 0;
 1279         } else
 1280                 return (EINVAL);
 1281 
 1282         old = REG_READ(PHY_CNTL2_REG);
 1283         new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
 1284         new |= (val << port);
 1285 
 1286         if (new != old)
 1287                 REG_WRITE(PHY_CNTL2_REG, new);
 1288 
 1289         return (0);
 1290 }
 1291 
 1292 void
 1293 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1294 {
 1295         struct admsw_softc *sc = ifp->if_softc;
 1296         int port = 0;
 1297         int status;
 1298 
 1299         while(port < SW_DEVS) {
 1300                 if(ifp == sc->sc_ifnet[port])
 1301                         break;
 1302                 else
 1303                         port++;
 1304         }
 1305 
 1306         ifmr->ifm_status = IFM_AVALID;
 1307         ifmr->ifm_active = IFM_ETHER;
 1308 
 1309         status = REG_READ(PHY_ST_REG) >> port;
 1310 
 1311         if ((status & PHY_ST_LINKUP) == 0) {
 1312                 ifmr->ifm_active |= IFM_NONE;
 1313                 return;
 1314         }
 1315 
 1316         ifmr->ifm_status |= IFM_ACTIVE;
 1317         ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
 1318         if (status & PHY_ST_FDX)
 1319                 ifmr->ifm_active |= IFM_FDX;
 1320 }
 1321 
 1322 static device_method_t admsw_methods[] = {
 1323         /* Device interface */
 1324         DEVMETHOD(device_probe,         admsw_probe),
 1325         DEVMETHOD(device_attach,        admsw_attach),
 1326         DEVMETHOD(device_detach,        admsw_detach),
 1327         DEVMETHOD(device_shutdown,      admsw_shutdown),
 1328 
 1329         { 0, 0 }
 1330 };
 1331 
 1332 static devclass_t admsw_devclass;
 1333 
 1334 static driver_t admsw_driver = {
 1335         "admsw",
 1336         admsw_methods,
 1337         sizeof(struct admsw_softc),
 1338 };
 1339 
 1340 DRIVER_MODULE(admsw, obio, admsw_driver, admsw_devclass, 0, 0);
 1341 MODULE_DEPEND(admsw, ether, 1, 1, 1);
