FreeBSD/Linux Kernel Cross Reference
sys/mips/adm5120/if_admsw.c


    1 /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or
    8  * without modification, are permitted provided that the following
    9  * conditions are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above
   13  *    copyright notice, this list of conditions and the following
   14  *    disclaimer in the documentation and/or other materials provided
   15  *    with the distribution.
   16  * 3. The names of the authors may not be used to endorse or promote
   17  *    products derived from this software without specific prior
   18  *    written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
   21  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   23  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
   25  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   26  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
   27  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   29  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
   30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
   31  * OF SUCH DAMAGE.
   32  */
   33 /*
   34  * Copyright (c) 2001 Wasabi Systems, Inc.
   35  * All rights reserved.
   36  *
   37  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
   38  *
   39  * Redistribution and use in source and binary forms, with or without
   40  * modification, are permitted provided that the following conditions
   41  * are met:
   42  * 1. Redistributions of source code must retain the above copyright
   43  *    notice, this list of conditions and the following disclaimer.
   44  * 2. Redistributions in binary form must reproduce the above copyright
   45  *    notice, this list of conditions and the following disclaimer in the
   46  *    documentation and/or other materials provided with the distribution.
   47  * 3. All advertising materials mentioning features or use of this software
   48  *    must display the following acknowledgement:
   49  *      This product includes software developed for the NetBSD Project by
   50  *      Wasabi Systems, Inc.
   51  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   52  *    or promote products derived from this software without specific prior
   53  *    written permission.
   54  *
   55  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   57  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   58  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   59  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   60  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   61  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   62  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   63  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   64  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   65  * POSSIBILITY OF SUCH DAMAGE.
   66  */
   67 
   68 /*
   69  * Device driver for the ADM5120 on-chip Ethernet switch
   70  * engine.
   71  *
   72  * TODO:
   73  *
   74  *      Better Rx buffer management; we want to get new Rx buffers
   75  *      to the chip more quickly than we currently do.
   76  */
   77 
   78 #include <sys/cdefs.h>
   79 __FBSDID("$FreeBSD: releng/11.2/sys/mips/adm5120/if_admsw.c 331722 2018-03-29 02:50:57Z eadler $");
   80 
   81 #include <sys/param.h>
   82 #include <sys/systm.h>
   83 #include <sys/bus.h>
   84 #include <sys/kernel.h>
   85 #include <sys/mbuf.h>
   86 #include <sys/malloc.h>
   87 #include <sys/module.h>
   88 #include <sys/rman.h>
   89 #include <sys/socket.h>
   90 #include <sys/sockio.h>
   91 #include <sys/sysctl.h>
   92 #include <machine/bus.h>
   93 
   94 #include <net/ethernet.h>
   95 #include <net/if.h>
   96 #include <net/if_arp.h>
   97 #include <net/if_dl.h>
   98 #include <net/if_media.h>
   99 #include <net/if_mib.h>
  100 #include <net/if_types.h>
  101 #include <net/if_var.h>
  102 
  103 #ifdef INET
  104 #include <netinet/in.h>
  105 #include <netinet/in_systm.h>
  106 #include <netinet/in_var.h>
  107 #include <netinet/ip.h>
  108 #endif
  109 
  110 #include <net/bpf.h>
  111 #include <net/bpfdesc.h>
  112 
  113 #include <mips/adm5120/adm5120reg.h>
  114 #include <mips/adm5120/if_admswreg.h>
  115 #include <mips/adm5120/if_admswvar.h>
  116 
  117 /* TODO: add locking */
  118 #define ADMSW_LOCK(sc) do {} while (0)
  119 #define ADMSW_UNLOCK(sc) do {} while (0)
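      /*
       * A minimal sketch of what the missing locking could look like,
       * using a per-softc mutex.  Illustrative only: it assumes an
       * "sc_mtx" member in struct admsw_softc (which would have to be
       * added to if_admswvar.h) plus <sys/lock.h> and <sys/mutex.h>:
       *
       *      #define ADMSW_LOCK(sc)          mtx_lock(&(sc)->sc_mtx)
       *      #define ADMSW_UNLOCK(sc)        mtx_unlock(&(sc)->sc_mtx)
       *
       * with, in admsw_attach():
       *
       *      mtx_init(&sc->sc_mtx, device_get_nameunit(dev),
       *          MTX_NETWORK_LOCK, MTX_DEF);
       */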
  120 
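      /*
       * Port-to-interface map: in each entry below, bits 0-5 select the
       * corresponding switch port and bit 6 selects the CPU port, so the
       * default configuration pairs each port with the CPU.
       */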
  121 static uint8_t vlan_matrix[SW_DEVS] = {
  122         (1 << 6) | (1 << 0),            /* CPU + port0 */
  123         (1 << 6) | (1 << 1),            /* CPU + port1 */
  124         (1 << 6) | (1 << 2),            /* CPU + port2 */
  125         (1 << 6) | (1 << 3),            /* CPU + port3 */
  126         (1 << 6) | (1 << 4),            /* CPU + port4 */
  127         (1 << 6) | (1 << 5),            /* CPU + port5 */
  128 };
  129 
  130 /* ifnet entry points */
  131 static void     admsw_start(struct ifnet *);
  132 static void     admsw_watchdog(void *);
  133 static int      admsw_ioctl(struct ifnet *, u_long, caddr_t);
  134 static void     admsw_init(void *);
  135 static void     admsw_stop(struct ifnet *, int);
  136 
  137 static void     admsw_reset(struct admsw_softc *);
  138 static void     admsw_set_filter(struct admsw_softc *);
  139 
  140 static void     admsw_txintr(struct admsw_softc *, int);
  141 static void     admsw_rxintr(struct admsw_softc *, int);
  142 static int      admsw_add_rxbuf(struct admsw_softc *, int, int);
  143 #define admsw_add_rxhbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 1)
  144 #define admsw_add_rxlbuf(sc, idx)       admsw_add_rxbuf(sc, idx, 0)
  145 
  146 static int      admsw_mediachange(struct ifnet *);
  147 static void     admsw_mediastatus(struct ifnet *, struct ifmediareq *);
  148 
  149 static int      admsw_intr(void *);
  150 
  151 /* bus entry points */
  152 static int      admsw_probe(device_t dev);
  153 static int      admsw_attach(device_t dev);
  154 static int      admsw_detach(device_t dev);
  155 static int      admsw_shutdown(device_t dev);
  156 
  157 static void
  158 admsw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  159 {
  160         uint32_t *addr;
  161 
  162         if (error)
  163                 return;
  164 
  165         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  166         addr = arg;
  167         *addr = segs->ds_addr;
  168 }
  169 
  170 static void
  171 admsw_rxbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  172 {
  173         struct admsw_descsoft *ds;
  174 
  175         if (error)
  176                 return;
  177 
  178         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
  179 
  180         ds = arg;
  181         ds->ds_nsegs = nseg;
  182         ds->ds_addr[0] = segs[0].ds_addr;
  183         ds->ds_len[0] = segs[0].ds_len;
  184 
  185 }
  186 
  187 static void
  188 admsw_mbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, 
  189     bus_size_t mapsize, int error)
  190 {
  191         struct admsw_descsoft *ds;
  192 
  193         if (error)
  194                 return;
  195 
  196         ds = arg;
  197 
  198         if ((nseg != 1) && (nseg != 2))
  199                 panic("%s: nseg == %d\n", __func__, nseg);
  200 
  201         ds->ds_nsegs = nseg;
  202         ds->ds_addr[0] = segs[0].ds_addr;
  203         ds->ds_len[0] = segs[0].ds_len;
  204 
  205         if (nseg > 1) {
  206                 ds->ds_addr[1] = segs[1].ds_addr;
  207                 ds->ds_len[1] = segs[1].ds_len;
  208         }
  209 }
  210 
  211 
  212 
  213 static int
  214 admsw_probe(device_t dev)
  215 {
  216 
  217         device_set_desc(dev, "ADM5120 Switch Engine");
  218         return (0);
  219 }
  220 
  221 #define REG_READ(o)     bus_read_4((sc)->mem_res, (o))
  222 #define REG_WRITE(o,v)  bus_write_4((sc)->mem_res, (o),(v))
  223 
  224 static void
  225 admsw_init_bufs(struct admsw_softc *sc)
  226 {
  227         int i;
  228         struct admsw_desc *desc;
  229 
  230         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  231                 if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
  232                         m_freem(sc->sc_txhsoft[i].ds_mbuf);
  233                         sc->sc_txhsoft[i].ds_mbuf = NULL;
  234                 }
  235                 desc = &sc->sc_txhdescs[i];
  236                 desc->data = 0;
  237                 desc->cntl = 0;
  238                 desc->len = MAC_BUFLEN;
  239                 desc->status = 0;
  240                 ADMSW_CDTXHSYNC(sc, i,
  241                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  242         }
  243         sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
  244         ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
  245             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  246 
  247         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  248                 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
  249                         if (admsw_add_rxhbuf(sc, i) != 0)
  250                                 panic("admsw_init_bufs\n");
  251                 } else
  252                         ADMSW_INIT_RXHDESC(sc, i);
  253         }
  254 
  255         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  256                 if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
  257                         m_freem(sc->sc_txlsoft[i].ds_mbuf);
  258                         sc->sc_txlsoft[i].ds_mbuf = NULL;
  259                 }
  260                 desc = &sc->sc_txldescs[i];
  261                 desc->data = 0;
  262                 desc->cntl = 0;
  263                 desc->len = MAC_BUFLEN;
  264                 desc->status = 0;
  265                 ADMSW_CDTXLSYNC(sc, i,
  266                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  267         }
  268         sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
  269         ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
  270             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  271 
  272         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  273                 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
  274                         if (admsw_add_rxlbuf(sc, i) != 0)
  275                                 panic("admsw_init_bufs\n");
  276                 } else
  277                         ADMSW_INIT_RXLDESC(sc, i);
  278         }
  279 
  280         REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
  281         REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
  282         REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
  283         REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));
  284 
  285         sc->sc_txfree = ADMSW_NTXLDESC;
  286         sc->sc_txnext = 0;
  287         sc->sc_txdirty = 0;
  288         sc->sc_rxptr = 0;
  289 }
  290 
  291 static void
  292 admsw_setvlan(struct admsw_softc *sc, char matrix[6])
  293 {
  294         uint32_t i;
  295 
  296         i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
  297         REG_WRITE(VLAN_G1_REG, i);
  298         i = matrix[4] + (matrix[5] << 8);
  299         REG_WRITE(VLAN_G2_REG, i);
  300 }
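      /*
       * Worked example: with the default vlan_matrix above, the entries
       * are 0x41, 0x42, 0x44, 0x48, 0x50 and 0x60, so admsw_setvlan()
       * writes 0x48444241 to VLAN_G1_REG and 0x00006050 to VLAN_G2_REG.
       */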
  301 
  302 static void
  303 admsw_reset(struct admsw_softc *sc)
  304 {
  305         uint32_t wdog1;
  306         int i;
  307 
  308         REG_WRITE(PORT_CONF0_REG,
  309             REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
  310         REG_WRITE(CPUP_CONF_REG,
  311             REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);
  312 
  313         /* Wait for DMA to complete.  Overkill.  In 3ms, we can
  314          * send at least two entire 1500-byte packets at 10 Mb/s.
  315          */
  316         DELAY(3000);
  317 
  318         /* The datasheet recommends that we move all PHYs to reset
  319          * state prior to software reset.
  320          */
  321         REG_WRITE(PHY_CNTL2_REG,
  322             REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);
  323 
  324         /* Reset the switch. */
  325         REG_WRITE(ADMSW_SW_RES, 0x1);
  326 
  327         DELAY(100 * 1000);
  328 
  329         REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);
  330 
  331         /* begin old code */
  332         REG_WRITE(CPUP_CONF_REG,
  333             CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
  334             CPUP_CONF_DMCP_MASK);
  335 
  336         REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);
  337 
  338         REG_WRITE(PHY_CNTL2_REG,
  339             REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
  340             PHY_CNTL2_AMDIX_MASK);
  341 
  342         REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);
  343 
  344         REG_WRITE(ADMSW_INT_MASK, INT_MASK);
  345         REG_WRITE(ADMSW_INT_ST, INT_MASK);
  346 
  347         /*
  348          * While we are in DDB we stop servicing interrupts, the RX
  349          * ring fills up, and once the free-block counter drops below
  350          * the FC threshold the switch starts emitting 802.3x PAUSE
  351          * frames, which can upset peer switches.
  352          *
  353          * Prevent this from happening by disabling the FC and D2
  354          * thresholds.
  355          */
  356         REG_WRITE(FC_TH_REG,
  357             REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));
  358 
  359         admsw_setvlan(sc, vlan_matrix);
  360 
  361         for (i = 0; i < SW_DEVS; i++) {
  362                 REG_WRITE(MAC_WT1_REG,
  363                     sc->sc_enaddr[2] |
  364                     (sc->sc_enaddr[3]<<8) |
  365                     (sc->sc_enaddr[4]<<16) |
  366                     ((sc->sc_enaddr[5]+i)<<24));
  367                 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
  368                     (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
  369                     MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
  370 
  371                 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
  372         }
  373 
  374         wdog1 = REG_READ(ADM5120_WDOG1);
  375         REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
  376 }
  377 
  378 static int
  379 admsw_attach(device_t dev)
  380 {
  381         uint8_t enaddr[ETHER_ADDR_LEN];
  382         struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
  383         struct ifnet *ifp;
  384         int error, i, rid;
  385 
  386         sc->sc_dev = dev;
  387         device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
  388         sc->ndevs = 0;
  389 
  390         /* XXXMIPS: fix it */
  391         enaddr[0] = 0x00;
  392         enaddr[1] = 0x0C;
  393         enaddr[2] = 0x42;
  394         enaddr[3] = 0x07;
  395         enaddr[4] = 0xB2;
  396         enaddr[5] = 0x4E;
  397 
  398         memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));
  399 
  400         device_printf(sc->sc_dev, "base Ethernet address %s\n",
  401             ether_sprintf(enaddr));
  402         callout_init(&sc->sc_watchdog, 1);
  403 
  404         rid = 0;
  405         if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 
  406             RF_ACTIVE)) == NULL) {
  407                 device_printf(dev, "unable to allocate memory resource\n");
  408                 return (ENXIO);
  409         }
  410 
  411         /* Hook up the interrupt handler. */
  412         rid = 0;
  413         if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
  414             RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  415                 device_printf(dev, "unable to allocate IRQ resource\n");
  416                 return (ENXIO);
  417         }
  418 
  419         if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, 
  420             admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
  421                 device_printf(dev, 
  422                     "WARNING: unable to register interrupt handler\n");
  423                 return (error);
  424         }
  425 
  426         /*
  427          * Allocate the control data structures, and create and load the
  428          * DMA map for it.
  429          */
  430         if ((error = bus_dma_tag_create(NULL, 4, 0, 
  431             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  432             NULL, NULL, sizeof(struct admsw_control_data), 1,
  433             sizeof(struct admsw_control_data), 0, NULL, NULL, 
  434             &sc->sc_control_dmat)) != 0) {
  435                 device_printf(sc->sc_dev, 
  436                     "unable to create control data DMA map, error = %d\n", 
  437                     error);
  438                 return (error);
  439         }
  440 
  441         if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
  442             (void **)&sc->sc_control_data, BUS_DMA_NOWAIT, 
  443             &sc->sc_cddmamap)) != 0) {
  444                 device_printf(sc->sc_dev, 
  445                     "unable to allocate control data, error = %d\n", error);
  446                 return (error);
  447         }
  448 
  449         if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
  450             sc->sc_control_data, sizeof(struct admsw_control_data), 
  451             admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
  452                 device_printf(sc->sc_dev, 
  453                     "unable to load control data DMA map, error = %d\n", error);
  454                 return (error);
  455         }
  456 
  457         /*
  458          * Create the transmit buffer DMA maps.
  459          */
  460         if ((error = bus_dma_tag_create(NULL, 1, 0, 
  461             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
  462             NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, 
  463             &sc->sc_bufs_dmat)) != 0) {
  464                 device_printf(sc->sc_dev, 
  465                     "unable to create control data DMA map, error = %d\n", 
  466                     error);
  467                 return (error);
  468         }
  469 
  470         for (i = 0; i < ADMSW_NTXHDESC; i++) {
  471                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  472                     &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
  473                         device_printf(sc->sc_dev, 
  474                             "unable to create txh DMA map %d, error = %d\n", 
  475                             i, error);
  476                         return (error);
  477                 }
  478                 sc->sc_txhsoft[i].ds_mbuf = NULL;
  479         }
  480 
  481         for (i = 0; i < ADMSW_NTXLDESC; i++) {
  482                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  483                     &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
  484                         device_printf(sc->sc_dev, 
  485                             "unable to create txl DMA map %d, error = %d\n", 
  486                             i, error);
  487                         return (error);
  488                 }
  489                 sc->sc_txlsoft[i].ds_mbuf = NULL;
  490         }
  491 
  492         /*
  493          * Create the receive buffer DMA maps.
  494          */
  495         for (i = 0; i < ADMSW_NRXHDESC; i++) {
  496                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0, 
  497                      &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
  498                         device_printf(sc->sc_dev, 
  499                             "unable to create rxh DMA map %d, error = %d\n", 
  500                             i, error);
  501                         return (error);
  502                 }
  503                 sc->sc_rxhsoft[i].ds_mbuf = NULL;
  504         }
  505 
  506         for (i = 0; i < ADMSW_NRXLDESC; i++) {
  507                 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
  508                     &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
  509                         device_printf(sc->sc_dev, 
  510                             "unable to create rxl DMA map %d, error = %d\n",
  511                             i, error);
  512                         return (error);
  513                 }
  514                 sc->sc_rxlsoft[i].ds_mbuf = NULL;
  515         }
  516 
  517         admsw_init_bufs(sc);
  518         admsw_reset(sc);
  519 
  520         for (i = 0; i < SW_DEVS; i++) {
  521                 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, 
  522                     admsw_mediastatus);
  523                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
  524                 ifmedia_add(&sc->sc_ifmedia[i], 
  525                     IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
  526                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
  527                 ifmedia_add(&sc->sc_ifmedia[i], 
  528                     IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
  529                 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
  530                 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);
  531 
  532                 ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);
  533 
  534                 /* Setup interface parameters */
  535                 ifp->if_softc = sc;
  536                 if_initname(ifp, device_get_name(dev), i);
  537                 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  538                 ifp->if_ioctl = admsw_ioctl;
  539                 ifp->if_output = ether_output;
  540                 ifp->if_start = admsw_start;
  541                 ifp->if_init = admsw_init;
  542                 ifp->if_mtu = ETHERMTU;
  543                 ifp->if_baudrate = IF_Mbps(100);
  544                 IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, ifqmaxlen));
  545                 ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, ifqmaxlen);
  546                 IFQ_SET_READY(&ifp->if_snd);
  547                 ifp->if_capabilities |= IFCAP_VLAN_MTU;
  548 
  549                 /* Attach the interface. */
  550                 ether_ifattach(ifp, enaddr);
  551                 enaddr[5]++;
  552         }
  553 
  554         /* XXX: admwdog_attach(sc); */
  555 
  556         /* leave interrupts and cpu port disabled */
  557         return (0);
  558 }
  559 
  560 static int
  561 admsw_detach(device_t dev)
  562 {
  563 
  564         printf("TODO: DETACH\n");
  565         return (0);
  566 }
  567 
  568 /*
  569  * admsw_shutdown:
  570  *
  571  *      Make sure the interface is stopped at reboot time.
  572  */
  573 static int
  574 admsw_shutdown(device_t dev)
  575 {
  576         struct admsw_softc *sc;
  577         int i;
  578 
  579         sc = device_get_softc(dev);
  580         for (i = 0; i < SW_DEVS; i++)
  581                 admsw_stop(sc->sc_ifnet[i], 1);
  582 
  583         return (0);
  584 }
  585 
  586 /*
  587  * admsw_start:         [ifnet interface function]
  588  *
  589  *      Start packet transmission on the interface.
  590  */
  591 static void
  592 admsw_start(struct ifnet *ifp)
  593 {
  594         struct admsw_softc *sc = ifp->if_softc;
  595         struct mbuf *m0, *m;
  596         struct admsw_descsoft *ds;
  597         struct admsw_desc *desc;
  598         bus_dmamap_t dmamap;
  599         struct ether_header *eh;
  600         int error, nexttx, len, i;
  601         static int vlan = 0;
  602 
  603         /*
  604          * Loop through the send queues, setting up transmit descriptors
  605          * until we drain the queues, or use up all available transmit
  606          * descriptors.
  607          */
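              /*
               * The static "vlan" index below rotates the starting queue
               * on each pass, so the per-port send queues are serviced
               * round-robin.
               */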
  608         for (;;) {
  609                 vlan++;
  610                 if (vlan == SW_DEVS)
  611                         vlan = 0;
  612                 i = vlan;
  613                 for (;;) {
  614                         ifp = sc->sc_ifnet[i];
  615                         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) 
  616                             == IFF_DRV_RUNNING) {
  617                                 /* Grab a packet off the queue. */
  618                                 IF_DEQUEUE(&ifp->if_snd, m0);
  619                                 if (m0 != NULL)
  620                                         break;
  621                         }
  622                         i++;
  623                         if (i == SW_DEVS)
  624                                 i = 0;
  625                         if (i == vlan)
  626                                 return;
  627                 }
  628                 vlan = i;
  629                 m = NULL;
  630 
  631                 /* Get a spare descriptor. */
  632                 if (sc->sc_txfree == 0) {
  633                         /* No more slots left; notify upper layer. */
  634                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  635                         break;
  636                 }
  637                 nexttx = sc->sc_txnext;
  638                 desc = &sc->sc_txldescs[nexttx];
  639                 ds = &sc->sc_txlsoft[nexttx];
  640                 dmamap = ds->ds_dmamap;
  641 
  642                 /*
  643                  * Load the DMA map.  If this fails, the packet either
  644                  * didn't fit in the allotted number of segments, or we
  645                  * were short on resources.  In this case, we'll copy
  646                  * and try again.
  647                  */
  648                 if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
  649                     bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
  650                     admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
  651                         MGETHDR(m, M_NOWAIT, MT_DATA);
  652                         if (m == NULL) {
  653                                 device_printf(sc->sc_dev, 
  654                                     "unable to allocate Tx mbuf\n");
  655                                 break;
  656                         }
  657                         if (m0->m_pkthdr.len > MHLEN) {
  658                                 if (!(MCLGET(m, M_NOWAIT))) {
  659                                         device_printf(sc->sc_dev, 
  660                                             "unable to allocate Tx cluster\n");
  661                                         m_freem(m);
  662                                         break;
  663                                 }
  664                         }
  665                         m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
  666                         m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
  667                         m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
  668                         if (m->m_pkthdr.len < ETHER_MIN_LEN) {
  669                                 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
  670                                         panic("admsw_start: M_TRAILINGSPACE\n");
  671                                 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
  672                                     ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
  673                                 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
  674                         }
  675                         error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, 
  676                             dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
  677                         if (error) {
  678                                 device_printf(sc->sc_dev, 
  679                                     "unable to load Tx buffer, error = %d\n", 
  680                                     error);
  681                                 break;
  682                         }
  683                 }
  684 
  685                 if (m != NULL) {
  686                         m_freem(m0);
  687                         m0 = m;
  688                 }
  689 
  690                 /*
  691                  * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
  692                  */
  693 
  694                 /* Sync the DMA map. */
  695                 bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);
  696 
  697                 if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
  698                         panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
  699                 desc->data = ds->ds_addr[0];
  700                 desc->len = len = ds->ds_len[0];
  701                 if (ds->ds_nsegs > 1) {
  702                         len += ds->ds_len[1];
  703                         desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
  704                 } else
  705                         desc->cntl = 0;
  706                 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
  707                 eh = mtod(m0, struct ether_header *);
  708                 if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
  709                     m0->m_pkthdr.csum_flags & CSUM_IP)
  710                         desc->status |= ADM5120_DMA_CSUM;
  711                 if (nexttx == ADMSW_NTXLDESC - 1)
  712                         desc->data |= ADM5120_DMA_RINGEND;
  713                 desc->data |= ADM5120_DMA_OWN;
  714 
  715                 /* Sync the descriptor. */
  716                 ADMSW_CDTXLSYNC(sc, nexttx,
  717                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  718 
  719                 REG_WRITE(SEND_TRIG_REG, 1);
  720                 /* printf("send slot %d\n",nexttx); */
  721 
  722                 /*
  723                  * Store a pointer to the packet so we can free it later.
  724                  */
  725                 ds->ds_mbuf = m0;
  726 
  727                 /* Advance the Tx pointer. */
  728                 sc->sc_txfree--;
  729                 sc->sc_txnext = ADMSW_NEXTTXL(nexttx);
  730 
  731                 /* Pass the packet to any BPF listeners. */
  732                 BPF_MTAP(ifp, m0);
  733 
  734                 /* Set a watchdog timer in case the chip flakes out. */
  735                 sc->sc_timer = 5;
  736         }
  737 }
  738 
  739 /*
  740  * admsw_watchdog:      [ifnet interface function]
  741  *
  742  *      Watchdog timer handler.
  743  */
  744 static void
  745 admsw_watchdog(void *arg)
  746 {
  747         struct admsw_softc *sc = arg;
  748         struct ifnet *ifp;
  749         int vlan;
  750 
  751         callout_reset(&sc->sc_watchdog, hz, admsw_watchdog, sc);
  752         if (sc->sc_timer == 0 || --sc->sc_timer > 0)
  753                 return;
  754 
  755         /* Check if an interrupt was lost. */
  756         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  757                 device_printf(sc->sc_dev, "watchdog false alarm\n");
  758                 return;
  759         }
  760         if (sc->sc_timer != 0)
  761                 device_printf(sc->sc_dev, "watchdog timer is %d!\n",  
  762                     sc->sc_timer);
  763         admsw_txintr(sc, 0);
  764         if (sc->sc_txfree == ADMSW_NTXLDESC) {
  765                 device_printf(sc->sc_dev, "tx IRQ lost (queue empty)\n");
  766                 return;
  767         }
  768         if (sc->sc_timer != 0) {
  769                 device_printf(sc->sc_dev, "tx IRQ lost (timer recharged)\n");
  770                 return;
  771         }
  772 
  773         device_printf(sc->sc_dev, "device timeout, txfree = %d\n",  
  774             sc->sc_txfree);
  775         for (vlan = 0; vlan < SW_DEVS; vlan++)
  776                 admsw_stop(sc->sc_ifnet[vlan], 0);
  777         admsw_init(sc);
  778 
  779         ifp = sc->sc_ifnet[0];
  780 
  781         /* Try to get more packets going. */
  782         admsw_start(ifp);
  783 }
  784 
  785 /*
  786  * admsw_ioctl:         [ifnet interface function]
  787  *
  788  *      Handle control requests from the operator.
  789  */
  790 static int
  791 admsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  792 {
  793         struct admsw_softc *sc = ifp->if_softc;
  794         struct ifdrv *ifd;
  795         int error, port;
  796 
  797         ADMSW_LOCK(sc);
  798 
  799         switch (cmd) {
  800         case SIOCSIFMEDIA:
  801         case SIOCGIFMEDIA:
  802                 port = 0;
  803                 while (port < SW_DEVS) {
  804                         if (ifp == sc->sc_ifnet[port])
  805                                 break;
  806                         port++;
  807                 }
  808                 if (port >= SW_DEVS)
  809                         error = EOPNOTSUPP;
  810                 else
  811                         error = ifmedia_ioctl(ifp, (struct ifreq *)data,
  812                             &sc->sc_ifmedia[port], cmd);
  813                 break;
  814 
  815         case SIOCGDRVSPEC:
  816         case SIOCSDRVSPEC:
  817                 ifd = (struct ifdrv *) data;
  818                 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
  819                         error = EINVAL;
  820                         break;
  821                 }
  822                 if (cmd == SIOCGDRVSPEC) {
  823                         error = copyout(vlan_matrix, ifd->ifd_data,
  824                             sizeof(vlan_matrix));
  825                 } else {
  826                         error = copyin(ifd->ifd_data, vlan_matrix,
  827                             sizeof(vlan_matrix));
  828                         admsw_setvlan(sc, vlan_matrix);
  829                 }
  830                 break;
  831 
  832         default:
  833                 error = ether_ioctl(ifp, cmd, data);
  834                 if (error == ENETRESET) {
  835                         /*
  836                          * Multicast list has changed; set the hardware filter
  837                          * accordingly.
  838                          */
  839                         admsw_set_filter(sc);
  840                         error = 0;
  841                 }
  842                 break;
  843         }
  844 
  845         /* Try to get more packets going. */
  846         admsw_start(ifp);
  847 
  848         ADMSW_UNLOCK(sc);
  849         return (error);
  850 }
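      /*
       * Illustrative userland sketch (not part of the driver): the
       * SIOCGDRVSPEC/SIOCSDRVSPEC cases above expose the 6-byte VLAN
       * matrix with ifd_cmd == 0.  The interface name "admsw0" and the
       * already-open datagram socket "s" are assumptions made for the
       * example:
       *
       *      uint8_t matrix[6];
       *      struct ifdrv ifd;
       *
       *      memset(&ifd, 0, sizeof(ifd));
       *      strlcpy(ifd.ifd_name, "admsw0", sizeof(ifd.ifd_name));
       *      ifd.ifd_cmd = 0;
       *      ifd.ifd_len = sizeof(matrix);
       *      ifd.ifd_data = matrix;
       *      if (ioctl(s, SIOCGDRVSPEC, &ifd) == -1)
       *              err(1, "SIOCGDRVSPEC");
       */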
  851 
  852 
  853 /*
  854  * admsw_intr:
  855  *
  856  *      Interrupt service routine.
  857  */
  858 static int
  859 admsw_intr(void *arg)
  860 {
  861         struct admsw_softc *sc = arg;
  862         uint32_t pending;
  863 
  864         pending = REG_READ(ADMSW_INT_ST);
  865         REG_WRITE(ADMSW_INT_ST, pending);
  866 
  867         if (sc->ndevs == 0)
  868                 return (FILTER_STRAY);
  869 
  870         if ((pending & ADMSW_INTR_RHD) != 0)
  871                 admsw_rxintr(sc, 1);
  872 
  873         if ((pending & ADMSW_INTR_RLD) != 0)
  874                 admsw_rxintr(sc, 0);
  875 
  876         if ((pending & ADMSW_INTR_SHD) != 0)
  877                 admsw_txintr(sc, 1);
  878 
  879         if ((pending & ADMSW_INTR_SLD) != 0)
  880                 admsw_txintr(sc, 0);
  881 
  882         return (FILTER_HANDLED);
  883 }
  884 
  885 /*
  886  * admsw_txintr:
  887  *
  888  *      Helper; handle transmit interrupts.
  889  */
  890 static void
  891 admsw_txintr(struct admsw_softc *sc, int prio)
  892 {
  893         struct ifnet *ifp;
  894         struct admsw_desc *desc;
  895         struct admsw_descsoft *ds;
  896         int i, vlan;
  897         int gotone = 0;
  898 
  899         /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  900         for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
  901             i = ADMSW_NEXTTXL(i)) {
  902 
  903                 ADMSW_CDTXLSYNC(sc, i,
  904                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  905 
  906                 desc = &sc->sc_txldescs[i];
  907                 ds = &sc->sc_txlsoft[i];
  908                 if (desc->data & ADM5120_DMA_OWN) {
  909                         ADMSW_CDTXLSYNC(sc, i,
  910                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  911                         break;
  912                 }
  913 
  914                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, 
  915                     BUS_DMASYNC_POSTWRITE);
  916                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
  917                 m_freem(ds->ds_mbuf);
  918                 ds->ds_mbuf = NULL;
  919 
  920                 vlan = ffs(desc->status & 0x3f) - 1;
  921                 if (vlan < 0 || vlan >= SW_DEVS)
  922                         panic("admsw_txintr: bad vlan\n");
  923                 ifp = sc->sc_ifnet[vlan];
  924                 gotone = 1;
  925                 /* printf("clear tx slot %d\n",i); */
  926 
  927                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
  928 
  929                 sc->sc_txfree++;
  930         }
  931 
  932         if (gotone) {
  933                 sc->sc_txdirty = i;
  934                 for (vlan = 0; vlan < SW_DEVS; vlan++)
  935                         sc->sc_ifnet[vlan]->if_drv_flags &= ~IFF_DRV_OACTIVE;
  936 
  937                 ifp = sc->sc_ifnet[0];
  938 
  939                 /* Try to queue more packets. */
  940                 admsw_start(ifp);
  941 
  942                 /*
  943                  * If there are no more pending transmissions,
  944                  * cancel the watchdog timer.
  945                  */
  946                 if (sc->sc_txfree == ADMSW_NTXLDESC)
  947                         sc->sc_timer = 0;
  948 
  949         }
  950 
  951         /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
  952 }
  953 
  954 /*
  955  * admsw_rxintr:
  956  *
  957  *      Helper; handle receive interrupts.
  958  */
  959 static void
  960 admsw_rxintr(struct admsw_softc *sc, int high)
  961 {
  962         struct ifnet *ifp;
  963         struct admsw_descsoft *ds;
  964         struct mbuf *m;
  965         uint32_t stat;
  966         int i, len, port, vlan;
  967 
  968         /* printf("rxintr\n"); */
  969 
  970         if (high)
  971                 panic("admsw_rxintr: high priority packet\n");
  972 
  973 #if 1
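              /*
               * Re-synchronize the software ring pointer with the chip: if
               * the descriptor at sc_rxptr is still owned by the hardware,
               * scan the ring for a completed descriptor and, when one is
               * found, resume processing from there (the "fallen behind"
               * case below).
               */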
  974         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  975             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  976         if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  977                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  978                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  979         else {
  980                 i = sc->sc_rxptr;
  981                 do {
  982                         ADMSW_CDRXLSYNC(sc, i, 
  983                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  984                         i = ADMSW_NEXTRXL(i);
  985                         /* the ring is empty, just return. */
  986                         if (i == sc->sc_rxptr)
  987                                 return;
  988                         ADMSW_CDRXLSYNC(sc, i, 
  989                             BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  990                 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
  991 
  992                 ADMSW_CDRXLSYNC(sc, i, 
  993                     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  994 
  995                 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
  996                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  997 
  998                 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
  999                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
 1000                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1001                 else {
 1002                         ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 
 1003                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1004                         /* We've fallen behind the chip: catch it. */
 1005 #if 0
 1006                         device_printf(sc->sc_dev, 
 1007                            "RX ring resync, base=%x, work=%x, %d -> %d\n",
 1008                             REG_READ(RECV_LBADDR_REG),
 1009                             REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
 1010 #endif
 1011                         sc->sc_rxptr = i;
 1012                         /* ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); */
 1013                 }
 1014         }
 1015 #endif
 1016         for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
 1017                 ds = &sc->sc_rxlsoft[i];
 1018 
 1019                 ADMSW_CDRXLSYNC(sc, i, 
 1020                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 1021 
 1022                 if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
 1023                         ADMSW_CDRXLSYNC(sc, i, 
 1024                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 1025                         break;
 1026                 }
 1027 
 1028                 /* printf("process slot %d\n",i); */
 1029 
 1030                 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1031                     BUS_DMASYNC_POSTREAD);
 1032 
 1033                 stat = sc->sc_rxldescs[i].status;
 1034                 len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
 1035                 len -= ETHER_CRC_LEN;
 1036                 port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
 1037 
 1038                 for (vlan = 0; vlan < SW_DEVS; vlan++)
 1039                         if ((1 << port) & vlan_matrix[vlan])
 1040                                 break;
 1041 
 1042                 if (vlan == SW_DEVS)
 1043                         vlan = 0;
 1044 
 1045                 ifp = sc->sc_ifnet[vlan];
 1046 
 1047                 m = ds->ds_mbuf;
 1048                 if (admsw_add_rxlbuf(sc, i) != 0) {
 1049                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1050                         ADMSW_INIT_RXLDESC(sc, i);
 1051                         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
 1052                             BUS_DMASYNC_PREREAD);
 1053                         continue;
 1054                 }
 1055 
 1056                 m->m_pkthdr.rcvif = ifp;
 1057                 m->m_pkthdr.len = m->m_len = len;
 1058                 if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
 1059                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1060                         if (!(stat & ADM5120_DMA_CSUMFAIL))
 1061                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1062                 }
 1063 
 1064                 BPF_MTAP(ifp, m);
 1065 
 1066                 /* Pass it on. */
 1067                 (*ifp->if_input)(ifp, m);
 1068                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 1069         }
 1070 
 1071         /* Update the receive pointer. */
 1072         sc->sc_rxptr = i;
 1073 }
 1074 
 1075 /*
 1076  * admsw_init:          [ifnet interface function]
 1077  *
 1078  *      Initialize the interface.
 1079  */
 1080 static void
 1081 admsw_init(void *xsc)
 1082 {
 1083         struct admsw_softc *sc = xsc;
 1084         struct ifnet *ifp;
 1085         int i;
 1086 
 1087         for (i = 0; i < SW_DEVS; i++) {
 1088                 ifp = sc->sc_ifnet[i];
 1089                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1090                         if (sc->ndevs == 0) {
 1091                                 admsw_init_bufs(sc);
 1092                                 admsw_reset(sc);
 1093                                 REG_WRITE(CPUP_CONF_REG,
 1094                                     CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
 1095                                     CPUP_CONF_DMCP_MASK);
 1096                                 /* clear all pending interrupts */
 1097                                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1098 
 1099                                 /* enable needed interrupts */
 1100                                 REG_WRITE(ADMSW_INT_MASK, 
 1101                                     REG_READ(ADMSW_INT_MASK) & 
 1102                                     ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | 
 1103                                         ADMSW_INTR_RHD | ADMSW_INTR_RLD | 
 1104                                         ADMSW_INTR_HDF | ADMSW_INTR_LDF));
 1105 
 1106                                 callout_reset(&sc->sc_watchdog, hz,
 1107                                     admsw_watchdog, sc);
 1108                         }
 1109                         sc->ndevs++;
 1110                 }
 1111 
 1112 
 1113                 /* mark iface as running */
 1114                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1115                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1116         }
 1117 
 1118         /* Set the receive filter. */
 1119         admsw_set_filter(sc);
 1120 }
 1121 
 1122 /*
 1123  * admsw_stop:          [ifnet interface function]
 1124  *
 1125  *      Stop transmission on the interface.
 1126  */
 1127 static void
 1128 admsw_stop(struct ifnet *ifp, int disable)
 1129 {
 1130         struct admsw_softc *sc = ifp->if_softc;
 1131 
 1132         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1133                 return;
 1134 
 1135         if (--sc->ndevs == 0) {
 1136                 /* printf("debug: de-initializing hardware\n"); */
 1137 
 1138                 /* disable cpu port */
 1139                 REG_WRITE(CPUP_CONF_REG,
 1140                                 CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
 1141                                 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);
 1142 
 1143                 /* XXX We should disable, then clear? --dyoung */
 1144                 /* clear all pending interrupts */
 1145                 REG_WRITE(ADMSW_INT_ST, INT_MASK);
 1146 
 1147                 /* disable interrupts */
 1148                 REG_WRITE(ADMSW_INT_MASK, INT_MASK);
 1149 
 1150                 /* Cancel the watchdog timer. */
 1151                 sc->sc_timer = 0;
 1152                 callout_stop(&sc->sc_watchdog);
 1153         }
 1154 
 1155         /* Mark the interface as down. */
 1156         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1157 
 1158         return;
 1159 }
 1160 
 1161 /*
 1162  * admsw_set_filter:
 1163  *
 1164  *      Set up the receive filter.
 1165  */
 1166 static void
 1167 admsw_set_filter(struct admsw_softc *sc)
 1168 {
 1169         int i;
 1170         uint32_t allmc, anymc, conf, promisc;
 1171         struct ifnet *ifp;
 1172         struct ifmultiaddr *ifma;
 1173 
 1174         /* Find which ports should be operated in promisc mode. */
 1175         allmc = anymc = promisc = 0;
 1176         for (i = 0; i < SW_DEVS; i++) {
 1177                 ifp = sc->sc_ifnet[i];
 1178                 if (ifp->if_flags & IFF_PROMISC)
 1179                         promisc |= vlan_matrix[i];
 1180 
 1181                 ifp->if_flags &= ~IFF_ALLMULTI;
 1182 
 1183                 if_maddr_rlock(ifp);
 1184                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
 1185                 {
 1186                         if (ifma->ifma_addr->sa_family != AF_LINK)
 1187                                 continue;
 1188 
 1189                         anymc |= vlan_matrix[i];
 1190                 }
 1191                 if_maddr_runlock(ifp);
 1192         }
 1193 
 1194         conf = REG_READ(CPUP_CONF_REG);
 1195         /* 1 Disable forwarding of unknown & multicast packets to
 1196          *   CPU on all ports.
 1197          * 2 Enable forwarding of unknown & multicast packets to
 1198          *   CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
 1199          */
 1200         conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
 1201         /* Enable forwarding of unknown packets to CPU on selected ports. */
 1202         conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
 1203         conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1204         conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
 1205         REG_WRITE(CPUP_CONF_REG, conf);
 1206 }
 1207 
 1208 /*
 1209  * admsw_add_rxbuf:
 1210  *
 1211  *      Add a receive buffer to the indicated descriptor.
 1212  */
 1213 static int
 1214 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
 1215 {
 1216         struct admsw_descsoft *ds;
 1217         struct mbuf *m;
 1218         int error;
 1219 
 1220         if (high)
 1221                 ds = &sc->sc_rxhsoft[idx];
 1222         else
 1223                 ds = &sc->sc_rxlsoft[idx];
 1224 
 1225         MGETHDR(m, M_NOWAIT, MT_DATA);
 1226         if (m == NULL)
 1227                 return (ENOBUFS);
 1228 
 1229         if (!(MCLGET(m, M_NOWAIT))) {
 1230                 m_freem(m);
 1231                 return (ENOBUFS);
 1232         }
 1233 
 1234         if (ds->ds_mbuf != NULL)
 1235                 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
 1236 
 1237         ds->ds_mbuf = m;
 1238 
 1239         error = bus_dmamap_load(sc->sc_bufs_dmat, ds->ds_dmamap,
 1240             m->m_ext.ext_buf, m->m_ext.ext_size, admsw_rxbuf_map_addr, 
 1241             ds, BUS_DMA_NOWAIT);
 1242         if (error) {
 1243                 device_printf(sc->sc_dev, 
 1244                     "can't load rx DMA map %d, error = %d\n", idx, error);
 1245                 panic("admsw_add_rxbuf");       /* XXX */
 1246         }
 1247 
 1248         bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD);
 1249 
 1250         if (high)
 1251                 ADMSW_INIT_RXHDESC(sc, idx);
 1252         else
 1253                 ADMSW_INIT_RXLDESC(sc, idx);
 1254 
 1255         return (0);
 1256 }
 1257 
 1258 static int
 1259 admsw_mediachange(struct ifnet *ifp)
 1260 {
 1261         struct admsw_softc *sc = ifp->if_softc;
 1262         int port = 0;
 1263         struct ifmedia *ifm;
 1264         int old, new, val;
 1265 
 1266         while (port < SW_DEVS) {
 1267                 if (ifp == sc->sc_ifnet[port])
 1268                         break;
 1269                 else
 1270                         port++;
 1271         }
 1272 
 1273         ifm = &sc->sc_ifmedia[port];
 1274 
 1275         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
 1276                 return (EINVAL);
 1277 
 1278         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
 1279                 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1280         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
 1281                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1282                         val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
 1283                 else
 1284                         val = PHY_CNTL2_100M;
 1285         } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
 1286                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
 1287                         val = PHY_CNTL2_FDX;
 1288                 else
 1289                         val = 0;
 1290         } else
 1291                 return (EINVAL);
 1292 
 1293         old = REG_READ(PHY_CNTL2_REG);
 1294         new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
 1295         new |= (val << port);
 1296 
 1297         if (new != old)
 1298                 REG_WRITE(PHY_CNTL2_REG, new);
 1299 
 1300         return (0);
 1301 }
 1302 
 1303 static void
 1304 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1305 {
 1306         struct admsw_softc *sc = ifp->if_softc;
 1307         int port = 0;
 1308         int status;
 1309 
 1310         while (port < SW_DEVS) {
 1311                 if (ifp == sc->sc_ifnet[port])
 1312                         break;
 1313                 else
 1314                         port++;
 1315         }
 1316 
 1317         ifmr->ifm_status = IFM_AVALID;
 1318         ifmr->ifm_active = IFM_ETHER;
 1319 
 1320         status = REG_READ(PHY_ST_REG) >> port;
 1321 
 1322         if ((status & PHY_ST_LINKUP) == 0) {
 1323                 ifmr->ifm_active |= IFM_NONE;
 1324                 return;
 1325         }
 1326 
 1327         ifmr->ifm_status |= IFM_ACTIVE;
 1328         ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
 1329         if (status & PHY_ST_FDX)
 1330                 ifmr->ifm_active |= IFM_FDX;
 1331 }
 1332 
 1333 static device_method_t admsw_methods[] = {
 1334         /* Device interface */
 1335         DEVMETHOD(device_probe,         admsw_probe),
 1336         DEVMETHOD(device_attach,        admsw_attach),
 1337         DEVMETHOD(device_detach,        admsw_detach),
 1338         DEVMETHOD(device_shutdown,      admsw_shutdown),
 1339 
 1340         { 0, 0 }
 1341 };
 1342 
 1343 static devclass_t admsw_devclass;
 1344 
 1345 static driver_t admsw_driver = {
 1346         "admsw",
 1347         admsw_methods,
 1348         sizeof(struct admsw_softc),
 1349 };
 1350 
 1351 DRIVER_MODULE(admsw, obio, admsw_driver, admsw_devclass, 0, 0);
 1352 MODULE_DEPEND(admsw, ether, 1, 1, 1);
