The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/if_lmc.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: if_lmc.c,v 1.20 2003/05/03 18:11:36 wiz Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 1997-1999 LAN Media Corporation (LMC)
    5  * All rights reserved.  www.lanmedia.com
    6  *
    7  * This code is written by Michael Graff <graff@vix.com> for LMC.
    8  * The code is derived from permitted modifications to software created
    9  * by Matt Thomas (matt@3am-software.com).
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above
   17  *    copyright notice, this list of conditions and the following disclaimer
   18  *    in the documentation and/or other materials provided with the
   19  *    distribution.
   20  * 3. All marketing or advertising materials mentioning features or
   21  *    use of this software must display the following acknowledgement:
   22  *      This product includes software developed by LAN Media Corporation
   23  *      and its contributors.
   24  * 4. Neither the name of LAN Media Corporation nor the names of its
   25  *    contributors may be used to endorse or promote products derived
   26  *    from this software without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY LAN MEDIA CORPORATION AND CONTRIBUTORS
   29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   38  * THE POSSIBILITY OF SUCH DAMAGE.
   39  */
   40 
   41 /*-
   42  * Copyright (c) 1994-1997 Matt Thomas (matt@3am-software.com)
   43  * All rights reserved.
   44  *
   45  * Redistribution and use in source and binary forms, with or without
   46  * modification, are permitted provided that the following conditions
   47  * are met:
   48  * 1. Redistributions of source code must retain the above copyright
   49  *    notice, this list of conditions and the following disclaimer.
   50  * 2. The name of the author may not be used to endorse or promote products
   51  *    derived from this software without specific prior written permission
   52  *
   53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   55  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   56  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   57  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   58  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   62  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   63  */
   64 
   65 #include <sys/cdefs.h>
   66 __KERNEL_RCSID(0, "$NetBSD: if_lmc.c,v 1.20 2003/05/03 18:11:36 wiz Exp $");
   67 
   68 #include <sys/param.h>
   69 #include <sys/systm.h>
   70 #include <sys/mbuf.h>
   71 #include <sys/socket.h>
   72 #include <sys/ioctl.h>
   73 #include <sys/errno.h>
   74 #include <sys/malloc.h>
   75 #include <sys/kernel.h>
   76 #include <sys/proc.h>   /* only for declaration of wakeup() used by vm.h */
   77 #if defined(__FreeBSD__)
   78 #include <machine/clock.h>
   79 #elif defined(__bsdi__) || defined(__NetBSD__)
   80 #include <sys/device.h>
   81 #endif
   82 
   83 #if defined(__NetBSD__)
   84 #include <dev/pci/pcidevs.h>
   85 #include "rnd.h"
   86 #if NRND > 0
   87 #include <sys/rnd.h>
   88 #endif
   89 #endif
   90 
   91 #include <net/if.h>
   92 #include <net/if_types.h>
   93 #include <net/if_dl.h>
   94 #include <net/netisr.h>
   95 
   96 #include "bpfilter.h"
   97 #if NBPFILTER > 0
   98 #include <net/bpf.h>
   99 #include <net/bpfdesc.h>
  100 #endif
  101 
  102 
  103 #if defined(__FreeBSD__)
  104 #include <net/if_sppp.h>
  105 #elif defined(__NetBSD__)
  106 #include <net/if_spppvar.h>
  107 #endif
  108 
  109 #if defined(__bsdi__)
  110 #if INET
  111 #include <netinet/in.h>
  112 #include <netinet/in_systm.h>
  113 #include <netinet/ip.h>
  114 #endif
  115 
  116 #include <net/netisr.h>
  117 #include <net/if.h>
  118 #include <net/netisr.h>
  119 #include <net/if_types.h>
  120 #include <net/if_p2p.h>
  121 #include <net/if_c_hdlc.h>
  122 #endif
  123 
  124 #if defined(__NetBSD__)
  125 #include <uvm/uvm_extern.h>
  126 #endif
  127 
  128 #if defined(__FreeBSD__)
  129 #include <vm/vm.h>
  130 #include <vm/pmap.h>
  131 #include <pci.h>
  132 #if NPCI > 0
  133 #include <pci/pcivar.h>
  134 #include <pci/dc21040reg.h>
  135 #endif
  136 #endif /* __FreeBSD__ */
  137 
  138 #if defined(__bsdi__)
  139 #include <vm/vm.h>
  140 #include <i386/pci/ic/dc21040.h>
  141 #include <i386/isa/isa.h>
  142 #include <i386/isa/icu.h>
  143 #include <i386/isa/dma.h>
  144 #include <i386/isa/isavar.h>
  145 #include <i386/pci/pci.h>
  146 
  147 #endif /* __bsdi__ */
  148 
  149 #if defined(__NetBSD__)
  150 #include <machine/bus.h>
  151 #if defined(__alpha__)
  152 #include <machine/intr.h>
  153 #endif
  154 #include <dev/pci/pcireg.h>
  155 #include <dev/pci/pcivar.h>
  156 #include <dev/ic/dc21040reg.h>
  157 #endif /* __NetBSD__ */
  158 
  159 /*
  160  * Sigh.  Every OS puts these in different places.
  161  */
  162 #if defined(__NetBSD__)
  163 #include <dev/pci/if_lmc_types.h>
  164 #include <dev/pci/if_lmcioctl.h>
  165 #include <dev/pci/if_lmcvar.h>
  166 #elif defined(__FreeBSD__)
  167 #include "pci/if_lmc_types.h"
  168 #include "pci/if_lmcioctl.h"
  169 #include "pci/if_lmcvar.h"
  170 #else /* BSDI */
  171 #include "i386/pci/if_lmctypes.h"
  172 #include "i386/pci/if_lmcioctl.h"
  173 #include "i386/pci/if_lmcvar.h"
  174 #endif
  175 
  176 /*
  177  * This module supports
  178  *      the DEC 21140A pass 2.2 PCI Fast Ethernet Controller.
  179  */
  180 static ifnet_ret_t lmc_ifstart_one(struct ifnet *ifp);
  181 static ifnet_ret_t lmc_ifstart(struct ifnet *ifp);
  182 static struct mbuf *lmc_txput(lmc_softc_t * const sc, struct mbuf *m);
  183 static void lmc_rx_intr(lmc_softc_t * const sc);
  184 
  185 #if defined(__NetBSD__) || defined(__FreeBSD__)
  186 static void lmc_watchdog(struct ifnet *ifp);
  187 #endif
  188 #if defined(__bsdi__)
  189 static int lmc_watchdog(int);
  190 #endif
  191 static void lmc_ifup(lmc_softc_t * const sc);
  192 static void lmc_ifdown(lmc_softc_t * const sc);
  193 
  194 
  195 /*
  196  * Code the read the SROM and MII bit streams (I2C)
  197  */
  198 static inline void
  199 lmc_delay_300ns(lmc_softc_t * const sc)
  200 {
  201         int idx;
  202         for (idx = (300 / 33) + 1; idx > 0; idx--)
  203                 (void)LMC_CSR_READ(sc, csr_busmode);
  204 }
  205 
  206 
/*
 * Write the current shift-register image in the local variable `csr`
 * to the SROM/MII CSR, then pause ~300ns so the serial ROM sees a
 * stable level before the next clock edge.
 * NOTE: expands in place and relies on locals named `sc` and `csr`.
 */
#define EMIT    \
do { \
        LMC_CSR_WRITE(sc, csr_srom_mii, csr); \
        lmc_delay_300ns(sc); \
} while (0)
  212 
/*
 * Force the serial ROM into its idle state: select it for reading,
 * toggle chip select and the clock, then clock out a long run of
 * zero bits before deselecting.
 */
static inline void
lmc_srom_idle(lmc_softc_t * const sc)
{
        unsigned bit, csr;

        csr  = SROMSEL ; EMIT;
        csr  = SROMSEL | SROMRD; EMIT;
        csr ^= SROMCS; EMIT;
        csr ^= SROMCLKON; EMIT;

        /*
         * Write 25 cycles of 0 which will force the SROM to be idle.
         */
        for (bit = 3 + SROM_BITWIDTH + 16; bit > 0; bit--) {
                csr ^= SROMCLKOFF; EMIT;    /* clock low; data not valid */
                csr ^= SROMCLKON; EMIT;     /* clock high; data valid */
        }
        /* park the clock low and release chip select */
        csr ^= SROMCLKOFF; EMIT;
        csr ^= SROMCS; EMIT;
        csr  = 0; EMIT;
}
  234 
  235      
/*
 * Read the entire serial ROM into sc->lmc_rombuf by bit-banging the
 * 3-wire SROM interface: for each word address, shift out the read
 * command plus address, then clock in 16 data bits.  Each 16-bit word
 * is stored low byte first.
 */
static void
lmc_srom_read(lmc_softc_t * const sc)
{
        unsigned idx;
        const unsigned bitwidth = SROM_BITWIDTH;
        const unsigned cmdmask = (SROMCMD_RD << bitwidth);
        const unsigned msb = 1 << (bitwidth + 3 - 1);
        unsigned lastidx = (1 << bitwidth) - 1;

        lmc_srom_idle(sc);

        for (idx = 0; idx <= lastidx; idx++) {
                unsigned lastbit, data, bits, bit, csr;
                /* select the SROM for reading and raise CS and clock */
                csr  = SROMSEL ;                EMIT;
                csr  = SROMSEL | SROMRD;        EMIT;
                csr ^= SROMCSON;                EMIT;
                csr ^=            SROMCLKON;    EMIT;

                /*
                 * Shift out the read command and word address, MSB first.
                 * The data line is toggled only when the bit value changes.
                 */
                lastbit = 0;
                for (bits = idx|cmdmask, bit = bitwidth + 3
                             ; bit > 0
                             ; bit--, bits <<= 1) {
                        const unsigned thisbit = bits & msb;
                        csr ^= SROMCLKOFF; EMIT;    /* clock L data invalid */
                        if (thisbit != lastbit) {
                                csr ^= SROMDOUT; EMIT;/* clock L invert data */
                        } else {
                                EMIT;
                        }
                        csr ^= SROMCLKON; EMIT;     /* clock H data valid */
                        lastbit = thisbit;
                }
                csr ^= SROMCLKOFF; EMIT;

                /* clock in the 16 data bits, MSB first */
                for (data = 0, bits = 0; bits < 16; bits++) {
                        data <<= 1;
                        csr ^= SROMCLKON; EMIT;     /* clock H data valid */
                        data |= LMC_CSR_READ(sc, csr_srom_mii) & SROMDIN ? 1 : 0;
                        csr ^= SROMCLKOFF; EMIT;    /* clock L data invalid */
                }
                /* store the word low byte first, then deselect the SROM */
                sc->lmc_rombuf[idx*2] = data & 0xFF;
                sc->lmc_rombuf[idx*2+1] = data >> 8;
                csr  = SROMSEL | SROMRD; EMIT;
                csr  = 0; EMIT;
        }
        lmc_srom_idle(sc);
}
  283 
  284 #define MII_EMIT    do { LMC_CSR_WRITE(sc, csr_srom_mii, csr); lmc_delay_300ns(sc); } while (0)
  285 
/*
 * Shift out the top `bits` bits of `data`, MSB first, on the MII
 * management data line: assert write mode, then for each bit toggle
 * the data output only when its value changes and strobe the clock.
 */
static inline void
lmc_mii_writebits(lmc_softc_t * const sc, unsigned data, unsigned bits)
{
    unsigned msb = 1 << (bits - 1);
    unsigned csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    unsigned lastbit = (csr & MII_DOUT) ? msb : 0;

    csr |= MII_WR; MII_EMIT;            /* clock low; assert write */

    for (; bits > 0; bits--, data <<= 1) {
        const unsigned thisbit = data & msb;
        if (thisbit != lastbit) {
            csr ^= MII_DOUT; MII_EMIT;  /* clock low; invert data */
        }
        csr ^= MII_CLKON; MII_EMIT;     /* clock high; data valid */
        lastbit = thisbit;
        csr ^= MII_CLKOFF; MII_EMIT;    /* clock low; data not valid */
    }
}
  305 
/*
 * Perform the MII turnaround phase that follows the address bits.
 * For a write command, drive the turnaround pattern on the data line;
 * for a read, switch the interface to read mode so the PHY can drive
 * the data line.  Ends with one full clock cycle in either case.
 */
static void
lmc_mii_turnaround(lmc_softc_t * const sc, u_int32_t cmd)
{
    u_int32_t csr;

    csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    if (cmd == MII_WRCMD) {
        csr |= MII_DOUT; MII_EMIT;      /* clock low; change data */
        csr ^= MII_CLKON; MII_EMIT;     /* clock high; data valid */
        csr ^= MII_CLKOFF; MII_EMIT;    /* clock low; data not valid */
        csr ^= MII_DOUT; MII_EMIT;      /* clock low; change data */
    } else {
        csr |= MII_RD; MII_EMIT;        /* clock low; switch to read */
    }
    csr ^= MII_CLKON; MII_EMIT;         /* clock high; data valid */
    csr ^= MII_CLKOFF; MII_EMIT;        /* clock low; data not valid */
}
  323 
/*
 * Clock in a 16-bit value from the MII management data line, MSB first,
 * then turn read mode back off.  Assumes lmc_mii_turnaround() has
 * already switched the interface to read mode.
 */
static u_int32_t
lmc_mii_readbits(lmc_softc_t * const sc)
{
    u_int32_t data;
    u_int32_t csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    int idx;

    for (idx = 0, data = 0; idx < 16; idx++) {
        data <<= 1;     /* this is NOOP on the first pass through */
        csr ^= MII_CLKON; MII_EMIT;     /* clock high; data valid */
        if (LMC_CSR_READ(sc, csr_srom_mii) & MII_DIN)
            data |= 1;
        csr ^= MII_CLKOFF; MII_EMIT;    /* clock low; data not valid */
    }
    csr ^= MII_RD; MII_EMIT;            /* clock low; turn off read */

    return data;
}
  342 
  343 u_int32_t
  344 lmc_mii_readreg(lmc_softc_t * const sc, u_int32_t devaddr, u_int32_t regno)
  345 {
  346     u_int32_t csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
  347     u_int32_t data;
  348 
  349     csr &= ~(MII_RD|MII_CLK); MII_EMIT;
  350     lmc_mii_writebits(sc, MII_PREAMBLE, 32);
  351     lmc_mii_writebits(sc, MII_RDCMD, 8);
  352     lmc_mii_writebits(sc, devaddr, 5);
  353     lmc_mii_writebits(sc, regno, 5);
  354     lmc_mii_turnaround(sc, MII_RDCMD);
  355 
  356     data = lmc_mii_readbits(sc);
  357     return (data);
  358 }
  359 
/*
 * Write a 16-bit value to a PHY register over the bit-banged MII
 * management interface: preamble, write opcode, device and register
 * addresses, turnaround, then the 16 data bits.
 */
void
lmc_mii_writereg(lmc_softc_t * const sc, u_int32_t devaddr,
                   u_int32_t regno, u_int32_t data)
{
    u_int32_t csr;

    /* take over the bus: clear read mode and drop the clock */
    csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    csr &= ~(MII_RD|MII_CLK); MII_EMIT;
    lmc_mii_writebits(sc, MII_PREAMBLE, 32);
    lmc_mii_writebits(sc, MII_WRCMD, 8);
    lmc_mii_writebits(sc, devaddr, 5);
    lmc_mii_writebits(sc, regno, 5);
    lmc_mii_turnaround(sc, MII_WRCMD);
    lmc_mii_writebits(sc, data, 16);
}
  375 
/*
 * Fill in sc->lmc_enaddr with the 6-byte MAC address taken from the
 * serial ROM at byte offset 20.
 * NOTE(review): the offset is a magic constant — presumably the
 * address field of the board's SROM layout; confirm against the
 * SROM format documentation.  Always returns 0.
 */
int
lmc_read_macaddr(lmc_softc_t * const sc)
{
        lmc_srom_read(sc);

        memcpy(sc->lmc_enaddr, sc->lmc_rombuf + 20, 6);

        return 0;
}
  385 
  386 /*
  387  * Check to make certain there is a signal from the modem, and flicker
  388  * lights as needed.
  389  */
  390 #if defined(__NetBSD__) || defined(__FreeBSD__)
  391 static void
  392 lmc_watchdog(struct ifnet *ifp)
  393 #endif
  394 #if defined(__bsdi__)
  395 static int
  396 lmc_watchdog(int unit)
  397 #endif
  398 {
  399 #if defined(__NetBSD__) || defined(__FreeBSD__)
  400         lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
  401 #endif
  402 #if defined(__bsdi__)
  403         lmc_softc_t * const sc = LMC_UNIT_TO_SOFTC(unit);
  404         struct ifnet *ifp = &sc->lmc_if;
  405 #endif
  406         u_int32_t ostatus;
  407         u_int32_t link_status;
  408         u_int32_t ticks;
  409 
  410         /*
  411          * Make sure the tx jabber and rx watchdog are off,
  412          * and the transmit and receive processes are running.
  413          */
  414         LMC_CSR_WRITE (sc, csr_15, 0x00000011);
  415         sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
  416         LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
  417 
  418         /* Is the transmit clock still available? */
  419         ticks = LMC_CSR_READ (sc, csr_gp_timer);
  420         ticks = 0x0000ffff - (ticks & 0x0000ffff);
  421         if (ticks == 0)
  422         {
  423                 /* no clock found ? */
  424                 if (sc->tx_clockState != 0)
  425                 {
  426                         sc->tx_clockState = 0;
  427                         if (sc->lmc_cardtype == LMC_CARDTYPE_SSI)
  428                                 lmc_led_on (sc, LMC_MII16_LED3); /* ON red */
  429                 }
  430         else
  431                 if (sc->tx_clockState == 0)
  432                 {
  433                         sc->tx_clockState = 1;
  434                         if (sc->lmc_cardtype == LMC_CARDTYPE_SSI)
  435                                 lmc_led_off (sc, LMC_MII16_LED3); /* OFF red */
  436                 }
  437         }
  438 
  439         link_status = sc->lmc_media->get_link_status(sc);
  440         ostatus = ((sc->lmc_flags & LMC_MODEMOK) == LMC_MODEMOK);
  441 
  442         /*
  443          * hardware level link lost, but the interface is marked as up.
  444          * Mark it as down.
  445          */
  446         if (link_status == LMC_LINK_DOWN && ostatus) {
  447                 printf(LMC_PRINTF_FMT ": physical link down\n",
  448                        LMC_PRINTF_ARGS);
  449                 sc->lmc_flags &= ~LMC_MODEMOK;
  450                 if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
  451                     sc->lmc_cardtype == LMC_CARDTYPE_T1)
  452                         lmc_led_on (sc, LMC_DS3_LED3 | LMC_DS3_LED2);
  453                                                         /* turn on red LED */
  454                 else {
  455                         lmc_led_off (sc, LMC_MII16_LED1);
  456                         lmc_led_on (sc, LMC_MII16_LED0);
  457                         if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_EXT)
  458                                 lmc_led_on (sc, LMC_MII16_LED3);
  459                 }
  460 
  461         }
  462 
  463         /*
  464          * hardware link is up, but the interface is marked as down.
  465          * Bring it back up again.
  466          */
  467         if (link_status != LMC_LINK_DOWN && !ostatus) {
  468                 printf(LMC_PRINTF_FMT ": physical link up\n",
  469                        LMC_PRINTF_ARGS);
  470                 if (sc->lmc_flags & LMC_IFUP) {
  471                         lmc_ifup(sc);
  472 #if 0 && (defined(__NetBSD__) || defined(__FreeBSD__))
  473                         if (sc->lmc_if.if_flags & IFF_UP) {
  474                                 struct sppp *sp = &sc->lmc_sppp;
  475 
  476                                 /* re-connect LCP */
  477                                 (sp->pp_down)(sp);
  478                                 (sp->pp_up)(sp);
  479                         }
  480 #endif
  481                 }
  482                 sc->lmc_flags |= LMC_MODEMOK;
  483                 if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
  484                     sc->lmc_cardtype == LMC_CARDTYPE_T1)
  485                 {
  486                         sc->lmc_miireg16 |= LMC_DS3_LED3;
  487                         lmc_led_off (sc, LMC_DS3_LED3);
  488                                                         /* turn off red LED */
  489                         lmc_led_on (sc, LMC_DS3_LED2);
  490                 } else {
  491                         lmc_led_on (sc, LMC_MII16_LED0 | LMC_MII16_LED1
  492                                     | LMC_MII16_LED2);
  493                         if (sc->lmc_timing != LMC_CTL_CLOCK_SOURCE_EXT)
  494                                 lmc_led_off (sc, LMC_MII16_LED3);
  495                 }
  496 
  497                 return;
  498         }
  499 
  500         /* Call media specific watchdog functions */
  501         sc->lmc_media->watchdog(sc);
  502 
  503         /*
  504          * remember the timer value
  505          */
  506         ticks = LMC_CSR_READ(sc, csr_gp_timer);
  507         LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
  508         sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
  509 
  510         ifp->if_timer = 1;
  511 }
  512 
  513 /*
  514  * Mark the interface as "up" and enable TX/RX and TX/RX interrupts.
  515  * This also does a full software reset.
  516  */
static void
lmc_ifup(lmc_softc_t * const sc)
{
        /* stop the watchdog while the chip is reset */
        sc->lmc_if.if_timer = 0;

        lmc_dec_reset(sc);
        lmc_reset(sc);

        sc->lmc_media->set_link_status(sc, LMC_LINK_UP);
        sc->lmc_media->set_status(sc, NULL);

        sc->lmc_flags |= LMC_IFUP;

        /*
         * for DS3 & DS1 adapters light the green light, led2
         */
        if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
            sc->lmc_cardtype == LMC_CARDTYPE_T1)
                lmc_led_on (sc, LMC_MII16_LED2);
        else
                lmc_led_on (sc, LMC_MII16_LED0 | LMC_MII16_LED2);

        /*
         * select what interrupts we want to get
         */
        sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                               | TULIP_STS_RXINTR
                               | TULIP_STS_RXNOBUF
                               | TULIP_STS_TXINTR
                               | TULIP_STS_ABNRMLINTR
                               | TULIP_STS_SYSERROR
                               | TULIP_STS_TXSTOPPED
                               | TULIP_STS_TXUNDERFLOW
                               | TULIP_STS_RXSTOPPED
                               );
        LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

        /* start the transmit and receive processes */
        sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
        sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
        LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

        /* re-arm the interface watchdog timer */
        sc->lmc_if.if_timer = 1;
}
  560 
  561 /*
  562  * Mark the interface as "down" and disable TX/RX and TX/RX interrupts.
  563  * This is done by performing a full reset on the interface.
  564  */
static void
lmc_ifdown(lmc_softc_t * const sc)
{

#if 0 && (defined(__NetBSD__) || defined(__FreeBSD__))
        if (sc->lmc_if.if_flags & IFF_UP) {
                struct sppp *sp = &sc->lmc_sppp;

                /* disconnect LCP */
                (sp->pp_down)(sp);
        }
#endif

        /* stop the watchdog and mark the interface down */
        sc->lmc_if.if_timer = 0;
        sc->lmc_flags &= ~LMC_IFUP;

        sc->lmc_media->set_link_status(sc, LMC_LINK_DOWN);
        lmc_led_off(sc, LMC_MII16_LED_ALL);

        /* full hardware reset disables TX/RX and their interrupts */
        lmc_dec_reset(sc);
        lmc_reset(sc);
        sc->lmc_media->set_status(sc, NULL);
}
  588 
  589 static void
  590 lmc_rx_intr(lmc_softc_t * const sc)
  591 {
  592         lmc_ringinfo_t * const ri = &sc->lmc_rxinfo;
  593         struct ifnet * const ifp = &sc->lmc_if;
  594         u_int32_t status;
  595         int fillok = 1;
  596 
  597         sc->lmc_rxtick++;
  598 
  599         for (;;) {
  600                 lmc_desc_t *eop = ri->ri_nextin;
  601                 int total_len = 0, last_offset = 0;
  602                 struct mbuf *ms = NULL, *me = NULL;
  603                 int accept = 0;
  604 #if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NORX)
  605                 bus_dmamap_t map;
  606                 int error;
  607 #endif
  608 
  609                 if (fillok && sc->lmc_rxq.ifq_len < LMC_RXQ_TARGET)
  610                         goto queue_mbuf;
  611 
  612                 /*
  613                  * If the TULIP has no descriptors, there can't be any receive
  614                  * descriptors to process.
  615                  */
  616                 if (eop == ri->ri_nextout)
  617                         break;
  618             
  619                 /*
  620                  * 90% of the packets will fit in one descriptor.  So we
  621                  * optimize for that case.
  622                  */
  623                 LMC_RXDESC_POSTSYNC(sc, eop, sizeof(*eop));
  624                 status = le32toh(((volatile lmc_desc_t *) eop)->d_status);
  625                 if ((status &
  626                         (TULIP_DSTS_OWNER|TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) == 
  627                         (TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) {
  628                         IF_DEQUEUE(&sc->lmc_rxq, ms);
  629                         me = ms;
  630                 } else {
  631                         /*
  632                          * If still owned by the TULIP, don't touch it.
  633                          */
  634                         if (status & TULIP_DSTS_OWNER)
  635                                 break;
  636 
  637                         /*
  638                          * It is possible (though improbable unless the
  639                          * BIG_PACKET support is enabled or MCLBYTES < 1518)
  640                          * for a received packet to cross more than one
  641                          * receive descriptor.
  642                          */
  643                         while ((status & TULIP_DSTS_RxLASTDESC) == 0) {
  644                                 if (++eop == ri->ri_last)
  645                                         eop = ri->ri_first;
  646                                 LMC_RXDESC_POSTSYNC(sc, eop, sizeof(*eop));
  647                                 status = le32toh(((volatile lmc_desc_t *)
  648                                         eop)->d_status);
  649                                 if (eop == ri->ri_nextout || 
  650                                         (status & TULIP_DSTS_OWNER)) {
  651                                         return;
  652                                 }
  653                                 total_len++;
  654                         }
  655 
  656                         /*
  657                          * Dequeue the first buffer for the start of the
  658                          * packet.  Hopefully this will be the only one we
  659                          * need to dequeue.  However, if the packet consumed
  660                          * multiple descriptors, then we need to dequeue
  661                          * those buffers and chain to the starting mbuf.
  662                          * All buffers but the last buffer have the same
  663                          * length so we can set that now. (we add to
  664                          * last_offset instead of multiplying since we
  665                          * normally won't go into the loop and thereby
  666                          * saving a ourselves from doing a multiplication
  667                          * by 0 in the normal case).
  668                          */
  669                         IF_DEQUEUE(&sc->lmc_rxq, ms);
  670                         for (me = ms; total_len > 0; total_len--) {
  671 #if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NORX)
  672                                 map = M_GETCTX(me, bus_dmamap_t);
  673                                 LMC_RXMAP_POSTSYNC(sc, map);
  674                                 bus_dmamap_unload(sc->lmc_dmatag, map);
  675                                 sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
  676 #if defined(DIAGNOSTIC)
  677                                 M_SETCTX(me, NULL);
  678 #endif
  679 #endif /* LMC_BUS_DMA */
  680                                 me->m_len = LMC_RX_BUFLEN;
  681                                 last_offset += LMC_RX_BUFLEN;
  682                                 IF_DEQUEUE(&sc->lmc_rxq, me->m_next);
  683                                 me = me->m_next;
  684                         }
  685                 }
  686 
  687                 /*
  688                  *  Now get the size of received packet (minus the CRC).
  689                  */
  690                 total_len = ((status >> 16) & 0x7FFF);
  691                 if (sc->ictl.crc_length == 16)
  692                         total_len -= 2;
  693                 else
  694                         total_len -= 4;
  695 
  696                 if ((sc->lmc_flags & LMC_RXIGNORE) == 0
  697                     && ((status & LMC_DSTS_ERRSUM) == 0
  698 #ifdef BIG_PACKET
  699                         || (total_len <= sc->lmc_if.if_mtu + PPP_HEADER_LEN
  700                             && (status & TULIP_DSTS_RxOVERFLOW) == 0)
  701 #endif
  702                         )) {
  703 
  704 #if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NORX)
  705                         map = M_GETCTX(me, bus_dmamap_t);
  706                         bus_dmamap_sync(sc->lmc_dmatag, map, 0, me->m_len,
  707                                 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
  708                         bus_dmamap_unload(sc->lmc_dmatag, map);
  709                         sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
  710 #if defined(DIAGNOSTIC)
  711                         M_SETCTX(me, NULL);
  712 #endif
  713 #endif /* LMC_BUS_DMA */
  714 
  715                         me->m_len = total_len - last_offset;
  716 #if NBPFILTER > 0
  717                         if (sc->lmc_bpf != NULL) {
  718                                 if (me == ms)
  719                                         LMC_BPF_TAP(sc, mtod(ms, caddr_t), total_len);
  720                                 else
  721                                         LMC_BPF_MTAP(sc, ms);
  722                         }
  723 #endif
  724                         sc->lmc_flags |= LMC_RXACT;
  725                         accept = 1;
  726                 } else {
  727                         ifp->if_ierrors++;
  728                         if (status & TULIP_DSTS_RxOVERFLOW) {
  729                                 sc->lmc_dot3stats.dot3StatsInternalMacReceiveErrors++;
  730                         }
  731 #if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NORX)
  732                         map = M_GETCTX(me, bus_dmamap_t);
  733                         bus_dmamap_unload(sc->lmc_dmatag, map);
  734                         sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
  735 #if defined(DIAGNOSTIC)
  736                         M_SETCTX(me, NULL);
  737 #endif
  738 #endif /* LMC_BUS_DMA */
  739                 }
  740 
  741                 ifp->if_ipackets++;
  742                 if (++eop == ri->ri_last)
  743                         eop = ri->ri_first;
  744                 ri->ri_nextin = eop;
  745 
  746         queue_mbuf:
  747                 /*
  748                  * Either we are priming the TULIP with mbufs (m == NULL)
  749                  * or we are about to accept an mbuf for the upper layers
  750                  * so we need to allocate an mbuf to replace it.  If we
  751                  * can't replace it, send up it anyways.  This may cause
  752                  * us to drop packets in the future but that's better than
  753                  * being caught in livelock.
  754                  *
  755                  * Note that if this packet crossed multiple descriptors
  756                  * we don't even try to reallocate all the mbufs here.
  757                  * Instead we rely on the test of the beginning of
  758                  * the loop to refill for the extra consumed mbufs.
  759                  */
  760                 if (accept || ms == NULL) {
  761                         struct mbuf *m0;
  762                         MGETHDR(m0, M_DONTWAIT, MT_DATA);
  763                         if (m0 != NULL) {
  764                                 MCLGET(m0, M_DONTWAIT);
  765                                 if ((m0->m_flags & M_EXT) == 0) {
  766                                         m_freem(m0);
  767                                         m0 = NULL;
  768                                 }
  769                         }
  770                         if (accept) {
  771                                 ms->m_pkthdr.len = total_len;
  772                                 ms->m_pkthdr.rcvif = ifp;
  773 #if defined(__NetBSD__) || defined(__FreeBSD__)
  774                                 sppp_input(ifp, ms);
  775 #endif
  776 #if defined(__bsdi__)
  777                                 sc->lmc_p2pcom.p2p_input(&sc->lmc_p2pcom, ms);
  778 #endif
  779                         }
  780                         ms = m0;
  781                 }
  782                 if (ms == NULL) {
  783                         /*
  784                          * Couldn't allocate a new buffer.  Don't bother 
  785                          * trying to replenish the receive queue.
  786                          */
  787                         fillok = 0;
  788                         sc->lmc_flags |= LMC_RXBUFSLOW;
  789                         continue;
  790                 }
  791                 /*
  792                  * Now give the buffer(s) to the TULIP and save in our
  793                  * receive queue.
  794                  */
  795                 do {
  796                         u_int32_t ctl;
  797 #if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NORX)
  798                         lmc_desc_t * const nextout = ri->ri_nextout;
  799 
  800                         if (sc->lmc_rxmaps_free > 0) {
  801                                 map = sc->lmc_rxmaps[--sc->lmc_rxmaps_free];
  802                         } else {
  803                                 m_freem(ms);
  804                                 sc->lmc_flags |= LMC_RXBUFSLOW;
  805 #if defined(LMC_DEBUG)
  806                                 sc->lmc_dbg.dbg_rxlowbufs++;
  807 #endif
  808                                 break;
  809                         }
  810                         M_SETCTX(ms, map);
  811                         error = bus_dmamap_load(sc->lmc_dmatag, map,
  812                                 mtod(ms, void *), LMC_RX_BUFLEN, 
  813                                 NULL, BUS_DMA_NOWAIT);
  814                         if (error) {
  815                                 printf(LMC_PRINTF_FMT
  816                                         ": unable to load rx map, "
  817                                         "error = %d\n",
  818                                         LMC_PRINTF_ARGS, error);
  819                                 panic("lmc_rx_intr");           /* XXX */
  820                         }
  821 
  822                         ctl = le32toh(nextout->d_ctl);
  823                         /* For some weird reason we lose TULIP_DFLAG_ENDRING */
  824                         if ((nextout+1) == ri->ri_last)
  825                                 ctl = LMC_CTL(LMC_CTL_FLGS(ctl)|
  826                                         TULIP_DFLAG_ENDRING, 0, 0);
  827                         nextout->d_addr1 = htole32(map->dm_segs[0].ds_addr);
  828                         if (map->dm_nsegs == 2) {
  829                                 nextout->d_addr2 = htole32(map->dm_segs[1].ds_addr);
  830                                 nextout->d_ctl = 
  831                                         htole32(LMC_CTL(LMC_CTL_FLGS(ctl),
  832                                                 map->dm_segs[0].ds_len,
  833                                                 map->dm_segs[1].ds_len));
  834                         } else {
  835                                 nextout->d_addr2 = 0;
  836                                 nextout->d_ctl = 
  837                                         htole32(LMC_CTL(LMC_CTL_FLGS(ctl),
  838                                                 map->dm_segs[0].ds_len, 0));
  839                         }
  840                         LMC_RXDESC_POSTSYNC(sc, nextout, sizeof(*nextout));
  841 #else /* LMC_BUS_DMA */
  842                         ctl = le32toh(ri->ri_nextout->d_ctl);
  843                         ri->ri_nextout->d_addr1 = htole32(LMC_KVATOPHYS(sc, 
  844                                 mtod(ms, caddr_t)));
  845                         ri->ri_nextout->d_ctl = htole32(LMC_CTL(LMC_CTL_FLGS(ctl), 
  846                                 LMC_RX_BUFLEN, 0));
  847 #endif /* LMC_BUS_DMA */
  848                         ri->ri_nextout->d_status = htole32(TULIP_DSTS_OWNER);
  849                         LMC_RXDESC_POSTSYNC(sc, nextout, sizeof(u_int32_t));
  850                         if (++ri->ri_nextout == ri->ri_last)
  851                                 ri->ri_nextout = ri->ri_first;
  852                         me = ms->m_next;
  853                         ms->m_next = NULL;
  854                         IF_ENQUEUE(&sc->lmc_rxq, ms);
  855                 } while ((ms = me) != NULL);
  856 
  857                 if (sc->lmc_rxq.ifq_len >= LMC_RXQ_TARGET)
  858                         sc->lmc_flags &= ~LMC_RXBUFSLOW;
  859         }
  860 }
  861 
/*
 * Reclaim transmit descriptors that the chip has finished with.
 * Walks the tx ring from ri_nextin until a descriptor still owned
 * by the TULIP is found, freeing the mbuf chain (and, under
 * LMC_BUS_DMA, recycling the dmamap) for each completed packet and
 * accumulating error statistics from the descriptor status bits.
 *
 * Returns the number of descriptors reclaimed.  IFF_OACTIVE is
 * cleared whenever at least one descriptor is freed so output can
 * resume.
 */
static int
lmc_tx_intr(lmc_softc_t * const sc)
{
    lmc_ringinfo_t * const ri = &sc->lmc_txinfo;
    struct mbuf *m;
    int xmits = 0;      /* completed packets (TxLASTSEG descriptors seen) */
    int descs = 0;      /* ring slots reclaimed */
    u_int32_t d_status;

    sc->lmc_txtick++;

    while (ri->ri_free < ri->ri_max) {
        u_int32_t flag;

        LMC_TXDESC_POSTSYNC(sc, ri->ri_nextin, sizeof(*ri->ri_nextin));
        /* Volatile read: the chip may update this word at any time. */
        d_status = le32toh(((volatile lmc_desc_t *) ri->ri_nextin)->d_status);
        if (d_status & TULIP_DSTS_OWNER)
            break;      /* still owned by the chip; nothing more to reap */

        flag = LMC_CTL_FLGS(le32toh(ri->ri_nextin->d_ctl));
        if (flag & TULIP_DFLAG_TxLASTSEG) {
                /* Last segment of a packet: release its mbuf chain. */
                IF_DEQUEUE(&sc->lmc_txq, m);
                if (m != NULL) {
#if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NOTX)
                    bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
                    LMC_TXMAP_POSTSYNC(sc, map);
                    /* Return the dmamap to the free pool for reuse. */
                    sc->lmc_txmaps[sc->lmc_txmaps_free++] = map;
#endif /* LMC_BUS_DMA */
#if NBPFILTER > 0
                    if (sc->lmc_bpf != NULL)
                        LMC_BPF_MTAP(sc, m);
#endif
                    m_freem(m);
#if defined(LMC_DEBUG)
                /*
                 * Without LMC_DEBUG the brace below simply closes the
                 * if (m != NULL); with it, the else arm reports the
                 * (should-be-impossible) empty-txq case.
                 */
                } else {
                    printf(LMC_PRINTF_FMT ": tx_intr: failed to dequeue mbuf?!?\n", LMC_PRINTF_ARGS);
#endif
                }
                    /*
                     * Statistics are accumulated for every TxLASTSEG
                     * descriptor, even if the dequeue above failed.
                     */
                    xmits++;
                    if (d_status & LMC_DSTS_ERRSUM) {
                        sc->lmc_if.if_oerrors++;
                        if (d_status & TULIP_DSTS_TxUNDERFLOW) {
                            sc->lmc_dot3stats.dot3StatsInternalTransmitUnderflows++;
                        }
                    } else {
                        if (d_status & TULIP_DSTS_TxDEFERRED) {
                            sc->lmc_dot3stats.dot3StatsDeferredTransmissions++;
                        }
                    }
        }

        if (++ri->ri_nextin == ri->ri_last)
            ri->ri_nextin = ri->ri_first;

        ri->ri_free++;
        descs++;
        /* At least one slot freed: allow the start routine to run. */
        sc->lmc_if.if_flags &= ~IFF_OACTIVE;
    }
    /*
     * If nothing left to transmit, disable the timer.
     * Else if progress, reset the timer back to 2 ticks.
     */
    sc->lmc_if.if_opackets += xmits;

    return descs;
}
  928 
/*
 * Report an abnormal interrupt on the console.  The csr argument
 * (the masked abnormal status bits) is currently unused: only a
 * generic one-line message is printed.
 */
static void
lmc_print_abnormal_interrupt (lmc_softc_t * const sc, u_int32_t csr)
{
        printf(LMC_PRINTF_FMT ": Abnormal interrupt\n", LMC_PRINTF_ARGS);
}
  934 
/*
 * Common interrupt service loop.  Repeatedly reads the status CSR
 * until no enabled interrupt bits remain, acknowledging each batch
 * by writing the bits back, and dispatches to the rx/tx handlers.
 * *progress_p is set to 1 if any interrupt work was done at all.
 */
static void
lmc_intr_handler(lmc_softc_t * const sc, int *progress_p)
{
    u_int32_t csr;

    while ((csr = LMC_CSR_READ(sc, csr_status)) & sc->lmc_intrmask) {

#if defined(__NetBSD__)
#if NRND > 0
            rnd_add_uint32(&sc->lmc_rndsource, csr);
#endif
#endif

        *progress_p = 1;
        /* Writing the set bits back to csr_status acknowledges them. */
        LMC_CSR_WRITE(sc, csr_status, csr);

        if (csr & TULIP_STS_SYSERROR) {
            sc->lmc_last_system_error = (csr & TULIP_STS_ERRORMASK) >> TULIP_STS_ERR_SHIFT;
            if (sc->lmc_flags & LMC_NOMESSAGES) {
                sc->lmc_flags |= LMC_SYSTEMERROR;
            } else {
                printf(LMC_PRINTF_FMT ": system error: %s\n",
                       LMC_PRINTF_ARGS,
                       lmc_system_errors[sc->lmc_last_system_error]);
            }
            /* A system error requires a chip reset; stop servicing now. */
            sc->lmc_flags |= LMC_NEEDRESET;
            sc->lmc_system_errors++;
            break;
        }
        if (csr & (TULIP_STS_RXINTR | TULIP_STS_RXNOBUF)) {
            u_int32_t misses = LMC_CSR_READ(sc, csr_missed_frames);
            if (csr & TULIP_STS_RXNOBUF)
                sc->lmc_dot3stats.dot3StatsMissedFrames += misses & 0xFFFF;
            /*
             * Pass 2.[012] of the 21140A-A[CDE] may hang and/or corrupt data
             * on receive overflows.
             */
           if ((misses & 0x0FFE0000) && (sc->lmc_features & LMC_HAVE_RXBADOVRFLW)) {
                sc->lmc_dot3stats.dot3StatsInternalMacReceiveErrors++;
                /*
                 * Stop the receiver process and spin until it's stopped.
                 * Tell rx_intr to drop the packets it dequeues.
                 */
                LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode & ~TULIP_CMD_RXRUN);
                /* Busy-wait, bounded by the hardware halting its rx engine. */
                while ((LMC_CSR_READ(sc, csr_status) & TULIP_STS_RXSTOPPED) == 0)
                    ;
                LMC_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED);
                sc->lmc_flags |= LMC_RXIGNORE;
            }
            lmc_rx_intr(sc);
            if (sc->lmc_flags & LMC_RXIGNORE) {
                /*
                 * Restart the receiver.
                 */
                sc->lmc_flags &= ~LMC_RXIGNORE;
                LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
            }
        }
        if (csr & TULIP_STS_ABNRMLINTR) {
            u_int32_t tmp = csr & sc->lmc_intrmask
                & ~(TULIP_STS_NORMALINTR|TULIP_STS_ABNRMLINTR);
            if (csr & TULIP_STS_TXUNDERFLOW) {
                /*
                 * Raise the tx FIFO threshold on underflow; once at the
                 * maximum (THRSHLD160), fall back to store-and-forward
                 * if the chip supports it.
                 */
                if ((sc->lmc_cmdmode & TULIP_CMD_THRESHOLDCTL) != TULIP_CMD_THRSHLD160) {
                    sc->lmc_cmdmode += TULIP_CMD_THRSHLD96;
                    sc->lmc_flags |= LMC_NEWTXTHRESH;
                } else if (sc->lmc_features & LMC_HAVE_STOREFWD) {
                    sc->lmc_cmdmode |= TULIP_CMD_STOREFWD;
                    sc->lmc_flags |= LMC_NEWTXTHRESH;
                }
            }
            /* Print only the first abnormal interrupt, then go quiet. */
            if (sc->lmc_flags & LMC_NOMESSAGES) {
                sc->lmc_statusbits |= tmp;
            } else {
                lmc_print_abnormal_interrupt(sc, tmp);
                sc->lmc_flags |= LMC_NOMESSAGES;
            }
            LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
        }

        if (csr & TULIP_STS_TXINTR)
                lmc_tx_intr(sc);

        /* Retry queued output if txput previously ran out of ring space. */
        if (sc->lmc_flags & LMC_WANTTXSTART)
            lmc_ifstart(&sc->lmc_if);
    }
}
 1021 
/*
 * Interrupt entry point: run the common handler and report whether
 * any work was done.  On platforms where lmc_intrfunc_t is void
 * (LMC_VOID_INTRFUNC defined), no value is returned.
 */
lmc_intrfunc_t
lmc_intr_normal(void *arg)
{
        lmc_softc_t * sc = (lmc_softc_t *) arg;
        int progress = 0;

        lmc_intr_handler(sc, &progress);

#if !defined(LMC_VOID_INTRFUNC)
        return progress;
#endif
}
 1034 
 1035 static struct mbuf *
 1036 lmc_mbuf_compress(struct mbuf *m)
 1037 {
 1038         struct mbuf *m0;
 1039 #if MCLBYTES >= LMC_MTU + PPP_HEADER_LEN && !defined(BIG_PACKET)
 1040         MGETHDR(m0, M_DONTWAIT, MT_DATA);
 1041         if (m0 != NULL) {
 1042                 if (m->m_pkthdr.len > MHLEN) {
 1043                         MCLGET(m0, M_DONTWAIT);
 1044                         if ((m0->m_flags & M_EXT) == 0) {
 1045                                 m_freem(m);
 1046                                 m_freem(m0);
 1047                                 return NULL;
 1048                         }
 1049                 }
 1050                 m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
 1051                 m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
 1052         }
 1053 #else
 1054         int mlen = MHLEN;
 1055         int len = m->m_pkthdr.len;
 1056         struct mbuf **mp = &m0;
 1057 
 1058         while (len > 0) {
 1059                 if (mlen == MHLEN) {
 1060                         MGETHDR(*mp, M_DONTWAIT, MT_DATA);
 1061                 } else {
 1062                         MGET(*mp, M_DONTWAIT, MT_DATA);
 1063                 }
 1064                 if (*mp == NULL) {
 1065                         m_freem(m0);
 1066                         m0 = NULL;
 1067                         break;
 1068                 }
 1069                 if (len > MLEN) {
 1070                         MCLGET(*mp, M_DONTWAIT);
 1071                         if (((*mp)->m_flags & M_EXT) == 0) {
 1072                                 m_freem(m0);
 1073                                 m0 = NULL;
 1074                                 break;
 1075                         }
 1076                         (*mp)->m_len = (len <= MCLBYTES ? len : MCLBYTES);
 1077                 } else {
 1078                         (*mp)->m_len = (len <= mlen ? len : mlen);
 1079                 }
 1080                 m_copydata(m, m->m_pkthdr.len - len,
 1081                            (*mp)->m_len, mtod((*mp), caddr_t));
 1082                 len -= (*mp)->m_len;
 1083                 mp = &(*mp)->m_next;
 1084                 mlen = MLEN;
 1085         }
 1086 #endif
 1087         m_freem(m);
 1088         return m0;
 1089 }
 1090 
 1091 /*
 1092  * queue the mbuf handed to us for the interface.  If we cannot
 1093  * queue it, return the mbuf.  Return NULL if the mbuf was queued.
 1094  */
static struct mbuf *
lmc_txput(lmc_softc_t * const sc, struct mbuf *m)
{
        lmc_ringinfo_t * const ri = &sc->lmc_txinfo;
        lmc_desc_t *eop, *nextout;
        int segcnt, free;
        u_int32_t d_status, ctl;
#if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NOTX)
        bus_dmamap_t map;
        int error;
#else
        struct mbuf *m0;
#endif

#if defined(LMC_DEBUG)
        /* Sanity check: caller should not hand us packets while tx is down. */
        if ((sc->lmc_cmdmode & TULIP_CMD_TXRUN) == 0) {
                printf(LMC_PRINTF_FMT ": txput: tx not running\n",
                       LMC_PRINTF_ARGS);
                sc->lmc_flags |= LMC_WANTTXSTART;
                goto finish;
        }
#endif

        /*
         * Now we try to fill in our transmit descriptors.  This is
         * a bit reminiscent of going on the Ark two by two
         * since each descriptor for the TULIP can describe
         * two buffers.  So we advance through packet filling
         * each of the two entries at a time to fill each
         * descriptor.  Clear the first and last segment bits
         * in each descriptor (actually just clear everything
         * but the end-of-ring or chain bits) to make sure
         * we don't get messed up by previously sent packets.
         *
         * We may fail to put the entire packet on the ring if
         * there is either not enough ring entries free or if the
         * packet has more than MAX_TXSEG segments.  In the former
         * case we will just wait for the ring to empty.  In the
         * latter case we have to recopy.
         */
#if !defined(LMC_BUS_DMA) || defined(LMC_BUS_DMA_NOTX)
again:
        m0 = m;
#endif
        d_status = 0;
        eop = nextout = ri->ri_nextout;
        segcnt = 0;
        free = ri->ri_free;
#if defined(LMC_BUS_DMA) && !defined(LMC_BUS_DMA_NOTX)
        /*
         * Reclaim some DMA maps from if we are out.
         */
        if (sc->lmc_txmaps_free == 0) {
#if defined(LMC_DEBUG)
                sc->lmc_dbg.dbg_no_txmaps++;
#endif
                free += lmc_tx_intr(sc);
        }
        if (sc->lmc_txmaps_free > 0) {
                /* Peek at the top map; it is only committed much later. */
                map = sc->lmc_txmaps[sc->lmc_txmaps_free-1];
        } else {
                sc->lmc_flags |= LMC_WANTTXSTART;
#if defined(LMC_DEBUG)
                sc->lmc_dbg.dbg_txput_finishes[1]++;
#endif
                goto finish;
        }
        error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m, BUS_DMA_NOWAIT);
        if (error != 0) {
                if (error == EFBIG) {
                        /*
                         * The packet exceeds the number of transmit buffer
                         * entries that we can use for one packet, so we have
                         * to recopy it into one mbuf and then try again.
                         */
                        m = lmc_mbuf_compress(m);
                        if (m == NULL) {
#if defined(LMC_DEBUG)
                                sc->lmc_dbg.dbg_txput_finishes[2]++;
#endif
                                goto finish;
                        }
                        error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m,
                                BUS_DMA_NOWAIT);
                }
                if (error != 0) {
                        printf(LMC_PRINTF_FMT ": unable to load tx map, "
                                "error = %d\n", LMC_PRINTF_ARGS, error);
#if defined(LMC_DEBUG)
                        sc->lmc_dbg.dbg_txput_finishes[3]++;
#endif
                        goto finish;
                }
        }
        /*
         * Each descriptor holds two segments, so the packet needs
         * (nsegs + 1) / 2 ring entries.
         */
        if ((free -= (map->dm_nsegs + 1) / 2) <= 0
                /*
                 * See if there's any unclaimed space in the transmit ring.
                 */
                && (free += lmc_tx_intr(sc)) <= 0) {
                /*
                 * There's no more room but since nothing
                 * has been committed at this point, just
                 * show output is active, put back the
                 * mbuf and return.
                 */
                sc->lmc_flags |= LMC_WANTTXSTART;
#if defined(LMC_DEBUG)
                sc->lmc_dbg.dbg_txput_finishes[4]++;
#endif
                bus_dmamap_unload(sc->lmc_dmatag, map);
                goto finish;
        }
        /* Fill descriptors two segments at a time. */
        for (; map->dm_nsegs - segcnt > 1; segcnt += 2) {
                int flg;

                eop = nextout;
                /* Preserve only the end-of-ring flag from the old entry. */
                flg            = LMC_CTL_FLGS(le32toh(eop->d_ctl));
                flg           &= TULIP_DFLAG_ENDRING;
                flg           |= TULIP_DFLAG_TxNOPADDING;
                if (sc->ictl.crc_length == 16)
                        flg |= TULIP_DFLAG_TxHASCRC;
                eop->d_status  = htole32(d_status);
                eop->d_addr1   = htole32(map->dm_segs[segcnt].ds_addr);
                eop->d_addr2   = htole32(map->dm_segs[segcnt+1].ds_addr);
                eop->d_ctl     = htole32(LMC_CTL(flg, 
                                 map->dm_segs[segcnt].ds_len,
                                 map->dm_segs[segcnt+1].ds_len));
                /*
                 * Only the first descriptor stays un-owned (d_status == 0)
                 * until commit; all later ones are chip-owned immediately.
                 */
                d_status = TULIP_DSTS_OWNER;
                if (++nextout == ri->ri_last)
                        nextout = ri->ri_first;
        }
        /* Odd segment count: one final descriptor with a single buffer. */
        if (segcnt < map->dm_nsegs) {
                int flg;

                eop = nextout;
                flg            = LMC_CTL_FLGS(le32toh(eop->d_ctl));
                flg           &= TULIP_DFLAG_ENDRING;
                flg           |= TULIP_DFLAG_TxNOPADDING;
                if (sc->ictl.crc_length == 16)
                        flg |= TULIP_DFLAG_TxHASCRC;
                eop->d_status  = htole32(d_status);
                eop->d_addr1   = htole32(map->dm_segs[segcnt].ds_addr);
                eop->d_addr2   = 0;
                eop->d_ctl     = htole32(LMC_CTL(flg, 
                                 map->dm_segs[segcnt].ds_len, 0));
                if (++nextout == ri->ri_last)
                        nextout = ri->ri_first;
        }
        LMC_TXMAP_PRESYNC(sc, map);
        M_SETCTX(m, map);
        map = NULL;
        --sc->lmc_txmaps_free;          /* commit to using the dmamap */

#else /* !LMC_BUS_DMA */

        /* Walk the mbuf chain, splitting buffers at page boundaries. */
        do {
                int len = m0->m_len;
                caddr_t addr = mtod(m0, caddr_t);
                unsigned clsize = PAGE_SIZE - (((u_long) addr) & PAGE_MASK);

                while (len > 0) {
                        unsigned slen = min(len, clsize);
#ifdef BIG_PACKET
                        int partial = 0;
                        if (slen >= 2048)
                                slen = 2040, partial = 1;
#endif
                        segcnt++;
                        if (segcnt > LMC_MAX_TXSEG) {
                                /*
                                 * The packet exceeds the number of transmit
                                 * buffer entries that we can use for one
                                 * packet, so we have recopy it into one mbuf
                                 * and then try again.
                                 */
                                m = lmc_mbuf_compress(m);
                                if (m == NULL)
                                        goto finish;
                                goto again;
                        }
                        if (segcnt & 1) {
                                if (--free == 0) {
                                        /*
                                         * See if there's any unclaimed space
                                         * in the transmit ring.
                                         */
                                        if ((free += lmc_tx_intr(sc)) == 0) {
                                                /*
                                                 * There's no more room but
                                                 * since nothing has been
                                                 * committed at this point,
                                                 * just show output is active,
                                                 * put back the mbuf and
                                                 * return.
                                                 */
                                                sc->lmc_flags |= LMC_WANTTXSTART;
                                                goto finish;
                                        }
                                }
                                eop = nextout;
                                if (++nextout == ri->ri_last)
                                        nextout = ri->ri_first;
                                eop->d_flag &= TULIP_DFLAG_ENDRING;
                                eop->d_flag |= TULIP_DFLAG_TxNOPADDING;
                                if (sc->ictl.crc_length == 16)
                                        eop->d_flag |= TULIP_DFLAG_TxHASCRC;
                                eop->d_status = d_status;
                                eop->d_addr1 = LMC_KVATOPHYS(sc, addr);
                                eop->d_length1 = slen;
                        } else {
                                /*
                                 *  Fill in second half of descriptor
                                 */
                                eop->d_addr2 = LMC_KVATOPHYS(sc, addr);
                                eop->d_length2 = slen;
                        }
                        d_status = TULIP_DSTS_OWNER;
                        len -= slen;
                        addr += slen;
#ifdef BIG_PACKET
                        if (partial)
                                continue;
#endif
                        clsize = PAGE_SIZE;
                }
        } while ((m0 = m0->m_next) != NULL);
#endif /* LMC_BUS_DMA */


        /*
         * The descriptors have been filled in.  Now get ready
         * to transmit.
         */
        IF_ENQUEUE(&sc->lmc_txq, m);
        m = NULL;

        /*
         * Make sure the next descriptor after this packet is owned
         * by us since it may have been set up above if we ran out
         * of room in the ring.
         */
        nextout->d_status = 0;
        LMC_TXDESC_PRESYNC(sc, nextout, sizeof(u_int32_t));

#if !defined(LMC_BUS_DMA) || defined(LMC_BUS_DMA_NOTX)
        /*
         * If we only used the first segment of the last descriptor,
         * make sure the second segment will not be used.
         */
        if (segcnt & 1) {
                eop->d_addr2 = 0;
                eop->d_length2 = 0;
        }
#endif /* LMC_BUS_DMA */

        /*
         * Mark the last and first segments, indicate we want a transmit
         * complete interrupt, and tell it to transmit!
         */
        ctl = le32toh(eop->d_ctl);
        eop->d_ctl = htole32(LMC_CTL(
                LMC_CTL_FLGS(ctl)|TULIP_DFLAG_TxLASTSEG|TULIP_DFLAG_TxWANTINTR,
                LMC_CTL_LEN1(ctl),
                LMC_CTL_LEN2(ctl)));

        /*
         * Note that ri->ri_nextout is still the start of the packet
         * and until we set the OWNER bit, we can still back out of
         * everything we have done.
         */
        ctl = le32toh(ri->ri_nextout->d_ctl);
        ri->ri_nextout->d_ctl = htole32(LMC_CTL(
                LMC_CTL_FLGS(ctl)|TULIP_DFLAG_TxFIRSTSEG,
                LMC_CTL_LEN1(ctl),
                LMC_CTL_LEN2(ctl)));
        /*
         * NOTE(review): every other conditional in this file tests
         * LMC_BUS_DMA; "LMC_BUS_MAP" appears only here, so the range
         * pre-syncs below are likely compiled out by a typo.  Verify
         * against the tulip driver before changing.
         */
#if defined(LMC_BUS_MAP) && !defined(LMC_BUS_DMA_NOTX)
        if (eop < ri->ri_nextout) {
                /* Packet wrapped the ring: sync both pieces. */
                LMC_TXDESC_PRESYNC(sc, ri->ri_nextout,
                        (caddr_t) ri->ri_last - (caddr_t) ri->ri_nextout);
                LMC_TXDESC_PRESYNC(sc, ri->ri_first,
                        (caddr_t) (eop + 1) - (caddr_t) ri->ri_first);
        } else {
                LMC_TXDESC_PRESYNC(sc, ri->ri_nextout,
                        (caddr_t) (eop + 1) - (caddr_t) ri->ri_nextout);
        }
#endif
        /* Point of no return: hand the first descriptor to the chip. */
        ri->ri_nextout->d_status = htole32(TULIP_DSTS_OWNER);
        LMC_TXDESC_PRESYNC(sc, ri->ri_nextout, sizeof(u_int32_t));

        LMC_CSR_WRITE(sc, csr_txpoll, 1);

        /*
         * This advances the ring for us.
         */
        ri->ri_nextout = nextout;
        ri->ri_free = free;

        /*
         * switch back to the single queueing ifstart.
         */
        sc->lmc_flags &= ~LMC_WANTTXSTART;
        sc->lmc_if.if_start = lmc_ifstart_one;

        /*
         * If we want a txstart, there must be not enough space in the
         * transmit ring.  So we want to enable transmit done interrupts
         * so we can immediately reclaim some space.  When the transmit
         * interrupt is posted, the interrupt handler will call tx_intr
         * to reclaim space and then txstart (since WANTTXSTART is set).
         * txstart will move the packet into the transmit ring and clear
         * WANTTXSTART thereby causing TXINTR to be cleared.
         */
 finish:
        if (sc->lmc_flags & LMC_WANTTXSTART) {
                sc->lmc_if.if_flags |= IFF_OACTIVE;
                sc->lmc_if.if_start = lmc_ifstart;
        }

        return m;
}
 1415 
 1416 
 1417 /*
 1418  * This routine is entered at splnet() (splsoftnet() on NetBSD)
 1419  */
/*
 * Interface ioctl handler: services the LMC private get/set-info
 * requests and MTU changes, then hands everything else to the
 * sppp/p2p layer, bringing the interface up or down when IFF_UP
 * transitions.  Returns 0 or an errno value.
 */
static int
lmc_ifioctl(struct ifnet * ifp, ioctl_cmd_t cmd, caddr_t data)
{
        lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
#if defined(__NetBSD__) || defined(__FreeBSD__)
        lmc_spl_t s;
#endif
        int error = 0;
        struct ifreq *ifr = (struct ifreq *)data;
        u_int32_t new_state;
        u_int32_t old_state;
        lmc_ctl_t ctl;

#if defined(__NetBSD__) || defined(__FreeBSD__)
        s = LMC_RAISESPL();
#endif

        switch (cmd) {
        case LMCIOCGINFO:
                /* Copy the current control/config block out to the user. */
                error = copyout(&sc->ictl, ifr->ifr_data, sizeof(lmc_ctl_t));

                goto out;
                break;          /* unreachable after goto; kept as-is */

        case LMCIOCSINFO:
#if 0 /* XXX */
                /*
                 * NOTE(review): privilege check is disabled, so
                 * LMCIOCSINFO is reachable without superuser
                 * credentials here — verify whether access control is
                 * enforced elsewhere.
                 */
                error = suser(p->p_ucred, &p->p_acflag);
                if (error)
                        goto out;
#endif

                error = copyin(ifr->ifr_data, &ctl, sizeof(lmc_ctl_t));
                if (error != 0)
                        goto out;

                /* Let the media layer apply the new settings. */
                sc->lmc_media->set_status(sc, &ctl);

                goto out;
                break;          /* unreachable after goto; kept as-is */

#if defined(__NetBSD__) || defined(__FreeBSD__)
        case SIOCSIFMTU:
                /*
                 * Don't allow the MTU to get larger than we can handle
                 */
                if (ifr->ifr_mtu > LMC_MTU) {
                        error = EINVAL;
                        goto out;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                }
                break;
#endif
        }

#if defined(__NetBSD__) || defined(__FreeBSD__)
        /*
         * call the sppp ioctl layer
         */
        error = sppp_ioctl(ifp, cmd, data);
        if (error != 0)
                goto out;
#endif

#if defined(__bsdi__)
        error = p2p_ioctl(ifp, cmd, data);
#endif

#if defined(__NetBSD__) || defined(__FreeBSD__)
        /*
         * If we are transitioning from up to down or down to up, call
         * our init routine.
         */
        new_state = ifp->if_flags & IFF_UP;
        old_state = sc->lmc_flags & LMC_IFUP;

        if (new_state && !old_state)
                lmc_ifup(sc);
        else if (!new_state && old_state)
                lmc_ifdown(sc);
#endif

 out:
#if defined(__NetBSD__) || defined(__FreeBSD__)
        LMC_RESTORESPL(s);
#endif

        return error;
}
 1509 
/*
 * These routines get called at device spl (from sppp_output).
 */
 1513 
 1514 #if defined(__NetBSD__) || defined(__FreeBSD__)
 1515 static ifnet_ret_t
 1516 lmc_ifstart(struct ifnet * const ifp)
 1517 {
 1518         lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
 1519         struct mbuf *m;
 1520 
 1521         if (sc->lmc_flags & LMC_IFUP) {
 1522                 while (sppp_isempty(ifp) == 0) {
 1523                         m = sppp_dequeue(ifp);
 1524                         if (!m)
 1525                                 break;
 1526                         if ((m = lmc_txput(sc, m)) != NULL) {
 1527                                 IF_PREPEND(&((struct sppp *)ifp)->pp_fastq, m);
 1528                                 break;
 1529                         }
 1530                 }
 1531                 LMC_CSR_WRITE(sc, csr_txpoll, 1);
 1532         }
 1533 }
 1534 
 1535 static ifnet_ret_t
 1536 lmc_ifstart_one(struct ifnet * const ifp)
 1537 {
 1538         lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
 1539         struct mbuf *m;
 1540 
 1541         if ((sc->lmc_flags & LMC_IFUP) && (sppp_isempty(ifp) == 0)) {
 1542                 m = sppp_dequeue(ifp);
 1543                 if (m) {
 1544                         if ((m = lmc_txput(sc, m)) != NULL)
 1545                                 IF_PREPEND(&((struct sppp *)ifp)->pp_fastq, m);
 1546                 }
 1547                 LMC_CSR_WRITE(sc, csr_txpoll, 1);
 1548         }
 1549 }
 1550 #endif
 1551 
 1552 #if defined(__bsdi__)
 1553 static ifnet_ret_t
 1554 lmc_ifstart(struct ifnet * const ifp)
 1555 {
 1556         lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
 1557         struct mbuf *m;
 1558         struct ifqueue *ifq;
 1559 
 1560         if ((sc->lmc_flags & LMC_IFUP) == 0)
 1561                 return;
 1562 
 1563         for (;;) {
 1564                 ifq = &sc->lmc_p2pcom.p2p_isnd;
 1565 
 1566                 m = ifq->ifq_head;
 1567                 if (m == NULL) {
 1568                         ifq = &sc->lmc_if.if_snd;
 1569                         m = ifq->ifq_head;
 1570                 }
 1571                 if (m == NULL)
 1572                         break;
 1573                 IF_DEQUEUE(ifq, m);
 1574 
 1575                 m = lmc_txput(sc, m);
 1576                 if (m != NULL) {
 1577                         IF_PREPEND(ifq, m);
 1578                         break;
 1579                 }
 1580         }
 1581 
 1582         LMC_CSR_WRITE(sc, csr_txpoll, 1);
 1583 }
 1584 
 1585 static ifnet_ret_t
 1586 lmc_ifstart_one(struct ifnet * const ifp)
 1587 {
 1588         lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
 1589         struct mbuf *m;
 1590         struct ifqueue *ifq;
 1591 
 1592         if ((sc->lmc_flags & LMC_IFUP) == 0)
 1593                 return;
 1594 
 1595         ifq = &sc->lmc_p2pcom.p2p_isnd;
 1596 
 1597         m = ifq->ifq_head;
 1598         if (m == NULL) {
 1599                 ifq = &sc->lmc_if.if_snd;
 1600                 m = ifq->ifq_head;
 1601         }
 1602         if (m == NULL)
 1603                 return 0;
 1604         IF_DEQUEUE(ifq, m);
 1605 
 1606         m = lmc_txput(sc, m);
 1607         if (m != NULL)
 1608                 IF_PREPEND(ifq, m);
 1609 
 1610         LMC_CSR_WRITE(sc, csr_txpoll, 1);
 1611 }
 1612 #endif
 1613 
 1614 #if defined(__bsdi__)
 1615 int
 1616 lmc_getmdm(struct p2pcom *pp, caddr_t b)
 1617 {
 1618         lmc_softc_t *sc = LMC_UNIT_TO_SOFTC(pp->p2p_if.if_unit);
 1619 
 1620         if (sc->lmc_media->get_link_status(sc)) {
 1621                 *(int *)b = TIOCM_CAR;
 1622         } else {
 1623                 *(int *)b = 0;
 1624         }
 1625 
 1626         return (0);
 1627 }
 1628 
 1629 int
 1630 lmc_mdmctl(struct p2pcom *pp, int flag)
 1631 {
 1632         lmc_softc_t *sc = LMC_UNIT_TO_SOFTC(pp->p2p_if.if_unit);
 1633 
 1634         sc->lmc_media->set_link_status(sc, flag);
 1635 
 1636         if (flag)
 1637                 if ((sc->lmc_flags & LMC_IFUP) == 0)
 1638                         lmc_ifup(sc);
 1639         else
 1640                 if ((sc->lmc_flags & LMC_IFUP) == LMC_IFUP)
 1641                         lmc_ifdown(sc);
 1642 
 1643         return (0);
 1644 }
 1645 #endif
 1646 
 1647 /*
 1648  * Set up the OS interface magic and attach to the operating system
 1649  * network services.
 1650  */
void
lmc_attach(lmc_softc_t * const sc)
{
        struct ifnet * const ifp = &sc->lmc_if;

        /* Basic ifnet setup: point-to-point serial link, our handlers. */
        ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
        ifp->if_ioctl = lmc_ifioctl;
        ifp->if_start = lmc_ifstart;
        ifp->if_watchdog = lmc_watchdog;
        ifp->if_timer = 1;      /* arm the watchdog immediately */
        ifp->if_mtu = LMC_MTU;

#if defined(__bsdi__)
        ifp->if_type = IFT_NONE;
        ifp->if_unit = (sc->lmc_dev.dv_unit);
#endif
  
        if_attach(ifp);

#if defined(__NetBSD__) || defined(__FreeBSD__)
        /* Attach the sync-PPP layer; pp_framebytes is set beforehand. */
        sc->lmc_sppp.pp_framebytes = 3; /* 1 flag byte, 2 byte FCS */
        sppp_attach((struct ifnet *)&sc->lmc_sppp);
        sc->lmc_sppp.pp_flags = PP_CISCO | PP_KEEPALIVE;
#endif
#if defined(__bsdi__)
        /* Hook our modem-control callbacks into the p2p layer. */
        sc->lmc_p2pcom.p2p_mdmctl = lmc_mdmctl;
        sc->lmc_p2pcom.p2p_getmdm = lmc_getmdm;
        p2p_attach(&sc->lmc_p2pcom);
#endif

#if NBPFILTER > 0
        LMC_BPF_ATTACH(sc);
#endif

#if defined(__NetBSD__) && NRND > 0
        /* Register this device as a kernel entropy source. */
        rnd_attach_source(&sc->lmc_rndsource, sc->lmc_dev.dv_xname,
                          RND_TYPE_NET, 0);
#endif

        /*
         * turn off those LEDs...
         */
        sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
        lmc_led_on(sc, LMC_MII16_LED0);
}
 1696 
 1697 void
 1698 lmc_initring(lmc_softc_t * const sc, lmc_ringinfo_t * const ri,
 1699                lmc_desc_t *descs, int ndescs)
 1700 {
 1701         ri->ri_max = ndescs;
 1702         ri->ri_first = descs;
 1703         ri->ri_last = ri->ri_first + ri->ri_max;
 1704         memset((caddr_t) ri->ri_first, 0, sizeof(ri->ri_first[0]) * ri->ri_max);
 1705         ri->ri_last[-1].d_ctl = htole32(LMC_CTL(TULIP_DFLAG_ENDRING, 0, 0));
 1706 }

Cache object: 278612e93fb70e4025c0b16d3d2b1006


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.