FreeBSD/Linux Kernel Cross Reference
sys/dev/en/midway.c


    1 /*      $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $        */
    2 /*      (sync'd to midway.c 1.68)       */
    3 
    4 /*-
    5  * Copyright (c) 1996 Charles D. Cranor and Washington University.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Charles D. Cranor and
   19  *      Washington University.
   20  * 4. The name of the author may not be used to endorse or promote products
   21  *    derived from this software without specific prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 /*
   38  *
   39  * m i d w a y . c   e n i 1 5 5   d r i v e r 
   40  *
   41  * author: Chuck Cranor <chuck@ccrc.wustl.edu>
   42  * started: spring, 1996 (written from scratch).
   43  *
   44  * notes from the author:
   45  *   Extra special thanks go to Werner Almesberger, EPFL LRC.   Werner's
   46  *   ENI driver was especially useful in figuring out how this card works.
   47  *   I would also like to thank Werner for promptly answering email and being
   48  *   generally helpful.
   49  */
   50 
   51 #define EN_DIAG
   52 #define EN_DDBHOOK      1       /* compile in ddb functions */
   53 
   54 /*
   55  * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
   56  * appears to be broken.   it works just fine if there is no load... however
   57  * when the card is loaded the data get corrupted.   to see this, one only
   58  * has to use "telnet" over ATM.   do the following command in "telnet":
   59  *      cat /usr/share/misc/termcap
   60  * "telnet" seems to generate lots of 1023 byte mbufs (which make great
   61  * use of the byte aligner).   watch "netstat -s" for checksum errors.
   62  * 
   63  * I further tested this by adding a function that compared the transmit 
   64  * data on the card's SRAM with the data in the mbuf chain _after_ the 
   65  * "transmit DMA complete" interrupt.   using the "telnet" test I got data
   66  * mismatches where the byte-aligned data should have been.   using ddb
   67  * and en_dumpmem() I verified that the DTQs fed into the card were 
   68  * absolutely correct.   thus, we are forced to conclude that the ENI
   69  * hardware is buggy.   note that the Adaptec version of the card works
   70  * just fine with byte DMA.
   71  *
   72  * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
   73  * card.
   74  */
   75 
   76 #if defined(DIAGNOSTIC) && !defined(EN_DIAG)
   77 #define EN_DIAG                 /* link in with master DIAG option */
   78 #endif
   79 
   80 #define EN_COUNT(X) (X)++
   81 
   82 #ifdef EN_DEBUG
   83 
   84 #undef  EN_DDBHOOK
   85 #define EN_DDBHOOK      1
   86 
   87 /*
   88  * This macro removes almost all the EN_DEBUG conditionals in the code that make
   89  * the code a good deal less readable.
   90  */
   91 #define DBG(SC, FL, PRINT) do {                                         \
   92         if ((SC)->debug & DBG_##FL) {                                   \
   93                 device_printf((SC)->dev, "%s: "#FL": ", __func__);      \
   94                 printf PRINT;                                           \
   95                 printf("\n");                                           \
   96         }                                                               \
   97     } while (0)
   98 
   99 enum {
  100         DBG_INIT        = 0x0001,       /* debug attach/detach */
  101         DBG_TX          = 0x0002,       /* debug transmitting */
  102         DBG_SERV        = 0x0004,       /* debug service interrupts */
  103         DBG_IOCTL       = 0x0008,       /* debug ioctls */
  104         DBG_VC          = 0x0010,       /* debug VC handling */
  105         DBG_INTR        = 0x0020,       /* debug interrupts */
  106         DBG_DMA         = 0x0040,       /* debug DMA probing */
  107         DBG_IPACKETS    = 0x0080,       /* print input packets */
  108         DBG_REG         = 0x0100,       /* print all register access */
  109         DBG_LOCK        = 0x0200,       /* debug locking */
  110 };
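      /*
       * Usage sketch (illustrative): DBG(sc, TX, ("tx%d: %u bytes", chan, len))
       * expands, when DBG_TX is set in sc->debug, to a device_printf() prefix
       * of "<func>: TX: " followed by the formatted message and a newline.
       */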
  111 
  112 #else /* EN_DEBUG */
  113 
  114 #define DBG(SC, FL, PRINT) do { } while (0)
  115 
  116 #endif /* EN_DEBUG */
  117 
  118 #include "opt_inet.h"
  119 #include "opt_natm.h"
  120 #include "opt_ddb.h"
  121 
  122 #ifdef DDB
  123 #undef  EN_DDBHOOK
  124 #define EN_DDBHOOK      1
  125 #endif
  126 
  127 #include <sys/param.h>
  128 #include <sys/systm.h>
  129 #include <sys/queue.h>
  130 #include <sys/sockio.h>
  131 #include <sys/socket.h>
  132 #include <sys/mbuf.h>
  133 #include <sys/endian.h>
  134 #include <sys/stdint.h>
  135 #include <sys/lock.h>
  136 #include <sys/mutex.h>
  137 #include <sys/condvar.h>
  138 #include <vm/uma.h>
  139 
  140 #include <net/if.h>
  141 #include <net/if_media.h>
  142 #include <net/if_atm.h>
  143 
  144 #if defined(INET) || defined(INET6)
  145 #include <netinet/in.h>
  146 #include <netinet/if_atm.h>
  147 #endif
  148 
  149 #ifdef NATM
  150 #include <netnatm/natm.h>
  151 #endif
  152 
  153 #include <sys/bus.h>
  154 #include <machine/bus.h>
  155 #include <sys/rman.h>
  156 #include <sys/module.h>
  157 #include <sys/sysctl.h>
  158 #include <sys/malloc.h>
  159 #include <machine/resource.h>
  160 #include <dev/utopia/utopia.h>
  161 #include <dev/en/midwayreg.h>
  162 #include <dev/en/midwayvar.h>
  163 
  164 #include <net/bpf.h>
  165 
  166 /*
  167  * params
  168  */
  169 #ifndef EN_TXHIWAT
  170 #define EN_TXHIWAT      (64 * 1024)     /* max 64 KB waiting to be DMAd out */
  171 #endif
  172 
  173 SYSCTL_DECL(_hw_atm);
  174 
  175 /*
  176  * dma tables
  177  *
  178  * The plan is indexed by the number of words to transfer.
  179  * The maximum index is 15 (60 bytes).
  180  */
  181 struct en_dmatab {
  182         uint8_t bcode;          /* code */
  183         uint8_t divshift;       /* byte divisor */
  184 };
  185 
  186 static const struct en_dmatab en_dmaplan[] = {
  187   { 0, 0 },             /* 0 */         { MIDDMA_WORD, 2},      /* 1 */
  188   { MIDDMA_2WORD, 3},   /* 2 */         { MIDDMA_WORD, 2},      /* 3 */
  189   { MIDDMA_4WORD, 4},   /* 4 */         { MIDDMA_WORD, 2},      /* 5 */
  190   { MIDDMA_2WORD, 3},   /* 6 */         { MIDDMA_WORD, 2},      /* 7 */
  191   { MIDDMA_8WORD, 5},   /* 8 */         { MIDDMA_WORD, 2},      /* 9 */
  192   { MIDDMA_2WORD, 3},   /* 10 */        { MIDDMA_WORD, 2},      /* 11 */
  193   { MIDDMA_4WORD, 4},   /* 12 */        { MIDDMA_WORD, 2},      /* 13 */
  194   { MIDDMA_2WORD, 3},   /* 14 */        { MIDDMA_WORD, 2},      /* 15 */
  195   { MIDDMA_16WORD,6},   /* 16 */
  196 };
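      /*
       * For illustration of how the table is used by the cleanup-burst path
       * below: a 28-byte residual indexes en_dmaplan[28 / 4] = en_dmaplan[7] =
       * { MIDDMA_WORD, 2 }, i.e. single-word bursts with a count of
       * 28 >> 2 = 7; a 32-byte residual indexes en_dmaplan[8] =
       * { MIDDMA_8WORD, 5 }, i.e. one 8-word burst (32 >> 5 = 1).
       */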
  197 
  198 /*
  199  * prototypes
  200  */
  201 #ifdef EN_DDBHOOK
  202 int en_dump(int unit, int level);
  203 int en_dumpmem(int,int,int);
  204 #endif
  205 static void en_close_finish(struct en_softc *sc, struct en_vcc *vc);
  206 
  207 #define EN_LOCK(SC)     do {                            \
  208         DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__));       \
  209         mtx_lock(&sc->en_mtx);                          \
  210     } while (0)
  211 #define EN_UNLOCK(SC)   do {                            \
  212         DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__));     \
  213         mtx_unlock(&sc->en_mtx);                        \
  214     } while (0)
  215 #define EN_CHECKLOCK(sc)        mtx_assert(&sc->en_mtx, MA_OWNED)
  216 
  217 /*
  218  * While a transmit mbuf is waiting to get transmit DMA resources we
  219  * need to keep some information with it. We don't want to allocate
  220  * additional memory for this so we stuff it into free fields in the
  221  * mbuf packet header. Neither the checksum fields nor the rcvif field are used,
  222  * so we use these.
  223  */
  224 #define TX_AAL5         0x1     /* transmit AAL5 PDU */
  225 #define TX_HAS_TBD      0x2     /* TBD did fit into mbuf */
  226 #define TX_HAS_PAD      0x4     /* padding did fit into mbuf */
  227 #define TX_HAS_PDU      0x8     /* PDU trailer did fit into mbuf */
  228 
  229 #define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {              \
  230         (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS);    \
  231         (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) |               \
  232             ((PAD & 0x3f) << 16);                                       \
  233         (M)->m_pkthdr.rcvif = (void *)(MAP);                            \
  234     } while (0)
  235 
  236 #define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {              \
  237         (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1);    \
  238         (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf;      \
  239         (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff;                  \
  240         (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f;                \
  241         (MAP) = (void *)((M)->m_pkthdr.rcvif);                          \
  242     } while (0)
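      /*
       * Encoding example (illustrative values): for VCI 42, flags
       * (TX_AAL5 | TX_HAS_TBD) = 0x3, a 1000-byte payload and 24 pad bytes,
       * MBUF_SET_TX stores
       *      csum_data  = 42 | (0x3 << MID_VCI_BITS)
       *      csum_flags = 1000 | (24 << 16)
       * and the map pointer in rcvif; MBUF_GET_TX recovers the same values
       * by masking and shifting.
       */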
  243 
  244 
  245 #define EN_WRAPADD(START, STOP, CUR, VAL) do {                  \
  246         (CUR) = (CUR) + (VAL);                                  \
  247         if ((CUR) >= (STOP))                                    \
  248                 (CUR) = (START) + ((CUR) - (STOP));             \
  249     } while (0)
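      /*
       * EN_WRAPADD advances an offset within the circular region
       * [START, STOP).  Example (illustrative numbers): with START = 0x100,
       * STOP = 0x200, CUR = 0x1f8 and VAL = 0x10, the sum 0x208 is >= STOP,
       * so CUR wraps to 0x100 + 0x8 = 0x108.
       */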
  250 
  251 #define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))
  252 
  253 #define SETQ_END(SC, VAL) ((SC)->is_adaptec ?                   \
  254         ((VAL) | (MID_DMA_END >> 4)) :                          \
  255         ((VAL) | (MID_DMA_END)))
  256 
  257 /*
  258  * The dtq and drq members are set for each END entry in the corresponding
  259  * card queue entry. They are used to find out when a buffer has
  260  * finished DMAing and can be freed.
  261  *
  262  * We store sc->dtq and sc->drq data in the following format...
  263  * the 0x80000 ensures we != 0
  264  */
  265 #define EN_DQ_MK(SLOT, LEN)     (((SLOT) << 20) | (LEN) | (0x80000))
  266 #define EN_DQ_SLOT(X)           ((X) >> 20)
  267 #define EN_DQ_LEN(X)            ((X) & 0x3ffff)
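      /*
       * Worked example (illustrative values): EN_DQ_MK(2, 1500) =
       * (2 << 20) | 1500 | 0x80000 = 0x2805dc; EN_DQ_SLOT(0x2805dc) = 2 and
       * EN_DQ_LEN(0x2805dc) = 1500.  The 0x80000 marker is bit 19, above the
       * 18-bit length mask, so it never shows up in the extracted length.
       */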
  268 
  269 /*
  270  * Variables
  271  */
  272 static uma_zone_t en_vcc_zone;
  273 
  274 /***********************************************************************/
  275 
  276 /*
  277  * en_read{x}: read a word from the card. These are the only functions
  278  * that read from the card.
  279  */
  280 static __inline uint32_t
  281 en_readx(struct en_softc *sc, uint32_t r)
  282 {
  283         uint32_t v;
  284 
  285 #ifdef EN_DIAG
  286         if (r > MID_MAXOFF || (r % 4))
  287                 panic("en_read out of range, r=0x%x", r);
  288 #endif
  289         v = bus_space_read_4(sc->en_memt, sc->en_base, r);
  290         return (v);
  291 }
  292 
  293 static __inline uint32_t
  294 en_read(struct en_softc *sc, uint32_t r)
  295 {
  296         uint32_t v;
  297 
  298 #ifdef EN_DIAG
  299         if (r > MID_MAXOFF || (r % 4))
  300                 panic("en_read out of range, r=0x%x", r);
  301 #endif
  302         v = bus_space_read_4(sc->en_memt, sc->en_base, r);
  303         DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
  304         return (v);
  305 }
  306 
  307 /*
  308  * en_write: write a word to the card. This is the only function that
  309  * writes to the card.
  310  */
  311 static __inline void
  312 en_write(struct en_softc *sc, uint32_t r, uint32_t v)
  313 {
  314 #ifdef EN_DIAG
  315         if (r > MID_MAXOFF || (r % 4))
  316                 panic("en_write out of range, r=0x%x", r);
  317 #endif
  318         DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
  319         bus_space_write_4(sc->en_memt, sc->en_base, r, v);
  320 }
  321 
  322 /*
  323  * en_k2sz: convert KBytes to a size parameter (a log2)
  324  */
  325 static __inline int
  326 en_k2sz(int k)
  327 {
  328         switch(k) {
  329           case 1:   return (0);
  330           case 2:   return (1);
  331           case 4:   return (2);
  332           case 8:   return (3);
  333           case 16:  return (4);
  334           case 32:  return (5);
  335           case 64:  return (6);
  336           case 128: return (7);
  337           default:
  338                 panic("en_k2sz");
  339         }
  340         return (0);
  341 }
  342 #define en_log2(X) en_k2sz(X)
  343 
  344 /*
  345  * en_b2sz: convert a DMA burst code to its byte size
  346  */
  347 static __inline int
  348 en_b2sz(int b)
  349 {
  350         switch (b) {
  351           case MIDDMA_WORD:   return (1*4);
  352           case MIDDMA_2WMAYBE:
  353           case MIDDMA_2WORD:  return (2*4);
  354           case MIDDMA_4WMAYBE:
  355           case MIDDMA_4WORD:  return (4*4);
  356           case MIDDMA_8WMAYBE:
  357           case MIDDMA_8WORD:  return (8*4);
  358           case MIDDMA_16WMAYBE:
  359           case MIDDMA_16WORD: return (16*4);
  360           default:
  361                 panic("en_b2sz");
  362         }
  363         return (0);
  364 }
  365 
  366 /*
  367  * en_sz2b: convert a burst size (bytes) to DMA burst code
  368  */
  369 static __inline int
  370 en_sz2b(int sz)
  371 {
  372         switch (sz) {
  373           case 1*4:  return (MIDDMA_WORD);
  374           case 2*4:  return (MIDDMA_2WORD);
  375           case 4*4:  return (MIDDMA_4WORD);
  376           case 8*4:  return (MIDDMA_8WORD);
  377           case 16*4: return (MIDDMA_16WORD);
  378           default:
  379                 panic("en_sz2b");
  380         }
  381         return(0);
  382 }
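      /*
       * Note (illustrative): en_b2sz() and en_sz2b() are inverses for the
       * exact burst codes; the *WMAYBE codes round-trip onto their exact
       * equivalents, e.g. en_sz2b(en_b2sz(MIDDMA_8WMAYBE)) == MIDDMA_8WORD.
       */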
  383 
  384 #ifdef EN_DEBUG
  385 /*
  386  * Dump a packet
  387  */
  388 static void
  389 en_dump_packet(struct en_softc *sc, struct mbuf *m)
  390 {
  391         int plen = m->m_pkthdr.len;
  392         u_int pos = 0;
  393         u_int totlen = 0;
  394         int len;
  395         u_char *ptr;
  396 
  397         device_printf(sc->dev, "packet len=%d", plen);
  398         while (m != NULL) {
  399                 totlen += m->m_len;
  400                 ptr = mtod(m, u_char *);
  401                 for (len = 0; len < m->m_len; len++, pos++, ptr++) {
  402                         if (pos % 16 == 8)
  403                                 printf(" ");
  404                         if (pos % 16 == 0)
  405                                 printf("\n");
  406                         printf(" %02x", *ptr);
  407                 }
  408                 m = m->m_next;
  409         }
  410         printf("\n");
  411         if (totlen != plen)
  412                 printf("sum of m_len=%u\n", totlen);
  413 }
  414 #endif
  415 
  416 /*********************************************************************/
  417 /*
  418  * DMA maps
  419  */
  420 
  421 /*
  422  * Map constructor for a MAP.
  423  *
  424  * This is called each time a map is allocated
  425  * from the pool and is about to be returned to the user. Here we actually
  426  * allocate the map if there isn't one. The problem is that we may fail
  427  * to allocate the DMA map yet have no means to signal this error. Therefore,
  428  * when allocating a map, the caller must check that there is a map. An
  429  * additional problem is that i386 maps will be NULL, yet are ok and must
  430  * be freed, so we use a flag to signal allocation.
  431  *
  432  * Caveat: we have no way to know that we are called from an interrupt context
  433  * here. We rely on the fact that bus_dmamap_create uses M_NOWAIT in all
  434  * its allocations.
  435  *
  436  * LOCK: any, not needed
  437  */
  438 static int
  439 en_map_ctor(void *mem, int size, void *arg, int flags)
  440 {
  441         struct en_softc *sc = arg;
  442         struct en_map *map = mem;
  443         int err;
  444 
  445         err = bus_dmamap_create(sc->txtag, 0, &map->map);
  446         if (err != 0) {
  447                 device_printf(sc->dev, "cannot create DMA map %d\n", err);
  448                 return (err);
  449         }
  450         map->flags = ENMAP_ALLOC;
  451         map->sc = sc;
  452         return (0);
  453 }
  454 
  455 /*
  456  * Map destructor.
  457  *
  458  * Called when a map is disposed into the zone. If the map is loaded, unload
  459  * it.
  460  *
  461  * LOCK: any, not needed
  462  */
  463 static void
  464 en_map_dtor(void *mem, int size, void *arg)
  465 {
  466         struct en_map *map = mem;
  467 
  468         if (map->flags & ENMAP_LOADED) {
  469                 bus_dmamap_unload(map->sc->txtag, map->map);
  470                 map->flags &= ~ENMAP_LOADED;
  471         }
  472 }
  473 
  474 /*
  475  * Map finalizer.
  476  *
  477  * This is called each time a map is returned from the zone to the system.
  478  * Get rid of the dmamap here.
  479  *
  480  * LOCK: any, not needed
  481  */
  482 static void
  483 en_map_fini(void *mem, int size)
  484 {
  485         struct en_map *map = mem;
  486 
  487         bus_dmamap_destroy(map->sc->txtag, map->map);
  488 }
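      /*
       * The ctor/dtor/fini trio above is meant to back a UMA zone.  A
       * minimal sketch of the zone setup (the real call lives in the attach
       * path, which is outside this excerpt, so the exact name and flags
       * are assumptions):
       *
       *      sc->map_zone = uma_zcreate("en dma maps",
       *          sizeof(struct en_map), en_map_ctor, en_map_dtor,
       *          NULL, en_map_fini, UMA_ALIGN_PTR, 0);
       *
       * en_map_ctor then runs on each uma_zalloc_arg(), en_map_dtor on each
       * uma_zfree() and en_map_fini when an item is finally released back
       * to the system.
       */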
  489 
  490 /*********************************************************************/
  491 /*
  492  * Transmission
  493  */
  494 
  495 /*
  496  * Argument structure to load a transmit DMA map
  497  */
  498 struct txarg {
  499         struct en_softc *sc;
  500         struct mbuf *m;
  501         u_int vci;
  502         u_int chan;             /* transmit channel */
  503         u_int datalen;          /* length of user data */
  504         u_int flags;
  505         u_int wait;             /* return: out of resources */
  506 };
  507 
  508 /*
  509  * TX DMA map loader helper. This function is the callback when the map
  510  * is loaded. It should fill the DMA segment descriptors into the hardware.
  511  *
  512  * LOCK: locked, needed
  513  */
  514 static void
  515 en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
  516     int error)
  517 {
  518         struct txarg *tx = uarg;
  519         struct en_softc *sc = tx->sc;
  520         struct en_txslot *slot = &sc->txslot[tx->chan];
  521         uint32_t cur;           /* on-card buffer position (bytes offset) */
  522         uint32_t dtq;           /* on-card queue position (byte offset) */
  523         uint32_t last_dtq;      /* last DTQ we have written */
  524         uint32_t tmp;
  525         u_int free;             /* free queue entries on card */
  526         u_int needalign, cnt;
  527         bus_size_t rest;        /* remaining bytes in current segment */
  528         bus_addr_t addr;
  529         bus_dma_segment_t *s;
  530         uint32_t count, bcode;
  531         int i;
  532 
  533         if (error != 0)
  534                 return;
  535 
  536         cur = slot->cur;
  537         dtq = sc->dtq_us;
  538         free = sc->dtq_free;
  539 
  540         last_dtq = 0;           /* make gcc happy */
  541 
  542         /*
  543          * Local macro to add an entry to the transmit DMA area. If there
  544          * are no entries left, return. Save the byte offset of the entry
  545          * in last_dtq for later use.
  546          */
  547 #define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR)                          \
  548         if (free == 0) {                                                \
  549                 EN_COUNT(sc->stats.txdtqout);                           \
  550                 tx->wait = 1;                                           \
  551                 return;                                                 \
  552         }                                                               \
  553         last_dtq = dtq;                                                 \
  554         en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ?                \
  555             MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) :                 \
  556             MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE));                 \
  557         en_write(sc, dtq + 4, ADDR);                                    \
  558                                                                         \
  559         EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8);                     \
  560         free--;
  561 
  562         /*
  563          * Local macro to generate a DMA entry to DMA cnt bytes. Updates
  564          * the current buffer byte offset accordingly.
  565          */
  566 #define DO_DTQ(TYPE) do {                                               \
  567         rest -= cnt;                                                    \
  568         EN_WRAPADD(slot->start, slot->stop, cur, cnt);                  \
  569         DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x",        \
  570             tx->chan, cnt, (uintmax_t)rest, cur));                      \
  571                                                                         \
  572         PUT_DTQ_ENTRY(1, bcode, count, addr);                           \
  573                                                                         \
  574         addr += cnt;                                                    \
  575     } while (0)
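              /*
               * For illustration (assuming alburst is set, noalbursts is
               * clear and the best burst is 64 bytes): a 148-byte segment
               * starting 8 bytes past a burst boundary is emitted by the
               * segment loop below as a 56-byte alignment DMA
               * (en_dmaplan[14]: seven 2-word bursts), one 64-byte
               * best-sized burst and a 28-byte cleanup DMA
               * (en_dmaplan[7]: seven single-word bursts).
               */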
  576 
  577         if (!(tx->flags & TX_HAS_TBD)) {
  578                 /*
  579                  * Prepend the TBD - it did not fit into the first mbuf
  580                  */
  581                 tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
  582                     MID_TBD_AAL5 : MID_TBD_NOAAL5,
  583                     sc->vccs[tx->vci]->txspeed,
  584                     tx->m->m_pkthdr.len / MID_ATMDATASZ);
  585                 en_write(sc, cur, tmp);
  586                 EN_WRAPADD(slot->start, slot->stop, cur, 4);
  587 
  588                 tmp = MID_TBD_MK2(tx->vci, 0, 0);
  589                 en_write(sc, cur, tmp);
  590                 EN_WRAPADD(slot->start, slot->stop, cur, 4);
  591 
  592                 /* update DMA address */
  593                 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
  594         }
  595 
  596         for (i = 0, s = segs; i < nseg; i++, s++) {
  597                 rest = s->ds_len;
  598                 addr = s->ds_addr;
  599 
  600                 if (sc->is_adaptec) {
  601                         /* adaptec card - simple */
  602 
  603                         /* advance the on-card buffer pointer */
  604                         EN_WRAPADD(slot->start, slot->stop, cur, rest);
  605                         DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
  606                             tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));
  607 
  608                         PUT_DTQ_ENTRY(0, 0, rest, addr);
  609 
  610                         continue;
  611                 }
  612 
  613                 /*
  614                  * do we need to do a DMA op to align to the maximum
  615                  * burst? Note that we are always 32-bit aligned.
  616                  */
  617                 if (sc->alburst &&
  618                     (needalign = (addr & sc->bestburstmask)) != 0) {
  619                         /* compute number of bytes, words and code */
  620                         cnt = sc->bestburstlen - needalign;
  621                         if (cnt > rest)
  622                                 cnt = rest;
  623                         count = cnt / sizeof(uint32_t);
  624                         if (sc->noalbursts) {
  625                                 bcode = MIDDMA_WORD;
  626                         } else {
  627                                 bcode = en_dmaplan[count].bcode;
  628                                 count = cnt >> en_dmaplan[count].divshift;
  629                         }
  630                         DO_DTQ("al_dma");
  631                 }
  632 
  633                 /* do we need to do a max-sized burst? */
  634                 if (rest >= sc->bestburstlen) {
  635                         count = rest >> sc->bestburstshift;
  636                         cnt = count << sc->bestburstshift;
  637                         bcode = sc->bestburstcode;
  638                         DO_DTQ("best_dma");
  639                 }
  640 
  641                 /* do we need to do a cleanup burst? */
  642                 if (rest != 0) {
  643                         cnt = rest;
  644                         count = rest / sizeof(uint32_t);
  645                         if (sc->noalbursts) {
  646                                 bcode = MIDDMA_WORD;
  647                         } else {
  648                                 bcode = en_dmaplan[count].bcode;
  649                                 count = cnt >> en_dmaplan[count].divshift;
  650                         }
  651                         DO_DTQ("clean_dma");
  652                 }
  653         }
  654 
  655         KASSERT (tx->flags & TX_HAS_PAD, ("PDU not padded"));
  656 
  657         if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
  658                 /*
  659                  * Append the AAL5 PDU trailer
  660                  */
  661                 tmp = MID_PDU_MK1(0, 0, tx->datalen);
  662                 en_write(sc, cur, tmp);
  663                 EN_WRAPADD(slot->start, slot->stop, cur, 4);
  664 
  665                 en_write(sc, cur, 0);
  666                 EN_WRAPADD(slot->start, slot->stop, cur, 4);
  667 
  668                 /* update DMA address */
  669                 PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
  670         }
  671 
  672         /* record the end for the interrupt routine */
  673         sc->dtq[MID_DTQ_A2REG(last_dtq)] =
  674             EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);
  675 
  676         /* set the end flag in the last descriptor */
  677         en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));
  678 
  679 #undef PUT_DTQ_ENTRY
  680 #undef DO_DTQ
  681 
  682         /* commit */
  683         slot->cur = cur;
  684         sc->dtq_free = free;
  685         sc->dtq_us = dtq;
  686 
  687         /* tell card */
  688         en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
  689 }
  690 
  691 /*
  692  * en_txdma: start transmit DMA on the given channel, if possible
  693  *
  694  * This is called from two places: when we got new packets from the upper
  695  * layer or when we found that buffer space has freed up during interrupt
  696  * processing.
  697  *
  698  * LOCK: locked, needed
  699  */
  700 static void
  701 en_txdma(struct en_softc *sc, struct en_txslot *slot)
  702 {
  703         struct en_map *map;
  704         struct mbuf *lastm;
  705         struct txarg tx;
  706         u_int pad;
  707         int error;
  708 
  709         DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
  710   again:
  711         bzero(&tx, sizeof(tx));
  712         tx.chan = slot - sc->txslot;
  713         tx.sc = sc;
  714 
  715         /*
  716          * get an mbuf waiting for DMA
  717          */
  718         _IF_DEQUEUE(&slot->q, tx.m);
  719         if (tx.m == NULL) {
  720                 DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
  721                 return;
  722         }
  723         MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);
  724 
  725         /*
  726          * note: don't use the entire buffer space.  if WRTX becomes equal
  727          * to RDTX, the transmitter stops assuming the buffer is empty!  --kjc
  728          */
  729         if (tx.m->m_pkthdr.len >= slot->bfree) {
  730                 EN_COUNT(sc->stats.txoutspace);
  731                 DBG(sc, TX, ("tx%td: out of transmit space", slot - sc->txslot));
  732                 goto waitres;
  733         }
  734   
  735         lastm = NULL;
  736         if (!(tx.flags & TX_HAS_PAD)) {
  737                 if (pad != 0) {
  738                         /* Append the padding buffer */
  739                         (void)m_length(tx.m, &lastm);
  740                         lastm->m_next = sc->padbuf;
  741                         sc->padbuf->m_len = pad;
  742                 }
  743                 tx.flags |= TX_HAS_PAD;
  744         }
  745 
  746         /*
  747          * Try to load that map
  748          */
  749         error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
  750             en_txdma_load, &tx, BUS_DMA_NOWAIT);
  751 
  752         if (lastm != NULL)
  753                 lastm->m_next = NULL;
  754 
  755         if (error != 0) {
  756                 device_printf(sc->dev, "loading TX map failed %d\n",
  757                     error);
  758                 goto dequeue_drop;
  759         }
  760         map->flags |= ENMAP_LOADED;
  761         if (tx.wait) {
  762                 /* probably not enough space */
  763                 bus_dmamap_unload(map->sc->txtag, map->map);
  764                 map->flags &= ~ENMAP_LOADED;
  765 
  766                 sc->need_dtqs = 1;
  767                 DBG(sc, TX, ("tx%td: out of transmit DTQs", slot - sc->txslot));
  768                 goto waitres;
  769         }
  770 
  771         EN_COUNT(sc->stats.launch);
  772         sc->ifp->if_opackets++;
  773 
  774         sc->vccs[tx.vci]->opackets++;
  775         sc->vccs[tx.vci]->obytes += tx.datalen;
  776 
  777 #ifdef ENABLE_BPF
  778         if (bpf_peers_present(sc->ifp->if_bpf)) {
  779                 /*
  780                  * adjust the top of the mbuf to skip the TBD if present
  781                  * before passing the packet to bpf.
  782                  * Also remove padding and the PDU trailer. Assume both of
  783                  * them to be in the same mbuf. pktlen, m_len and m_data
  784                  * are not needed anymore so we can change them.
  785                  */
  786                 if (tx.flags & TX_HAS_TBD) {
  787                         tx.m->m_data += MID_TBD_SIZE;
  788                         tx.m->m_len -= MID_TBD_SIZE;
  789                 }
  790                 tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
  791                 if (tx.m->m_pkthdr.len > tx.datalen) {
  792                         lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
  793                         tx.m->m_pkthdr.len = tx.datalen;
  794                 }
  795 
  796                 bpf_mtap(sc->ifp->if_bpf, tx.m);
  797         }
  798 #endif
  799 
  800         /*
  801          * do some housekeeping and get the next packet
  802          */
  803         slot->bfree -= tx.m->m_pkthdr.len;
  804         _IF_ENQUEUE(&slot->indma, tx.m);
  805 
  806         goto again;
  807 
  808         /*
  809          * error handling. This is jumped to when we just want to drop
  810          * the packet. Must be unlocked here.
  811          */
  812   dequeue_drop:
  813         if (map != NULL)
  814                 uma_zfree(sc->map_zone, map);
  815 
  816         slot->mbsize -= tx.m->m_pkthdr.len;
  817 
  818         m_freem(tx.m);
  819 
  820         goto again;
  821 
  822   waitres:
  823         _IF_PREPEND(&slot->q, tx.m);
  824 }
  825 
  826 /*
  827  * Create a copy of a single mbuf. It can have either internal or
  828  * external data, and it may have a packet header. External data is really
  829  * copied, so the new buffer is writeable.
  830  *
  831  * LOCK: any, not needed
  832  */
  833 static struct mbuf *
  834 copy_mbuf(struct mbuf *m)
  835 {
  836         struct mbuf *new;
  837 
  838         MGET(new, M_TRYWAIT, MT_DATA);
  839         if (new == NULL)
  840                 return (NULL);
  841 
  842         if (m->m_flags & M_PKTHDR) {
  843                 M_MOVE_PKTHDR(new, m);
  844                 if (m->m_len > MHLEN) {
  845                         MCLGET(new, M_TRYWAIT);
  846                         if ((new->m_flags & M_EXT) == 0) {
  847                                 m_free(new);
  848                                 return (NULL);
  849                         }
  850                 }
  851         } else {
  852                 if (m->m_len > MLEN) {
  853                         MCLGET(new, M_TRYWAIT);
  854                         if ((new->m_flags & M_EXT) == 0) {
  855                                 m_free(new);
  856                                 return (NULL);
  857                         }
  858                 }
  859         }
  860 
  861         bcopy(m->m_data, new->m_data, m->m_len);
  862         new->m_len = m->m_len;
  863         new->m_flags &= ~M_RDONLY;
  864 
  865         return (new);
  866 }
  867 
  868 /*
  869  * This function is called when we have an ENI adapter. It fixes the
  870  * mbuf chain, so that all addresses and lengths are 4 byte aligned.
  871  * The overall length is already padded to a multiple of cells plus the
  872  * TBD, so the fixup itself must always succeed. The routine can fail when it
  873  * needs to copy an mbuf (this may happen if an mbuf is read-only).
  874  *
  875  * We assume here that aligning the virtual addresses to 4 bytes also
  876  * aligns the physical addresses.
  877  *
  878  * LOCK: locked, needed
  879  */
  880 static struct mbuf *
  881 en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
  882 {
  883         struct mbuf **prev = &m0;
  884         struct mbuf *m = m0;
  885         struct mbuf *new;
  886         u_char *d;
  887         int off;
  888 
  889         while (m != NULL) {
  890                 d = mtod(m, u_char *);
  891                 if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
  892                         EN_COUNT(sc->stats.mfixaddr);
  893                         if (M_WRITABLE(m)) {
  894                                 bcopy(d, d - off, m->m_len);
  895                                 m->m_data -= off;
  896                         } else {
  897                                 if ((new = copy_mbuf(m)) == NULL) {
  898                                         EN_COUNT(sc->stats.mfixfail);
  899                                         m_freem(m0);
  900                                         return (NULL);
  901                                 }
  902                                 new->m_next = m_free(m);
  903                                 *prev = m = new;
  904                         }
  905                 }
  906 
  907                 if ((off = m->m_len % sizeof(uint32_t)) != 0) {
  908                         EN_COUNT(sc->stats.mfixlen);
  909                         if (!M_WRITABLE(m)) {
  910                                 if ((new = copy_mbuf(m)) == NULL) {
  911                                         EN_COUNT(sc->stats.mfixfail);
  912                                         m_freem(m0);
  913                                         return (NULL);
  914                                 }
  915                                 new->m_next = m_free(m);
  916                                 *prev = m = new;
  917                         }
  918                         d = mtod(m, u_char *) + m->m_len;
  919                         off = 4 - off;
  920                         while (off) {
  921                                 while (m->m_next && m->m_next->m_len == 0)
  922                                         m->m_next = m_free(m->m_next);
  923 
  924                                 if (m->m_next == NULL) {
  925                                         *d++ = 0;
  926                                         KASSERT(*pad > 0, ("no padding space"));
  927                                         (*pad)--;
  928                                 } else {
  929                                         *d++ = *mtod(m->m_next, u_char *);
  930                                         m->m_next->m_len--;
  931                                         m->m_next->m_data++;
  932                                 }
  933                                 m->m_len++;
  934                                 off--;
  935                         }
  936                 }
  937 
  938                 prev = &m->m_next;
  939                 m = m->m_next;
  940         }
  941 
  942         return (m0);
  943 }
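      /*
       * Example of the length fixup above (illustrative numbers): an mbuf
       * with m_len = 10 has off = 10 % 4 = 2, so 4 - 2 = 2 bytes are pulled
       * in from the following mbuf, or taken from the remaining pad budget
       * and zeroed if the chain ends there, rounding m_len up to 12.
       */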
  944 
  945 /*
  946  * en_start: start transmitting the next packet that needs to go out
  947  * if there is one. We take off all packets from the interface's queue and
  948  * put them into the channel's queue.
  949  *
  950  * Here we also prepend the transmit packet descriptor and append the padding
  951  * and (for aal5) the PDU trailer. This is different from the original driver:
  952  * we assume that allocating one or two additional mbufs is actually cheaper
  953  * than all this algorithmic fiddling we would need otherwise.
  954  *
  955  * While the packet is on the channel's wait queue we use the csum_* fields
  956  * in the packet header to hold the original datalen, the AAL5 flag and the
  957  * VCI. The packet length field in the header holds the needed buffer space.
  958  * This may actually be more than the length of the current mbuf chain (when
  959  * one or more of TBD, padding and PDU do not fit).
  960  *
  961  * LOCK: unlocked, needed
  962  */
  963 static void
  964 en_start(struct ifnet *ifp)
  965 {
  966         struct en_softc *sc = (struct en_softc *)ifp->if_softc;
  967         struct mbuf *m, *lastm;
  968         struct atm_pseudohdr *ap;
  969         u_int pad;              /* 0-bytes to pad at PDU end */
  970         u_int datalen;          /* length of user data */
  971         u_int vci;              /* the VCI we are transmitting on */
  972         u_int flags;
  973         uint32_t tbd[2];
  974         uint32_t pdu[2];
  975         struct en_vcc *vc;
  976         struct en_map *map;
  977         struct en_txslot *tx;
  978 
  979         while (1) {
  980                 IF_DEQUEUE(&ifp->if_snd, m);
  981                 if (m == NULL)
  982                         return;
  983 
  984                 flags = 0;
  985 
  986                 ap = mtod(m, struct atm_pseudohdr *);
  987                 vci = ATM_PH_VCI(ap);
  988 
  989                 if (ATM_PH_VPI(ap) != 0 || vci >= MID_N_VC ||
  990                     (vc = sc->vccs[vci]) == NULL ||
  991                     (vc->vflags & VCC_CLOSE_RX)) {
  992                         DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
  993                             ATM_PH_VPI(ap), vci));
  994                         m_freem(m);
  995                         continue;
  996                 }
  997                 if (vc->vcc.aal == ATMIO_AAL_5)
  998                         flags |= TX_AAL5;
  999                 m_adj(m, sizeof(struct atm_pseudohdr));
 1000 
 1001                 /*
 1002                  * (re-)calculate size of packet (in bytes)
 1003                  */
 1004                 m->m_pkthdr.len = datalen = m_length(m, &lastm);
 1005 
 1006                 /*
 1007                  * Compute how much padding we need on the end of the mbuf,
 1008                  * then see if we can put the TBD at the front of the mbuf
 1009                  * where the link header goes (well-behaved protocols will
 1010                  * reserve room for us). Last, check whether the PDU trailer fits.
 1011                  */
 1012                 if (flags & TX_AAL5)
 1013                         m->m_pkthdr.len += MID_PDU_SIZE;
 1014                 m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
 1015                 pad = m->m_pkthdr.len - datalen;
 1016                 if (flags & TX_AAL5)
 1017                         pad -= MID_PDU_SIZE;
 1018                 m->m_pkthdr.len += MID_TBD_SIZE;
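                      /*
                       * Worked example (illustrative, assuming
                       * MID_ATMDATASZ == 48, MID_PDU_SIZE == 8 and
                       * MID_TBD_SIZE == 8): an AAL5 packet with
                       * datalen = 990 grows to 990 + 8 = 998, rounds up to
                       * 1008 (21 cells), giving pad = 1008 - 990 - 8 = 10
                       * and a buffer need of 1008 + 8 = 1016 bytes
                       * including the TBD.
                       */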
 1019 
 1020                 DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
 1021                     vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
 1022                     (int)M_TRAILINGSPACE(lastm)));
 1023 
 1024                 /*
 1025                  * From here on we need access to sc
 1026                  */
 1027                 EN_LOCK(sc);
 1028 
 1029                 /*
 1030                  * Allocate a map. We do this here rather than in en_txdma,
 1031                  * because en_txdma is also called from the interrupt handler
 1032                  * and we are going to have a locking problem then. We must
 1033                  * use NOWAIT here, because the ip_output path holds various
 1034                  * locks.
 1035                  */
 1036                 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
 1037                 if (map == NULL) {
 1038                         /* drop that packet */
 1039                         EN_COUNT(sc->stats.txnomap);
 1040                         EN_UNLOCK(sc);
 1041                         m_freem(m);
 1042                         continue;
 1043                 }
 1044 
 1045                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1046                         EN_UNLOCK(sc);
 1047                         uma_zfree(sc->map_zone, map);
 1048                         m_freem(m);
 1049                         continue;
 1050                 }
 1051 
 1052                 /*
 1053                  * Check whether we can prepend the TBD (8 bytes)
 1054                  */
 1055                 if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
 1056                         tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
 1057                             MID_TBD_AAL5 : MID_TBD_NOAAL5,
 1058                             vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
 1059                         tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));
 1060 
 1061                         m->m_data -= MID_TBD_SIZE;
 1062                         bcopy(tbd, m->m_data, MID_TBD_SIZE);
 1063                         m->m_len += MID_TBD_SIZE;
 1064                         flags |= TX_HAS_TBD;
 1065                 }
 1066 
 1067                 /*
 1068                  * Check whether the padding fits (must be writeable -
 1069                  * we pad with zero).
 1070                  */
 1071                 if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
 1072                         bzero(lastm->m_data + lastm->m_len, pad);
 1073                         lastm->m_len += pad;
 1074                         flags |= TX_HAS_PAD;
 1075 
 1076                         if ((flags & TX_AAL5) &&
 1077                             M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
 1078                                 pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
 1079                                 pdu[1] = 0;
 1080                                 bcopy(pdu, lastm->m_data + lastm->m_len,
 1081                                     MID_PDU_SIZE);
 1082                                 lastm->m_len += MID_PDU_SIZE;
 1083                                 flags |= TX_HAS_PDU;
 1084                         }
 1085                 }
 1086 
 1087                 if (!sc->is_adaptec &&
 1088                     (m = en_fix_mchain(sc, m, &pad)) == NULL) {
 1089                         EN_UNLOCK(sc);
 1090                         uma_zfree(sc->map_zone, map);
 1091                         continue;
 1092                 }
 1093 
 1094                 /*
 1095                  * get assigned channel (will be zero unless txspeed is set)
 1096                  */
 1097                 tx = vc->txslot;
 1098 
 1099                 if (m->m_pkthdr.len > EN_TXSZ * 1024) {
 1100                         DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
 1101                             "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
 1102                             EN_TXSZ * 1024));
 1103                         EN_UNLOCK(sc);
 1104                         m_freem(m);
 1105                         uma_zfree(sc->map_zone, map);
 1106                         continue;
 1107                 }
 1108 
 1109                 if (tx->mbsize > EN_TXHIWAT) {
 1110                         EN_COUNT(sc->stats.txmbovr);
 1111                         DBG(sc, TX, ("tx%td: buffer space shortage",
 1112                             tx - sc->txslot));
 1113                         EN_UNLOCK(sc);
 1114                         m_freem(m);
 1115                         uma_zfree(sc->map_zone, map);
 1116                         continue;
 1117                 }
 1118 
 1119                 /* commit */
 1120                 tx->mbsize += m->m_pkthdr.len;
 1121 
 1122                 DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
 1123                     tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
 1124                     m->m_pkthdr.len, tx->mbsize));
 1125 
 1126                 MBUF_SET_TX(m, vci, flags, datalen, pad, map);
 1127 
 1128                 _IF_ENQUEUE(&tx->q, m);
 1129 
 1130                 en_txdma(sc, tx);
 1131 
 1132                 EN_UNLOCK(sc);
 1133         }
 1134 }
 1135 
 1136 /*********************************************************************/
 1137 /*
 1138  * VCs
 1139  */
 1140 
 1141 /*
 1142  * en_loadvc: load a vc tab entry from a slot
 1143  *
 1144  * LOCK: locked, needed
 1145  */
 1146 static void
 1147 en_loadvc(struct en_softc *sc, struct en_vcc *vc)
 1148 {
 1149         uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));
 1150 
 1151         reg = MIDV_SETMODE(reg, MIDV_TRASH);
 1152         en_write(sc, MID_VC(vc->vcc.vci), reg);
 1153         DELAY(27);
 1154 
 1155         /* no need to set CRC */
 1156 
 1157         /* read pointer = 0, desc. start = 0 */
 1158         en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
 1159         /* write pointer = 0 */
 1160         en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
 1161         /* set mode, size, loc */
 1162         en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);
 1163 
 1164         vc->rxslot->cur = vc->rxslot->start;
 1165 
 1166         DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
 1167             vc->vcc.vci));
 1168 }
 1169 
 1170 /*
 1171  * Open the given vcc.
 1172  *
 1173  * LOCK: unlocked, needed
 1174  */
 1175 static int
 1176 en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
 1177 {
 1178         uint32_t oldmode, newmode;
 1179         struct en_rxslot *slot;
 1180         struct en_vcc *vc;
 1181         int error = 0;
 1182 
 1183         DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
 1184             op->param.vpi, op->param.vci, op->param.flags));
 1185 
 1186         if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
 1187                 return (EINVAL);
 1188 
 1189         vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
 1190         if (vc == NULL)
 1191                 return (ENOMEM);
 1192 
 1193         EN_LOCK(sc);
 1194 
 1195         if (sc->vccs[op->param.vci] != NULL) {
 1196                 error = EBUSY;
 1197                 goto done;
 1198         }
 1199 
 1200         /* find a free receive slot */
 1201         for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
 1202                 if (slot->vcc == NULL)
 1203                         break;
 1204         if (slot == &sc->rxslot[sc->en_nrx]) {
 1205                 error = ENOSPC;
 1206                 goto done;
 1207         }
 1208 
 1209         vc->rxslot = slot;
 1210         vc->rxhand = op->rxhand;
 1211         vc->vcc = op->param;
 1212 
 1213         oldmode = slot->mode;
 1214         newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
 1215         slot->mode = MIDV_SETMODE(oldmode, newmode);
 1216         slot->vcc = vc;
 1217 
 1218         KASSERT (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
 1219             ("en_rxctl: left over mbufs on enable slot=%td",
 1220             vc->rxslot - sc->rxslot));
 1221 
 1222         vc->txspeed = 0;
 1223         vc->txslot = sc->txslot;
 1224         vc->txslot->nref++;     /* bump reference count */
 1225 
 1226         en_loadvc(sc, vc);      /* does debug printf for us */
 1227 
 1228         /* don't free below */
 1229         sc->vccs[vc->vcc.vci] = vc;
 1230         vc = NULL;
 1231         sc->vccs_open++;
 1232 
 1233   done:
 1234         if (vc != NULL)
 1235                 uma_zfree(en_vcc_zone, vc);
 1236 
 1237         EN_UNLOCK(sc);
 1238         return (error);
 1239 }
 1240 
 1241 /*
 1242  * Close finished
 1243  */
 1244 static void
 1245 en_close_finish(struct en_softc *sc, struct en_vcc *vc)
 1246 {
 1247 
 1248         if (vc->rxslot != NULL)
 1249                 vc->rxslot->vcc = NULL;
 1250 
 1251         DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));
 1252 
 1253         sc->vccs[vc->vcc.vci] = NULL;
 1254         uma_zfree(en_vcc_zone, vc);
 1255         sc->vccs_open--;
 1256 }
 1257 
 1258 /*
 1259  * LOCK: unlocked, needed
 1260  */
 1261 static int
 1262 en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
 1263 {
 1264         uint32_t oldmode, newmode;
 1265         struct en_vcc *vc;
 1266         int error = 0;
 1267 
 1268         DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));
 1269 
 1270         if (cl->vpi != 0 || cl->vci >= MID_N_VC)
 1271                 return (EINVAL);
 1272 
 1273         EN_LOCK(sc);
 1274         if ((vc = sc->vccs[cl->vci]) == NULL) {
 1275                 error = ENOTCONN;
 1276                 goto done;
 1277         }
 1278 
 1279         /*
 1280          * turn off VCI
 1281          */
 1282         if (vc->rxslot == NULL) {
 1283                 error = ENOTCONN;
 1284                 goto done;
 1285         }
 1286         if (vc->vflags & VCC_DRAIN) {
 1287                 error = EINVAL;
 1288                 goto done;
 1289         }
 1290 
 1291         oldmode = en_read(sc, MID_VC(cl->vci));
 1292         newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
 1293         en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));
 1294 
 1295         /* halt in tracks, be careful to preserve inservice bit */
 1296         DELAY(27);
 1297         vc->rxslot->mode = newmode;
 1298 
 1299         vc->txslot->nref--;
 1300 
 1301         /* if stuff is still going on we are going to have to drain it out */
 1302         if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
 1303             _IF_QLEN(&vc->rxslot->q) == 0 &&
 1304             (vc->vflags & VCC_SWSL) == 0) {
 1305                 en_close_finish(sc, vc);
 1306                 goto done;
 1307         }
 1308 
 1309         vc->vflags |= VCC_DRAIN;
 1310         DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));
 1311 
 1312         if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
 1313                 goto done;
 1314 
 1315         vc->vflags |= VCC_CLOSE_RX;
 1316         while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
 1317             (vc->vflags & VCC_DRAIN))
 1318                 cv_wait(&sc->cv_close, &sc->en_mtx);
 1319 
 1320         en_close_finish(sc, vc);
 1321         if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1322                 error = EIO;
 1323                 goto done;
 1324         }
 1325 
 1326 
 1327   done:
 1328         EN_UNLOCK(sc);
 1329         return (error);
 1330 }
 1331 
 1332 /*********************************************************************/
 1333 /*
 1334  * starting/stopping the card
 1335  */
 1336 
 1337 /*
 1338  * en_reset_ul: reset the board, throw away work in progress.
 1339  * must en_init to recover.
 1340  *
 1341  * LOCK: locked, needed
 1342  */
 1343 static void
 1344 en_reset_ul(struct en_softc *sc)
 1345 {
 1346         struct en_map *map;
 1347         struct mbuf *m;
 1348         struct en_rxslot *rx;
 1349         int lcv;
 1350 
 1351         device_printf(sc->dev, "reset\n");
 1352         sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1353 
 1354         if (sc->en_busreset)
 1355                 sc->en_busreset(sc);
 1356         en_write(sc, MID_RESID, 0x0);   /* reset hardware */
 1357 
 1358         /*
 1359          * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
 1360          * will free us! Don't release the rxslot from the channel.
 1361          */
 1362         for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
 1363                 if (sc->vccs[lcv] == NULL)
 1364                         continue;
 1365                 rx = sc->vccs[lcv]->rxslot;
 1366 
 1367                 for (;;) {
 1368                         _IF_DEQUEUE(&rx->indma, m);
 1369                         if (m == NULL)
 1370                                 break;
 1371                         map = (void *)m->m_pkthdr.rcvif;
 1372                         uma_zfree(sc->map_zone, map);
 1373                         m_freem(m);
 1374                 }
 1375                 for (;;) {
 1376                         _IF_DEQUEUE(&rx->q, m);
 1377                         if (m == NULL)
 1378                                 break;
 1379                         m_freem(m);
 1380                 }
 1381                 sc->vccs[lcv]->vflags = 0;
 1382         }
 1383 
 1384         /*
 1385          * xmit: dump everything
 1386          */
 1387         for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
 1388                 for (;;) {
 1389                         _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
 1390                         if (m == NULL)
 1391                                 break;
 1392                         map = (void *)m->m_pkthdr.rcvif;
 1393                         uma_zfree(sc->map_zone, map);
 1394                         m_freem(m);
 1395                 }
 1396                 for (;;) {
 1397                         _IF_DEQUEUE(&sc->txslot[lcv].q, m);
 1398                         if (m == NULL)
 1399                                 break;
 1400                         map = (void *)m->m_pkthdr.rcvif;
 1401                         uma_zfree(sc->map_zone, map);
 1402                         m_freem(m);
 1403                 }
 1404                 sc->txslot[lcv].mbsize = 0;
 1405         }
 1406 
 1407         /*
 1408          * Unstop all waiters
 1409          */
 1410         cv_broadcast(&sc->cv_close);
 1411 }
 1412 
 1413 /*
 1414  * en_reset: reset the board, throw away work in progress.
 1415  * must en_init to recover.
 1416  *
 1417  * LOCK: unlocked, needed
 1418  *
 1419  * Use en_reset_ul if you already have the lock
 1420  */
 1421 void
 1422 en_reset(struct en_softc *sc)
 1423 {
 1424         EN_LOCK(sc);
 1425         en_reset_ul(sc);
 1426         EN_UNLOCK(sc);
 1427 }
 1428 
 1429 
 1430 /*
 1431  * en_init: init board and sync the card with the data in the softc.
 1432  *
 1433  * LOCK: locked, needed
 1434  */
 1435 static void
 1436 en_init(struct en_softc *sc)
 1437 {
 1438         int vc, slot;
 1439         uint32_t loc;
 1440 
 1441         if ((sc->ifp->if_flags & IFF_UP) == 0) {
 1442                 DBG(sc, INIT, ("going down"));
 1443                 en_reset(sc);                           /* to be safe */
 1444                 return;
 1445         }
 1446 
 1447         DBG(sc, INIT, ("going up"));
 1448         sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;       /* enable */
 1449 
 1450         if (sc->en_busreset)
 1451                 sc->en_busreset(sc);
 1452         en_write(sc, MID_RESID, 0x0);           /* reset */
 1453 
 1454         /* zero memory */
 1455         bus_space_set_region_4(sc->en_memt, sc->en_base,
 1456             MID_RAMOFF, 0, sc->en_obmemsz / 4);
 1457 
 1458         /*
 1459          * init obmem data structures: vc tab, dma q's, slist.
 1460          *
 1461          * note that we set drq_free/dtq_free to one less than the total number
 1462          * of DTQ/DRQs present.   we do this because the card uses the condition
 1463          * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
 1464          * circular list to become completely full then (drq_chip == drq_us) would
 1465          * hold as well [i.e. the drq_us pointer will wrap all the way around].   by
 1466          * restricting the number of active requests to (N - 1) we prevent the list
 1467          * from ever becoming completely full.    note that the card will sometimes
 1468          * give us an interrupt for a DTQ/DRQ we have already processed... this
 1469          * helps keep that interrupt from messing us up.
 1470          */
 1471         bzero(&sc->drq, sizeof(sc->drq));
 1472         sc->drq_free = MID_DRQ_N - 1;
 1473         sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
 1474         en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip)); 
 1475         sc->drq_us = sc->drq_chip;
 1476 
 1477         bzero(&sc->dtq, sizeof(sc->dtq));
 1478         sc->dtq_free = MID_DTQ_N - 1;
 1479         sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
 1480         en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip)); 
 1481         sc->dtq_us = sc->dtq_chip;
 1482 
 1483         sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
 1484         sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
 1485 
 1486         DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
 1487             "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
 1488             sc->dtq_chip, sc->hwslistp));
 1489 
 1490         for (slot = 0 ; slot < EN_NTX ; slot++) {
 1491                 sc->txslot[slot].bfree = EN_TXSZ * 1024;
 1492                 en_write(sc, MIDX_READPTR(slot), 0);
 1493                 en_write(sc, MIDX_DESCSTART(slot), 0);
 1494                 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
 1495                 loc = loc - MID_RAMOFF;
 1496                 /* mask, cvt to words */
 1497                 loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
 1498                 /* top 11 bits */
 1499                 loc = loc >> MIDV_LOCTOPSHFT;
 1500                 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
 1501                     loc));
 1502                 DBG(sc, INIT, ("tx%d: place 0x%x", slot,
 1503                     (u_int)en_read(sc, MIDX_PLACE(slot))));
 1504         }
 1505 
 1506         for (vc = 0; vc < MID_N_VC; vc++) 
 1507                 if (sc->vccs[vc] != NULL)
 1508                         en_loadvc(sc, sc->vccs[vc]);
 1509 
 1510         /*
 1511          * enable!
 1512          */
 1513         en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
 1514             MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
 1515             MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
 1516         en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
 1517             MID_MCSR_ENTX | MID_MCSR_ENRX);
 1518 }
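
/*
 * The comment in en_init() above sets drq_free/dtq_free to one less than the
 * ring size because, with only two indices, "chip == us" has to mean "empty",
 * so a completely full ring would look exactly like an empty one.  The
 * fragment below is only an illustrative sketch of that (N - 1) rule for a
 * generic index ring; it is not part of the driver and its names are invented.
 */
#if 0   /* illustrative sketch only -- not compiled into the driver */
#include <stdbool.h>

#define RING_N  64                      /* total slots, e.g. MID_DRQ_N */

struct idx_ring {
        unsigned int us;                /* next slot we will fill (cf. drq_us) */
        unsigned int chip;              /* last slot consumed (cf. drq_chip) */
        unsigned int free;              /* starts at RING_N - 1, never RING_N */
};

static bool
ring_empty(const struct idx_ring *r)
{
        /* the card uses this very test, so "full" must never look like this */
        return (r->chip == r->us);
}

static bool
ring_put(struct idx_ring *r)
{
        if (r->free == 0)               /* already N - 1 entries in flight */
                return (false);
        r->us = (r->us + 1) % RING_N;
        r->free--;
        return (true);
}

static void
ring_done(struct idx_ring *r)
{
        /* the interrupt handler advances "chip" toward the card's pointer */
        r->chip = (r->chip + 1) % RING_N;
        r->free++;
}
#endif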
 1519 
 1520 /*********************************************************************/
 1521 /*
 1522  * Ioctls
 1523  */
 1524 /*
 1525  * en_ioctl: handle ioctl requests
 1526  *
 1527  * NOTE: if you add an ioctl to set txspeed, you should choose a new
 1528  * TX channel/slot.   Choose the one with the lowest sc->txslot[slot].nref
 1529  * value, subtract one from sc->txslot[0].nref, add one to the
 1530  * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
 1531  * txspeed[vci].
 1532  *
 1533  * LOCK: unlocked, needed
 1534  */
 1535 static int
 1536 en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1537 {
 1538         struct en_softc *sc = (struct en_softc *)ifp->if_softc;
 1539         struct ifaddr *ifa = (struct ifaddr *)data;
 1540         struct ifreq *ifr = (struct ifreq *)data;
 1541         struct atmio_vcctable *vtab;
 1542         int error = 0;
 1543 
 1544         switch (cmd) {
 1545 
 1546           case SIOCSIFADDR: 
 1547                 EN_LOCK(sc);
 1548                 ifp->if_flags |= IFF_UP;
 1549 #if defined(INET) || defined(INET6)
 1550                 if (ifa->ifa_addr->sa_family == AF_INET
 1551                     || ifa->ifa_addr->sa_family == AF_INET6) {
 1552                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1553                                 en_reset_ul(sc);
 1554                                 en_init(sc);
 1555                         }
 1556                         ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
 1557                         EN_UNLOCK(sc);
 1558                         break;
 1559                 }
 1560 #endif /* INET || INET6 */
 1561                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
 1562                         en_reset_ul(sc);
 1563                         en_init(sc);
 1564                 }
 1565                 EN_UNLOCK(sc);
 1566                 break;
 1567 
 1568         case SIOCSIFFLAGS: 
 1569                 EN_LOCK(sc);
 1570                 if (ifp->if_flags & IFF_UP) {
 1571                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1572                                 en_init(sc);
 1573                 } else {
 1574                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1575                                 en_reset_ul(sc);
 1576                 }
 1577                 EN_UNLOCK(sc);
 1578                 break;
 1579 
 1580           case SIOCSIFMTU:
 1581                 /*
 1582                  * Set the interface MTU.
 1583                  */
 1584                 if (ifr->ifr_mtu > ATMMTU) {
 1585                         error = EINVAL;
 1586                         break;
 1587                 }
 1588                 ifp->if_mtu = ifr->ifr_mtu;
 1589                 break;
 1590 
 1591           case SIOCSIFMEDIA:
 1592           case SIOCGIFMEDIA:
 1593                 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
 1594                 break;
 1595 
 1596           case SIOCATMOPENVCC:          /* kernel internal use */
 1597                 error = en_open_vcc(sc, (struct atmio_openvcc *)data);
 1598                 break;
 1599 
 1600           case SIOCATMCLOSEVCC:         /* kernel internal use */
 1601                 error = en_close_vcc(sc, (struct atmio_closevcc *)data);
 1602                 break;
 1603 
 1604           case SIOCATMGETVCCS:  /* internal netgraph use */
 1605                 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
 1606                     MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
 1607                 if (vtab == NULL) {
 1608                         error = ENOMEM;
 1609                         break;
 1610                 }
 1611                 *(void **)data = vtab;
 1612                 break;
 1613 
 1614           case SIOCATMGVCCS:    /* return vcc table */
 1615                 vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
 1616                     MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
 1617                 error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
 1618                     vtab->count * sizeof(vtab->vccs[0]));
 1619                 free(vtab, M_DEVBUF);
 1620                 break;
 1621 
 1622           default: 
 1623                 error = EINVAL;
 1624                 break;
 1625         }
 1626         return (error);
 1627 }
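
/*
 * The NOTE above en_ioctl() describes how a hypothetical "set txspeed" ioctl
 * would have to move a VCI onto the least-loaded TX slot.  The sketch below
 * only spells out that description; it is not part of the driver, and the
 * txspeed/txvc2slot/nref fields are taken from the wording of the NOTE, not
 * from the current softc.
 */
#if 0   /* hypothetical sketch of the NOTE above -- not compiled */
static void
en_set_txspeed(struct en_softc *sc, u_int vci, u_int speed)
{
        int slot, best;

        /* choose the TX channel/slot with the lowest reference count */
        best = 0;
        for (slot = 1; slot < EN_NTX; slot++)
                if (sc->txslot[slot].nref < sc->txslot[best].nref)
                        best = slot;

        /* move the VCI from its old slot (slot 0 by default) to "best" */
        sc->txslot[sc->txvc2slot[vci]].nref--;
        sc->txslot[best].nref++;
        sc->txvc2slot[vci] = best;
        sc->txspeed[vci] = speed;
}
#endif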
 1628 
 1629 /*********************************************************************/
 1630 /*
 1631  * Sysctl's
 1632  */
 1633 
 1634 /*
 1635  * Sysctl handler for internal statistics
 1636  *
 1637  * LOCK: unlocked, needed
 1638  */
 1639 static int
 1640 en_sysctl_istats(SYSCTL_HANDLER_ARGS)
 1641 {
 1642         struct en_softc *sc = arg1;
 1643         uint32_t *ret;
 1644         int error;
 1645 
 1646         ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);
 1647 
 1648         EN_LOCK(sc);
 1649         bcopy(&sc->stats, ret, sizeof(sc->stats));
 1650         EN_UNLOCK(sc);
 1651 
 1652         error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
 1653         free(ret, M_TEMP);
 1654 
 1655         return (error);
 1656 }
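
/*
 * The handler above returns the raw statistics structure as opaque bytes; the
 * sysctl node itself is created in en_attach() below under hw.atm.<unit>.
 * A userland reader would fetch it with sysctlbyname(3) roughly as in the
 * sketch below (which assumes the device is "en0"; decoding the bytes still
 * requires the structure layout from the driver headers).
 */
#if 0   /* userland sketch -- not part of the driver */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        size_t len = 0;
        void *buf;

        /* first call obtains the size, second call fetches the statistics */
        if (sysctlbyname("hw.atm.en0.istats", NULL, &len, NULL, 0) == -1)
                return (1);
        if ((buf = malloc(len)) == NULL)
                return (1);
        if (sysctlbyname("hw.atm.en0.istats", buf, &len, NULL, 0) == -1)
                return (1);
        printf("read %zu bytes of en(4) statistics\n", len);
        free(buf);
        return (0);
}
#endif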
 1657 
 1658 /*********************************************************************/
 1659 /*
 1660  * Interrupts
 1661  */
 1662 
 1663 /*
 1664  * Transmit interrupt handler
 1665  *
 1666  * check for tx complete; if detected, this means that some space has
 1667  * come free on the card.   we must account for it and arrange to kick
 1668  * the channel to life (in case it is stalled waiting on the card).
 1669  *
 1670  * LOCK: locked, needed
 1671  */
 1672 static uint32_t
 1673 en_intr_tx(struct en_softc *sc, uint32_t reg)
 1674 {
 1675         uint32_t kick;
 1676         uint32_t mask;
 1677         uint32_t val;
 1678         int chan;
 1679 
 1680         kick = 0;               /* bitmask of channels to kick */
 1681 
 1682         for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
 1683                 if (!(reg & MID_TXCHAN(chan)))
 1684                         continue;
 1685 
 1686                 kick = kick | mask;
 1687 
 1688                 /* current read pointer */
 1689                 val = en_read(sc, MIDX_READPTR(chan));
 1690                 /* as offset */
 1691                 val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
 1692                 if (val > sc->txslot[chan].cur)
 1693                         sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
 1694                 else
 1695                         sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
 1696                             sc->txslot[chan].cur;
 1697                 DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
 1698                     "buffer", chan, sc->txslot[chan].bfree));
 1699         }
 1700         return (kick);
 1701 }
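
/*
 * en_intr_tx() above recomputes the free space in the circular on-card
 * transmit buffer from the card's read pointer and our write pointer.  The
 * standalone helper below only illustrates that wrap-around arithmetic (the
 * buffer size corresponds to EN_TXSZ * 1024 above).
 */
#if 0   /* illustrative sketch only -- not compiled */
/*
 * Free bytes in a circular buffer of "size" bytes when the consumer (card)
 * is at offset "rd" and the producer (driver) is at offset "wr".
 * Example, size 8192: rd = 100, wr = 4000  ->  (100 + 8192) - 4000 = 4292.
 */
static unsigned int
txbuf_free(unsigned int rd, unsigned int wr, unsigned int size)
{
        if (rd > wr)
                return (rd - wr);
        return ((rd + size) - wr);
}
#endif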
 1702 
 1703 /*
 1704  * TX DMA interrupt
 1705  *
 1706  * check for TX DMA complete; if detected, this means
 1707  * that some DTQs are now free.   it also means some indma
 1708  * mbufs can be freed.   if we needed DTQs, kick all channels.
 1709  *
 1710  * LOCK: locked, needed
 1711  */
 1712 static uint32_t
 1713 en_intr_tx_dma(struct en_softc *sc)
 1714 {
 1715         uint32_t kick = 0;
 1716         uint32_t val;
 1717         uint32_t idx;
 1718         uint32_t slot;
 1719         uint32_t dtq;
 1720         struct en_map *map;
 1721         struct mbuf *m;
 1722 
 1723         val = en_read(sc, MID_DMA_RDTX);        /* chip's current location */
 1724         idx = MID_DTQ_A2REG(sc->dtq_chip);      /* where we last saw chip */
 1725 
 1726         if (sc->need_dtqs) {
 1727                 kick = MID_NTX_CH - 1;  /* assume power of 2, kick all! */
 1728                 sc->need_dtqs = 0;      /* recalculated in "kick" loop below */
 1729                 DBG(sc, INTR, ("cleared need DTQ condition"));
 1730         }
 1731 
 1732         while (idx != val) {
 1733                 sc->dtq_free++;
 1734                 if ((dtq = sc->dtq[idx]) != 0) {
 1735                         /* don't forget to zero it out when done */
 1736                         sc->dtq[idx] = 0;
 1737                         slot = EN_DQ_SLOT(dtq);
 1738 
 1739                         _IF_DEQUEUE(&sc->txslot[slot].indma, m);
 1740                         if (m == NULL)
 1741                                 panic("enintr: dtqsync");
 1742                         map = (void *)m->m_pkthdr.rcvif;
 1743                         uma_zfree(sc->map_zone, map);
 1744                         m_freem(m);
 1745 
 1746                         sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
 1747                         DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
 1748                             "%d", slot, EN_DQ_LEN(dtq), 
 1749                             sc->txslot[slot].mbsize));
 1750                 }
 1751                 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
 1752         }
 1753         sc->dtq_chip = MID_DTQ_REG2A(val);      /* sync softc */
 1754 
 1755         return (kick);
 1756 }
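
/*
 * The sc->dtq[]/sc->drq[] bookkeeping arrays above hold one packed word per
 * queue entry: the TX/RX slot number plus a byte count, built with EN_DQ_MK()
 * and taken apart with EN_DQ_SLOT()/EN_DQ_LEN().  The real macros live in the
 * midway headers; the sketch below only illustrates the idea with an invented
 * bit layout, including a marker bit so that a real entry is never zero (the
 * interrupt handlers above treat zero as "nothing to clean up here").
 */
#if 0   /* illustrative packing only -- the real macros are in the headers */
#include <stdint.h>

#define DQ_VALID        0x80000000u     /* keeps a real entry from being zero */

static inline uint32_t
dq_mk(unsigned int slot, unsigned int len)
{
        return (DQ_VALID | ((uint32_t)slot << 24) | len);
}

static inline unsigned int
dq_slot(uint32_t dq)
{
        return ((dq >> 24) & 0x7f);
}

static inline unsigned int
dq_len(uint32_t dq)
{
        return (dq & 0x00ffffffu);
}
#endif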
 1757 
 1758 /*
 1759  * Service interrupt
 1760  *
 1761  * LOCK: locked, needed
 1762  */
 1763 static int
 1764 en_intr_service(struct en_softc *sc)
 1765 {
 1766         uint32_t chip;
 1767         uint32_t vci;
 1768         int need_softserv = 0;
 1769         struct en_vcc *vc;
 1770 
 1771         chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
 1772 
 1773         while (sc->hwslistp != chip) {
 1774                 /* fetch and remove it from hardware service list */
 1775                 vci = en_read(sc, sc->hwslistp);
 1776                 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);
 1777 
 1778                 if ((vc = sc->vccs[vci]) == NULL ||
 1779                     (vc->vcc.flags & ATMIO_FLAG_NORX)) {
 1780                         DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
 1781                         en_write(sc, MID_VC(vci), MIDV_TRASH);  /* rx off */
 1782                         continue;
 1783                 }
 1784 
 1785                 /* remove from hwsl */
 1786                 en_write(sc, MID_VC(vci), vc->rxslot->mode);
 1787                 EN_COUNT(sc->stats.hwpull);
 1788 
 1789                 DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));
 1790 
 1791                 /* add it to the software service list (if needed) */
 1792                 if ((vc->vflags & VCC_SWSL) == 0) {
 1793                         EN_COUNT(sc->stats.swadd);
 1794                         need_softserv = 1;
 1795                         vc->vflags |= VCC_SWSL;
 1796                         sc->swslist[sc->swsl_tail] = vci;
 1797                         EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
 1798                         sc->swsl_size++;
 1799                         DBG(sc, INTR, ("added VCI %d to swslist", vci));
 1800                 }
 1801         }
 1802         return (need_softserv);
 1803 }
 1804 
 1805 /*
 1806  * Handle a receive DMA completion
 1807  */
 1808 static void
 1809 en_rx_drain(struct en_softc *sc, u_int drq)
 1810 {
 1811         struct en_rxslot *slot;
 1812         struct en_vcc *vc;
 1813         struct mbuf *m;
 1814         struct atm_pseudohdr ah;
 1815 
 1816         slot = &sc->rxslot[EN_DQ_SLOT(drq)];
 1817 
 1818         m = NULL;       /* assume "JK" trash DMA */
 1819         if (EN_DQ_LEN(drq) != 0) {
 1820                 _IF_DEQUEUE(&slot->indma, m);
 1821                 KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
 1822                     sc->ifp->if_xname, slot - sc->rxslot));
 1823                 uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
 1824         }
 1825         if ((vc = slot->vcc) == NULL) {
 1826                 /* oops */
 1827                 if (m != NULL)
 1828                         m_freem(m);
 1829                 return;
 1830         }
 1831 
 1832         /* do something with this mbuf */
 1833         if (vc->vflags & VCC_DRAIN) {
 1834                 /* drain? */
 1835                 if (m != NULL)
 1836                         m_freem(m);
 1837                 if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
 1838                     (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
 1839                     (vc->vflags & VCC_SWSL) == 0) {
 1840                         vc->vflags &= ~VCC_CLOSE_RX;
 1841                         if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
 1842                                 en_close_finish(sc, vc);
 1843                         else
 1844                                 cv_signal(&sc->cv_close);
 1845                 }
 1846                 return;
 1847         }
 1848 
 1849         if (m != NULL) {
 1850                 ATM_PH_FLAGS(&ah) = vc->vcc.flags;
 1851                 ATM_PH_VPI(&ah) = 0;
 1852                 ATM_PH_SETVCI(&ah, vc->vcc.vci);
 1853 
 1854                 DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
 1855                     "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
 1856                     EN_DQ_LEN(drq), vc->rxhand));
 1857 
 1858                 m->m_pkthdr.rcvif = sc->ifp;
 1859                 sc->ifp->if_ipackets++;
 1860 
 1861                 vc->ipackets++;
 1862                 vc->ibytes += m->m_pkthdr.len;
 1863 
 1864 #ifdef EN_DEBUG
 1865                 if (sc->debug & DBG_IPACKETS)
 1866                         en_dump_packet(sc, m);
 1867 #endif
 1868 #ifdef ENABLE_BPF
 1869                 BPF_MTAP(sc->ifp, m);
 1870 #endif
 1871                 EN_UNLOCK(sc);
 1872                 atm_input(sc->ifp, &ah, m, vc->rxhand);
 1873                 EN_LOCK(sc);
 1874         }
 1875 }
 1876 
 1877 /*
 1878  * check for RX DMA complete, and pass the data "upstairs"
 1879  *
 1880  * LOCK: locked, needed
 1881  */
 1882 static int
 1883 en_intr_rx_dma(struct en_softc *sc)
 1884 {
 1885         uint32_t val;
 1886         uint32_t idx;
 1887         uint32_t drq;
 1888 
 1889         val = en_read(sc, MID_DMA_RDRX);        /* chip's current location */
 1890         idx = MID_DRQ_A2REG(sc->drq_chip);      /* where we last saw chip */
 1891 
 1892         while (idx != val) {
 1893                 sc->drq_free++;
 1894                 if ((drq = sc->drq[idx]) != 0) {
 1895                         /* don't forget to zero it out when done */
 1896                         sc->drq[idx] = 0;
 1897                         en_rx_drain(sc, drq);
 1898                 }
 1899                 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
 1900         }
 1901         sc->drq_chip = MID_DRQ_REG2A(val);      /* sync softc */
 1902 
 1903         if (sc->need_drqs) {
 1904                 /* true if we had a DRQ shortage */
 1905                 sc->need_drqs = 0;
 1906                 DBG(sc, INTR, ("cleared need DRQ condition"));
 1907                 return (1);
 1908         } else
 1909                 return (0);
 1910 }
 1911 
 1912 /*
 1913  * en_mget: get an mbuf chain that can hold totlen bytes and return it
 1914  * (for recv). For the actual allocation totlen is rounded up to a multiple
 1915  * of 4. We also ensure that each mbuf's length is a multiple of 4 bytes.
 1916  *
 1917  * After this call the sum of all the m_len's in the chain will be totlen.
 1918  * This is called at interrupt time, so we can't wait here.
 1919  *
 1920  * LOCK: any, not needed
 1921  */
 1922 static struct mbuf *
 1923 en_mget(struct en_softc *sc, u_int pktlen)
 1924 {
 1925         struct mbuf *m, *tmp;
 1926         u_int totlen, pad;
 1927 
 1928         totlen = roundup(pktlen, sizeof(uint32_t));
 1929         pad = totlen - pktlen;
 1930 
 1931         /*
 1932          * First get an mbuf with a packet header. Keep space for a couple
 1933          * of words at the beginning.
 1934          */
 1935         /* called from interrupt context */
 1936         MGETHDR(m, M_DONTWAIT, MT_DATA);
 1937         if (m == NULL)
 1938                 return (NULL);
 1939 
 1940         m->m_pkthdr.rcvif = NULL;
 1941         m->m_pkthdr.len = pktlen;
 1942         m->m_len = EN_RX1BUF;
 1943         MH_ALIGN(m, EN_RX1BUF);
 1944         if (m->m_len >= totlen) {
 1945                 m->m_len = totlen;
 1946 
 1947         } else {
 1948                 totlen -= m->m_len;
 1949 
 1950                 /* called from interrupt context */
 1951                 tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
 1952                 if (tmp == NULL) {
 1953                         m_free(m);
 1954                         return (NULL);
 1955                 }
 1956                 tmp = m->m_next;
 1957                 /* m_getm could do this for us */
 1958                 while (tmp != NULL) {
 1959                         tmp->m_len = min(MCLBYTES, totlen);
 1960                         totlen -= tmp->m_len;
 1961                         tmp = tmp->m_next;
 1962                 }
 1963         }
 1964 
 1965         return (m);
 1966 }
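
/*
 * en_mget() above rounds the requested length up to a 32-bit multiple; the
 * extra bytes are DMA'd but trimmed off the last mbuf again in en_service().
 * A small worked example of that arithmetic, independent of mbufs:
 */
#if 0   /* worked example only -- not compiled */
#include <stdio.h>

#define ROUNDUP4(x)     (((x) + 3u) & ~3u)      /* like roundup(x, sizeof(uint32_t)) */

int
main(void)
{
        unsigned int pktlen = 53;               /* arbitrary packet length */
        unsigned int totlen = ROUNDUP4(pktlen); /* 56: what we actually DMA */
        unsigned int pad = totlen - pktlen;     /* 3: later trimmed from the tail */

        printf("pktlen %u -> totlen %u, pad %u\n", pktlen, totlen, pad);
        return (0);
}
#endif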
 1967 
 1968 /*
 1969  * Argument for RX DMAMAP loader.
 1970  */
 1971 struct rxarg {
 1972         struct en_softc *sc;
 1973         struct mbuf *m;
 1974         u_int pre_skip;         /* number of bytes to skip at begin */
 1975         u_int post_skip;        /* number of bytes to skip at end */
 1976         struct en_vcc *vc;      /* vc we are receiving on */
 1977         int wait;               /* wait for DRQ entries */
 1978 };
 1979 
 1980 /*
 1981  * Copy the segment table to the buffer for later use and compute the
 1982  * number of DMA queue entries we need.
 1983  *
 1984  * LOCK: locked, needed
 1985  */
 1986 static void
 1987 en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
 1988     bus_size_t mapsize, int error)
 1989 {
 1990         struct rxarg *rx = uarg;
 1991         struct en_softc *sc = rx->sc;
 1992         struct en_rxslot *slot = rx->vc->rxslot;
 1993         u_int           free;           /* number of free DRQ entries */
 1994         uint32_t        cur;            /* current buffer offset */
 1995         uint32_t        drq;            /* DRQ entry pointer */
 1996         uint32_t        last_drq;       /* where we have written last */
 1997         u_int           needalign, cnt, count, bcode;
 1998         bus_addr_t      addr;
 1999         bus_size_t      rest;
 2000         int             i;
 2001 
 2002         if (error != 0)
 2003                 return;
 2004         if (nseg > EN_MAX_DMASEG)
 2005                 panic("too many DMA segments");
 2006 
 2007         rx->wait = 0;
 2008 
 2009         free = sc->drq_free;
 2010         drq = sc->drq_us;
 2011         cur = slot->cur;
 2012 
 2013         last_drq = 0;
 2014 
 2015         /*
 2016          * Local macro to add an entry to the receive DMA area. If there
 2017          * are no entries left, return. Save the byte offset of the entry
 2018          * in last_drq for later use.
 2019          */
 2020 #define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR)                          \
 2021         if (free == 0) {                                                \
 2022                 EN_COUNT(sc->stats.rxdrqout);                           \
 2023                 rx->wait = 1;                                           \
 2024                 return;                                                 \
 2025         }                                                               \
 2026         last_drq = drq;                                                 \
 2027         en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ?                \
 2028             MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) :          \
 2029             MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE));          \
 2030         en_write(sc, drq + 4, ADDR);                                    \
 2031                                                                         \
 2032         EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8);                     \
 2033         free--;
 2034 
 2035         /*
 2036          * Local macro to generate a DMA entry to DMA cnt bytes. Updates
 2037          * the current buffer byte offset accordingly.
 2038          */
 2039 #define DO_DRQ(TYPE) do {                                               \
 2040         rest -= cnt;                                                    \
 2041         EN_WRAPADD(slot->start, slot->stop, cur, cnt);                  \
 2042         DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x",     \
 2043             slot - sc->rxslot, cnt, (uintmax_t)rest, cur));             \
 2044                                                                         \
 2045         PUT_DRQ_ENTRY(1, bcode, count, addr);                           \
 2046                                                                         \
 2047         addr += cnt;                                                    \
 2048     } while (0)
 2049 
 2050         /*
 2051          * Skip the RBD at the beginning
 2052          */
 2053         if (rx->pre_skip > 0) {
 2054                 /* update DMA address */
 2055                 EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
 2056 
 2057                 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
 2058         }
 2059 
 2060         for (i = 0; i < nseg; i++, segs++) {
 2061                 addr = segs->ds_addr;
 2062                 rest = segs->ds_len;
 2063 
 2064                 if (sc->is_adaptec) {
 2065                         /* adaptec card - simple */
 2066 
 2067                         /* advance the on-card buffer pointer */
 2068                         EN_WRAPADD(slot->start, slot->stop, cur, rest);
 2069                         DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
 2070                             "(cur now 0x%x)", slot - sc->rxslot,
 2071                             (uintmax_t)rest, (uintmax_t)addr, cur));
 2072 
 2073                         PUT_DRQ_ENTRY(0, 0, rest, addr);
 2074 
 2075                         continue;
 2076                 }
 2077 
 2078                 /*
 2079                  * do we need to do a DMA op to align to the maximum
 2080                  * burst? Note that we are always 32-bit aligned.
 2081                  */
 2082                 if (sc->alburst &&
 2083                     (needalign = (addr & sc->bestburstmask)) != 0) {
 2084                         /* compute number of bytes, words and code */
 2085                         cnt = sc->bestburstlen - needalign;
 2086                         if (cnt > rest)
 2087                                 cnt = rest;
 2088                         count = cnt / sizeof(uint32_t);
 2089                         if (sc->noalbursts) {
 2090                                 bcode = MIDDMA_WORD;
 2091                         } else {
 2092                                 bcode = en_dmaplan[count].bcode;
 2093                                 count = cnt >> en_dmaplan[count].divshift;
 2094                         }
 2095                         DO_DRQ("al_dma");
 2096                 }
 2097 
 2098                 /* do we need to do a max-sized burst? */
 2099                 if (rest >= sc->bestburstlen) {
 2100                         count = rest >> sc->bestburstshift;
 2101                         cnt = count << sc->bestburstshift;
 2102                         bcode = sc->bestburstcode;
 2103                         DO_DRQ("best_dma");
 2104                 }
 2105 
 2106                 /* do we need to do a cleanup burst? */
 2107                 if (rest != 0) {
 2108                         cnt = rest;
 2109                         count = rest / sizeof(uint32_t);
 2110                         if (sc->noalbursts) {
 2111                                 bcode = MIDDMA_WORD;
 2112                         } else {
 2113                                 bcode = en_dmaplan[count].bcode;
 2114                                 count = cnt >> en_dmaplan[count].divshift;
 2115                         }
 2116                         DO_DRQ("clean_dma");
 2117                 }
 2118         }
 2119 
 2120         /*
 2121          * Skip stuff at the end
 2122          */
 2123         if (rx->post_skip > 0) {
 2124                 /* update DMA address */
 2125                 EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
 2126 
 2127                 PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
 2128         }
 2129 
 2130         /* record the end for the interrupt routine */
 2131         sc->drq[MID_DRQ_A2REG(last_drq)] =
 2132             EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
 2133 
 2134         /* set the end flag in the last descriptor */
 2135         en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
 2136 
 2137 #undef PUT_DRQ_ENTRY
 2138 #undef DO_DRQ
 2139 
 2140         /* commit */
 2141         slot->cur = cur;
 2142         sc->drq_free = free;
 2143         sc->drq_us = drq;
 2144 
 2145         /* signal to card */
 2146         en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
 2147 }
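
/*
 * en_rxdma_load() above splits every DMA segment into up to three pieces:
 * an aligning burst up to the next bestburstlen boundary, as many maximum-
 * sized bursts as fit, and a word-sized cleanup burst for the remainder.
 * The standalone sketch below shows just that splitting (DRQ writes, burst
 * codes and the softc are left out; the 64-byte burst size is an example).
 */
#if 0   /* illustrative sketch only -- not compiled */
#include <stdio.h>

#define BEST_BURST      64u                     /* e.g. sc->bestburstlen */

static void
split_segment(unsigned long addr, unsigned long len)
{
        unsigned long cnt;

        /* 1. align the start address to the burst size (the alburst case) */
        if ((addr & (BEST_BURST - 1)) != 0) {
                cnt = BEST_BURST - (addr & (BEST_BURST - 1));
                if (cnt > len)
                        cnt = len;
                printf("align burst:   %lu bytes at %#lx\n", cnt, addr);
                addr += cnt;
                len -= cnt;
        }

        /* 2. maximum-sized bursts */
        if (len >= BEST_BURST) {
                cnt = len & ~(unsigned long)(BEST_BURST - 1);
                printf("max bursts:    %lu bytes at %#lx\n", cnt, addr);
                addr += cnt;
                len -= cnt;
        }

        /* 3. cleanup burst for the rest (a 32-bit multiple in the driver) */
        if (len != 0)
                printf("cleanup burst: %lu bytes at %#lx\n", len, addr);
}

int
main(void)
{
        split_segment(0x1234567cUL, 300);       /* arbitrary unaligned example */
        return (0);
}
#endif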
 2148 
 2149 /*
 2150  * en_service: handle a service interrupt
 2151  *
 2152  * Q: why do we need a software service list?
 2153  *
 2154  * A: if we remove a VCI from the hardware list and we find that we are
 2155  *    out of DRQs we must defer processing until some DRQs become free.
 2156  *    so we must remember to look at this RX VCI/slot later, but we can't
 2157  *    put it back on the hardware service list (since that isn't allowed).
 2158  *    so we instead save it on the software service list.   it would be nice 
 2159  *    if we could peek at the VCI on top of the hwservice list without removing
 2160  *    it; however, this leads to a race condition: if we peek at it and
 2161  *    decide we are done with it, new data could come in before we have a
 2162  *    chance to remove it from the hwslist.   by the time we get it out of
 2163  *    the list the interrupt for the new data will be lost.   oops!
 2164  *
 2165  * LOCK: locked, needed
 2166  */
 2167 static void
 2168 en_service(struct en_softc *sc)
 2169 {
 2170         struct mbuf     *m, *lastm;
 2171         struct en_map   *map;
 2172         struct rxarg    rx;
 2173         uint32_t        cur;
 2174         uint32_t        dstart;         /* data start (as reported by card) */
 2175         uint32_t        rbd;            /* receive buffer descriptor */
 2176         uint32_t        pdu;            /* AAL5 trailer */
 2177         int             mlen;
 2178         int             error;
 2179         struct en_rxslot *slot;
 2180         struct en_vcc *vc;
 2181 
 2182         rx.sc = sc;
 2183 
 2184   next_vci:
 2185         if (sc->swsl_size == 0) {
 2186                 DBG(sc, SERV, ("en_service done"));
 2187                 return;
 2188         }
 2189 
 2190         /*
 2191          * get vcc to service
 2192          */
 2193         rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
 2194         slot = vc->rxslot;
 2195         KASSERT (slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
 2196 
 2197         /*
 2198          * determine our mode and if we've got any work to do
 2199          */
 2200         DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
 2201             "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
 2202             slot->stop, slot->cur));
 2203 
 2204   same_vci:
 2205         cur = slot->cur;
 2206 
 2207         dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
 2208         dstart = (dstart * sizeof(uint32_t)) + slot->start;
 2209 
 2210         /* check to see if there is any data at all */
 2211         if (dstart == cur) {
 2212                 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1); 
 2213                 /* remove from swslist */
 2214                 vc->vflags &= ~VCC_SWSL;
 2215                 sc->swsl_size--;
 2216                 DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
 2217                     slot - sc->rxslot, vc->vcc.vci));
 2218                 goto next_vci;
 2219         }
 2220 
 2221         /*
 2222          * figure out how many bytes we need
 2223          * [mlen = # bytes to go in mbufs]
 2224          */
 2225         rbd = en_read(sc, cur);
 2226         if (MID_RBD_ID(rbd) != MID_RBD_STDID) 
 2227                 panic("en_service: id mismatch");
 2228 
 2229         if (rbd & MID_RBD_T) {
 2230                 mlen = 0;               /* we've got trash */
 2231                 rx.pre_skip = MID_RBD_SIZE;
 2232                 rx.post_skip = 0;
 2233                 EN_COUNT(sc->stats.ttrash);
 2234                 DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
 2235 
 2236         } else if (vc->vcc.aal != ATMIO_AAL_5) {
 2237                 /* 1 cell (ick!) */
 2238                 mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
 2239                 rx.pre_skip = MID_RBD_SIZE;
 2240                 rx.post_skip = 0;
 2241 
 2242         } else {
 2243                 rx.pre_skip = MID_RBD_SIZE;
 2244 
 2245                 /* get PDU trailer in correct byte order */
 2246                 pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
 2247                     MID_RBD_SIZE - MID_PDU_SIZE;
 2248                 if (pdu >= slot->stop)
 2249                         pdu -= EN_RXSZ * 1024;
 2250                 pdu = en_read(sc, pdu);
 2251 
 2252                 if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
 2253                     MID_PDU_LEN(pdu)) {
 2254                         device_printf(sc->dev, "invalid AAL5 length\n");
 2255                         rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
 2256                         mlen = 0;
 2257                         sc->ifp->if_ierrors++;
 2258 
 2259                 } else if (rbd & MID_RBD_CRCERR) {
 2260                         device_printf(sc->dev, "CRC error\n");
 2261                         rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
 2262                         mlen = 0;
 2263                         sc->ifp->if_ierrors++;
 2264 
 2265                 } else {
 2266                         mlen = MID_PDU_LEN(pdu);
 2267                         rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
 2268                 }
 2269         }
 2270 
 2271         /*
 2272          * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
 2273          *
 2274          * notes:
 2275          *  1. it is possible that we've already allocated an mbuf for this pkt
 2276          *     but ran out of DRQs, in which case we saved the allocated mbuf
 2277          *     on "q".
 2278          *  2. if we save a buf in "q" we store the "cur" (pointer) in the
 2279          *     buf as an identity (that we can check later).
 2280          *  3. after this block of code, if m is still NULL then we ran out of
 2281          *     mbufs
 2282          */
 2283         _IF_DEQUEUE(&slot->q, m);
 2284         if (m != NULL) {
 2285                 if (m->m_pkthdr.csum_data != cur) {
 2286                         /* wasn't ours */
 2287                         DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
 2288                             slot - sc->rxslot, m));
 2289                         _IF_PREPEND(&slot->q, m);
 2290                         m = NULL;
 2291                         EN_COUNT(sc->stats.rxqnotus);
 2292                 } else {
 2293                         EN_COUNT(sc->stats.rxqus);
 2294                         DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
 2295                             slot - sc->rxslot, m));
 2296                 }
 2297         }
 2298         if (mlen == 0 && m != NULL) {
 2299                 /* should not happen */
 2300                 m_freem(m);
 2301                 m = NULL;
 2302         }
 2303 
 2304         if (mlen != 0 && m == NULL) {
 2305                 m = en_mget(sc, mlen);
 2306                 if (m == NULL) {
 2307                         rx.post_skip += mlen;
 2308                         mlen = 0;
 2309                         EN_COUNT(sc->stats.rxmbufout);
 2310                         DBG(sc, SERV, ("rx%td: out of mbufs",
 2311                             slot - sc->rxslot));
 2312                 } else
 2313                         rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
 2314 
 2315                 DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
 2316                     slot - sc->rxslot, m, mlen));
 2317         }
 2318 
 2319         DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
 2320             slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
 2321             rx.post_skip));
 2322 
 2323         if (m != NULL) {
 2324                 /* M_NOWAIT - called from interrupt context */
 2325                 map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
 2326                 if (map == NULL) {
 2327                         rx.post_skip += mlen;
 2328                         m_freem(m);
 2329                         DBG(sc, SERV, ("rx%td: out of maps",
 2330                             slot - sc->rxslot));
 2331                         goto skip;
 2332                 }
 2333                 rx.m = m;
 2334                 error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
 2335                     en_rxdma_load, &rx, BUS_DMA_NOWAIT);
 2336 
 2337                 if (error != 0) {
 2338                         device_printf(sc->dev, "loading RX map failed "
 2339                             "%d\n", error);
 2340                         uma_zfree(sc->map_zone, map);
 2341                         m_freem(m);
 2342                         rx.post_skip += mlen;
 2343                         goto skip;
 2344 
 2345                 }
 2346                 map->flags |= ENMAP_LOADED;
 2347 
 2348                 if (rx.wait) {
 2349                         /* out of DRQs - wait */
 2350                         uma_zfree(sc->map_zone, map);
 2351 
 2352                         m->m_pkthdr.csum_data = cur;
 2353                         _IF_ENQUEUE(&slot->q, m);
 2354                         EN_COUNT(sc->stats.rxdrqout);
 2355 
 2356                         sc->need_drqs = 1;      /* flag condition */
 2357                         return;
 2358 
 2359                 }
 2360                 (void)m_length(m, &lastm);
 2361                 lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
 2362 
 2363                 m->m_pkthdr.rcvif = (void *)map;
 2364                 _IF_ENQUEUE(&slot->indma, m);
 2365 
 2366                 /* get next packet in this slot */
 2367                 goto same_vci;
 2368         }
 2369   skip:
 2370         /*
 2371          * We end up here if we should drop the packet from the receive buffer.
 2372          * The number of bytes to drop ends up in rx.post_skip. We can do this
 2373          * with one JK entry. If we don't even have that one free - wait.
 2374          */
 2375         if (sc->drq_free == 0) {
 2376                 sc->need_drqs = 1;      /* flag condition */
 2377                 return;
 2378         }
 2379         rx.post_skip += rx.pre_skip;
 2380         DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
 2381 
 2382         /* advance buffer address */
 2383         EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
 2384 
 2385         /* write DRQ entry */
 2386         if (sc->is_adaptec)
 2387                 en_write(sc, sc->drq_us,
 2388                     MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
 2389                     vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
 2390         else
 2391                 en_write(sc, sc->drq_us,
 2392                     MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
 2393                     vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
 2394         en_write(sc, sc->drq_us + 4, 0);
 2395         EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
 2396         sc->drq_free--;
 2397 
 2398         /* signal to RX interrupt */
 2399         sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
 2400         slot->cur = cur;
 2401 
 2402         /* signal to card */
 2403         en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
 2404 
 2405         goto same_vci;
 2406 }
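
/*
 * In the AAL5 branch above, en_service() locates the 8-byte PDU trailer at
 * the end of the received cells, wrapping inside the circular receive area.
 * The helper below is only a sketch of that address arithmetic; the real
 * sizes (MID_RBD_SIZE, MID_PDU_SIZE, 48-byte cell payloads) come from the
 * headers.
 */
#if 0   /* illustrative sketch only -- not compiled */
/*
 * cur .. offset of the receive buffer descriptor, ncells .. number of
 * 48-byte cells stored after it.  Returns the offset of the trailer,
 * wrapped back into the slot [start, stop).
 */
static unsigned int
aal5_trailer_off(unsigned int cur, unsigned int ncells, unsigned int rbd_size,
    unsigned int trailer_size, unsigned int start, unsigned int stop)
{
        unsigned int off;

        off = cur + rbd_size + ncells * 48 - trailer_size;
        if (off >= stop)                        /* wrapped past the slot end */
                off -= stop - start;
        return (off);
}
#endif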
 2407 
 2408 /*
 2409  * interrupt handler
 2410  *
 2411  * LOCK: unlocked, needed
 2412  */
 2413 void
 2414 en_intr(void *arg)
 2415 {
 2416         struct en_softc *sc = arg;
 2417         uint32_t reg, kick, mask;
 2418         int lcv, need_softserv;
 2419 
 2420         EN_LOCK(sc);
 2421 
 2422         reg = en_read(sc, MID_INTACK);
 2423         DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
 2424 
 2425         if ((reg & MID_INT_ANY) == 0) {
 2426                 EN_UNLOCK(sc);
 2427                 return;
 2428         }
 2429 
 2430         /*
 2431          * unexpected errors that need a reset
 2432          */
 2433         if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
 2434                 device_printf(sc->dev, "unexpected interrupt=0x%b, "
 2435                     "resetting\n", reg, MID_INTBITS);
 2436 #ifdef EN_DEBUG
 2437                 panic("en: unexpected error");
 2438 #else
 2439                 en_reset_ul(sc);
 2440                 en_init(sc);
 2441 #endif
 2442                 EN_UNLOCK(sc);
 2443                 return;
 2444         }
 2445 
 2446         if (reg & MID_INT_SUNI)
 2447                 utopia_intr(&sc->utopia);
 2448 
 2449         kick = 0;
 2450         if (reg & MID_INT_TX)
 2451                 kick |= en_intr_tx(sc, reg);
 2452 
 2453         if (reg & MID_INT_DMA_TX)
 2454                 kick |= en_intr_tx_dma(sc);
 2455 
 2456         /*
 2457          * kick xmit channels as needed.
 2458          */
 2459         if (kick) {
 2460                 DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
 2461                 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
 2462                         if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
 2463                                 en_txdma(sc, &sc->txslot[lcv]);
 2464         }
 2465 
 2466         need_softserv = 0;
 2467         if (reg & MID_INT_DMA_RX)
 2468                 need_softserv |= en_intr_rx_dma(sc);
 2469 
 2470         if (reg & MID_INT_SERVICE)
 2471                 need_softserv |= en_intr_service(sc);
 2472 
 2473         if (need_softserv)
 2474                 en_service(sc);
 2475 
 2476         /*
 2477          * keep our stats
 2478          */
 2479         if (reg & MID_INT_DMA_OVR) {
 2480                 EN_COUNT(sc->stats.dmaovr);
 2481                 DBG(sc, INTR, ("MID_INT_DMA_OVR"));
 2482         }
 2483         reg = en_read(sc, MID_STAT);
 2484         sc->stats.otrash += MID_OTRASH(reg);
 2485         sc->stats.vtrash += MID_VTRASH(reg);
 2486 
 2487         EN_UNLOCK(sc);
 2488 }
 2489 
 2490 /*
 2491  * Read at most n SUNI regs starting at reg into val
 2492  */
 2493 static int
 2494 en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
 2495 {
 2496         struct en_softc *sc = ifatm->ifp->if_softc;
 2497         u_int i;
 2498 
 2499         EN_CHECKLOCK(sc);
 2500         if (reg >= MID_NSUNI)
 2501                 return (EINVAL);
 2502         if (reg + *n > MID_NSUNI)
 2503                 *n = MID_NSUNI - reg;
 2504 
 2505         for (i = 0; i < *n; i++)
 2506                 val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
 2507 
 2508         return (0);
 2509 }
 2510 
 2511 /*
 2512  * change the bits selected by mask in register reg to their values in val
 2513  */
 2514 static int
 2515 en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
 2516 {
 2517         struct en_softc *sc = ifatm->ifp->if_softc;
 2518         uint32_t regval;
 2519 
 2520         EN_CHECKLOCK(sc);
 2521         if (reg >= MID_NSUNI)
 2522                 return (EINVAL);
 2523         regval = en_read(sc, MID_SUNIOFF + 4 * reg);
 2524         regval = (regval & ~mask) | (val & mask);
 2525         en_write(sc, MID_SUNIOFF + 4 * reg, regval);
 2526         return (0);
 2527 }
 2528 
 2529 static const struct utopia_methods en_utopia_methods = {
 2530         en_utopia_readregs,
 2531         en_utopia_writereg
 2532 };
 2533 
 2534 /*********************************************************************/
 2535 /*
 2536  * Probing the DMA brokeness of the card
 2537  */
 2538 
 2539 /*
 2540  * Physical address load helper function for DMA probe
 2541  *
 2542  * LOCK: unlocked, not needed
 2543  */
 2544 static void
 2545 en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
 2546 {
 2547         if (error == 0)
 2548                 *(bus_addr_t *)uarg = segs[0].ds_addr;
 2549 }
 2550 
 2551 /*
 2552  * en_dmaprobe: helper function for en_attach.
 2553  *
 2554  * see how the card handles DMA by running a few DMA tests.   we need
 2555  * to figure out the largest number of bytes we can DMA in one burst
 2556  * ("bestburstlen"), and if the starting address for a burst needs to
 2557  * be aligned on any sort of boundary or not ("alburst").
 2558  *
 2559  * Things turn out more complex than that, because on my (harti) brand
 2560  * new motherboard (2.4GHz) we can do 64-byte aligned DMAs, but everything
 2561  * with more than 4 bytes fails (with an RX DMA timeout) for physical
 2562  * addresses that end with 0xc. Therefore we search not only for the largest
 2563  * supported burst size (hopefully 64) but also check the largest supported
 2564  * unaligned size. If that appears to be less than 4 words, we set the
 2565  * noalbursts flag. That flag is set only if alburst is also set.
 2566  */
 2567 
 2568 /*
 2569  * en_dmaprobe_doit: do actual testing for the DMA test.
 2570  * Cycle through all burst sizes from 8 up to 64 and test whether each works.
 2571  * Return the largest one that works.
 2572  *
 2573  * LOCK: unlocked, not needed
 2574  */
 2575 static int
 2576 en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
 2577 {
 2578         uint8_t *dp = sp + MIDDMA_MAXBURST;
 2579         bus_addr_t pdp = psp + MIDDMA_MAXBURST;
 2580         int lcv, retval = 4, cnt;
 2581         uint32_t reg, bcode, midvloc;
 2582 
 2583         if (sc->en_busreset)
 2584                 sc->en_busreset(sc);
 2585         en_write(sc, MID_RESID, 0x0);   /* reset card before touching RAM */
 2586 
 2587         /*
 2588          * set up a 1k buffer at MID_BUFOFF
 2589          */
 2590         midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
 2591             >> MIDV_LOCTOPSHFT;
 2592         en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
 2593         en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT) 
 2594             | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
 2595         en_write(sc, MID_DST_RP(0), 0);
 2596         en_write(sc, MID_WP_ST_CNT(0), 0);
 2597 
 2598         /* set up sample data */
 2599         for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
 2600                 sp[lcv] = lcv + 1;
 2601 
 2602         /* enable DMA (only) */
 2603         en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
 2604 
 2605         sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
 2606         sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
 2607 
 2608         /*
 2609          * try it now . . .  DMA it out, then DMA it back in and compare
 2610          *
 2611          * note: in order to get the dma stuff to reverse directions it wants
 2612          * the "end" flag set!   since we are not dma'ing valid data we may
 2613          * get an ident mismatch interrupt (which we will ignore).
 2614          */
 2615         DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx", 
 2616             sp, (u_long)psp, dp, (u_long)pdp));
 2617         for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
 2618                 DBG(sc, DMA, ("test lcv=%d", lcv));
 2619 
 2620                 /* zero SRAM and dest buffer */
 2621                 bus_space_set_region_4(sc->en_memt, sc->en_base,
 2622                     MID_BUFOFF, 0, 1024 / 4);
 2623                 bzero(dp, MIDDMA_MAXBURST);
 2624 
 2625                 bcode = en_sz2b(lcv);
 2626 
 2627                 /* build lcv-byte-DMA x NBURSTS */
 2628                 if (sc->is_adaptec)
 2629                         en_write(sc, sc->dtq_chip,
 2630                             MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
 2631                 else
 2632                         en_write(sc, sc->dtq_chip,
 2633                             MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
 2634                 en_write(sc, sc->dtq_chip + 4, psp);
 2635                 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
 2636                 en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
 2637 
 2638                 cnt = 1000;
 2639                 while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
 2640                     MID_DTQ_A2REG(sc->dtq_chip)) {
 2641                         DELAY(1);
 2642                         if (--cnt == 0) {
 2643                                 DBG(sc, DMA, ("unexpected timeout in tx "
 2644                                     "DMA test\n  alignment=0x%lx, burst size=%d"
 2645                                     ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
 2646                                     (u_long)sp & 63, lcv,
 2647                                     en_read(sc, MID_DMA_ADDR), reg,
 2648                                     en_read(sc, MID_INTSTAT)));
 2649                                 return (retval);
 2650                         }
 2651                 }
 2652 
 2653                 reg = en_read(sc, MID_INTACK); 
 2654                 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
 2655                         DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
 2656                             reg));
 2657                         return (retval);
 2658                 }
 2659                 /* re-enable DMA (only) */
 2660                 en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
 2661 
 2662                 /* "return to sender..."  address is known ... */
 2663 
 2664                 /* build lcv-byte-DMA x NBURSTS */
 2665                 if (sc->is_adaptec)
 2666                         en_write(sc, sc->drq_chip,
 2667                             MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
 2668                 else
 2669                         en_write(sc, sc->drq_chip,
 2670                             MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
 2671                 en_write(sc, sc->drq_chip + 4, pdp);
 2672                 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
 2673                 en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
 2674                 cnt = 1000;
 2675                 while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
 2676                     MID_DRQ_A2REG(sc->drq_chip)) {
 2677                         DELAY(1);
 2678                         cnt--;
 2679                         if (cnt == 0) {
 2680                                 DBG(sc, DMA, ("unexpected timeout in rx "
 2681                                     "DMA test, rdrx=%#x\n", reg));
 2682                                 return (retval);
 2683                         }
 2684                 }
 2685                 reg = en_read(sc, MID_INTACK); 
 2686                 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
 2687                         DBG(sc, DMA, ("unexpected status in rx DMA "
 2688                             "test: 0x%x\n", reg));
 2689                         return (retval);
 2690                 }
 2691                 if (bcmp(sp, dp, lcv)) {
 2692                         DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
 2693                             "dp=%p", lcv, sp, dp));
 2694                         return (retval);
 2695                 }
 2696 
 2697                 retval = lcv;
 2698         }
 2699         return (retval);        /* studly 64 byte DMA present!  oh baby!! */
 2700 }
 2701 
 2702 /*
 2703  * Find the best DMA parameters
 2704  *
 2705  * LOCK: unlocked, not needed
 2706  */
 2707 static void
 2708 en_dmaprobe(struct en_softc *sc)
 2709 {
 2710         bus_dma_tag_t tag;
 2711         bus_dmamap_t map;
 2712         int err;
 2713         void *buffer;
 2714         int bestalgn, lcv, try, bestnoalgn;
 2715         bus_addr_t phys;
 2716         uint8_t *addr;
 2717 
 2718         sc->alburst = 0;
 2719         sc->noalbursts = 0;
 2720 
 2721         /*
 2722          * Allocate some DMA-able memory.
 2723          * We need 3 times the max burst size aligned to the max burst size.
 2724          */
 2725         err = bus_dma_tag_create(NULL, MIDDMA_MAXBURST, 0,
 2726             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2727             3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
 2728             NULL, NULL, &tag);
 2729         if (err)
 2730                 panic("%s: cannot create test DMA tag %d", __func__, err);
 2731 
 2732         err = bus_dmamem_alloc(tag, &buffer, 0, &map);
 2733         if (err)
 2734                 panic("%s: cannot allocate test DMA memory %d", __func__, err);
 2735 
 2736         err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
 2737             en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
 2738         if (err)
 2739                 panic("%s: cannot load test DMA map %d", __func__, err);
 2740         addr = buffer;
 2741         DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
 2742 
 2743         /*
 2744          * Now get the best burst size of the aligned case.
 2745          */
 2746         bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
 2747 
 2748         /*
 2749          * Now try unaligned. 
 2750          */
 2751         for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
 2752                 try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
 2753 
 2754                 if (try < bestnoalgn)
 2755                         bestnoalgn = try;
 2756         }
 2757 
 2758         if (bestnoalgn < bestalgn) {
 2759                 sc->alburst = 1;
 2760                 if (bestnoalgn < 32)
 2761                         sc->noalbursts = 1;
 2762         }
 2763 
 2764         sc->bestburstlen = bestalgn;
 2765         sc->bestburstshift = en_log2(bestalgn);
 2766         sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
 2767         sc->bestburstcode = en_sz2b(bestalgn);
 2768 
 2769         /*
 2770          * Reset the chip before freeing the buffer. It may still be trying
 2771          * to DMA.
 2772          */
 2773         if (sc->en_busreset)
 2774                 sc->en_busreset(sc);
 2775         en_write(sc, MID_RESID, 0x0);   /* reset card before touching RAM */
 2776 
 2777         DELAY(10000);                   /* may still do DMA */
 2778 
 2779         /*
 2780          * Free the DMA stuff
 2781          */
 2782         bus_dmamap_unload(tag, map);
 2783         bus_dmamem_free(tag, buffer, map);
 2784         bus_dma_tag_destroy(tag);
 2785 }
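
/*
 * A worked example of the values en_dmaprobe() derives for the common best
 * case of 64-byte bursts (this is only arithmetic, not driver code):
 *
 *      bestburstlen   = 64
 *      bestburstshift = en_log2(64) = 6     (len >> 6 = number of full bursts)
 *      bestburstmask  = 64 - 1      = 0x3f  (addr & 0x3f = misalignment)
 *
 * For a segment starting at a physical address ending in 0x1234:
 *      0x1234 & 0x3f = 0x34 (52 bytes in), so en_rxdma_load() would first
 *      issue a 64 - 52 = 12 byte aligning DMA before the full-sized bursts.
 */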
 2786 
 2787 /*********************************************************************/
 2788 /*
 2789  * Attach/detach.
 2790  */
 2791 
 2792 /*
 2793  * Attach to the card.
 2794  *
 2795  * LOCK: unlocked, not needed (but initialized)
 2796  */
 2797 int
 2798 en_attach(struct en_softc *sc)
 2799 {
 2800         struct ifnet *ifp = sc->ifp;
 2801         int sz;
 2802         uint32_t reg, lcv, check, ptr, sav, midvloc;
 2803 
 2804 #ifdef EN_DEBUG
 2805         sc->debug = EN_DEBUG;
 2806 #endif
 2807 
 2808         /*
 2809          * Probe card to determine memory size.
 2810          *
 2811          * The stupid ENI card always reports to PCI that it needs 4MB of
 2812          * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
 2813          * addresses wrap in the RAM address space (i.e. on a 512KB card
 2814          * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
 2815          * 0x27fffc  [note that RAM starts at offset 0x200000]).
 2816          */
 2817 
 2818         /* reset card before touching RAM */
 2819         if (sc->en_busreset)
 2820                 sc->en_busreset(sc);
 2821         en_write(sc, MID_RESID, 0x0);
 2822 
 2823         for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
 2824                 en_write(sc, lcv, lcv); /* data[address] = address */
 2825                 for (check = MID_PROBEOFF; check < lcv ;check += MID_PROBSIZE) {
 2826                         reg = en_read(sc, check);
 2827                         if (reg != check)
 2828                                 /* found an alias! - quit */
 2829                                 goto done_probe;
 2830                 }
 2831         }
 2832   done_probe:
 2833         lcv -= MID_PROBSIZE;                    /* take one step back */
 2834         sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
 2835 
 2836         /*
 2837          * determine the largest DMA burst supported
 2838          */
 2839         en_dmaprobe(sc);
 2840 
 2841         /*
 2842          * "hello world"
 2843          */
 2844 
 2845         /* reset */
 2846         if (sc->en_busreset)
 2847                 sc->en_busreset(sc);
 2848         en_write(sc, MID_RESID, 0x0);           /* reset */
 2849 
 2850         /* zero memory */
 2851         bus_space_set_region_4(sc->en_memt, sc->en_base,
 2852             MID_RAMOFF, 0, sc->en_obmemsz / 4);
 2853 
 2854         reg = en_read(sc, MID_RESID);
 2855 
 2856         device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
 2857             "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg), 
 2858             (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
 2859             (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
 2860             (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
 2861             (long)sc->en_obmemsz / 1024);
 2862 
 2863         /*
 2864          * fill in common ATM interface stuff
 2865          */
 2866         IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
 2867             (MID_MID(reg) << 8) | MID_DID(reg);
 2868         if (MID_DID(reg) & 0x4)
 2869                 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
 2870         else
 2871                 IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
 2872 
 2873         IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
 2874         IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
 2875         IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
 2876         IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
 2877         IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
 2878 
 2879         if (sc->is_adaptec) {
 2880                 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
 2881                 if (sc->bestburstlen == 64 && sc->alburst == 0)
 2882                         device_printf(sc->dev,
 2883                             "passed 64 byte DMA test\n");
 2884                 else
 2885                         device_printf(sc->dev, "FAILED DMA TEST: "
 2886                             "burst=%d, alburst=%d\n", sc->bestburstlen,
 2887                             sc->alburst);
 2888         } else {
 2889                 IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
 2890                 device_printf(sc->dev, "maximum DMA burst length = %d "
 2891                     "bytes%s\n", sc->bestburstlen, sc->alburst ?
 2892                     sc->noalbursts ?  " (no large bursts)" : " (must align)" :
 2893                     "");
 2894         }
 2895 
 2896         /*
 2897          * link into network subsystem and prepare card
 2898          */
 2899         sc->ifp->if_softc = sc;
 2900         ifp->if_flags = IFF_SIMPLEX;
 2901         ifp->if_ioctl = en_ioctl;
 2902         ifp->if_start = en_start;
 2903 
 2904         mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
 2905             MTX_NETWORK_LOCK, MTX_DEF);
 2906         cv_init(&sc->cv_close, "VC close");
 2907 
 2908         /*
 2909          * Make the sysctl tree
 2910          */
 2911         sysctl_ctx_init(&sc->sysctl_ctx);
 2912 
 2913         if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
 2914             SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
 2915             device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
 2916                 goto fail;
 2917 
 2918         if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
 2919             OID_AUTO, "istats", CTLFLAG_RD, sc, 0, en_sysctl_istats,
 2920             "S", "internal statistics") == NULL)
 2921                 goto fail;
 2922 
 2923 #ifdef EN_DEBUG
 2924         if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
 2925             OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
 2926                 goto fail;
 2927 #endif
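        /*
         * Assuming the device attaches as en0, the nodes created above
         * appear under hw.atm.en0: "istats" exports the internal
         * statistics as an opaque structure, and (when built with
         * EN_DEBUG) "debug" is a read/write knob for the debug mask,
         * e.g. settable with something like "sysctl hw.atm.en0.debug=8".
         */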
 2928 
 2929         IFP2IFATM(sc->ifp)->phy = &sc->utopia;
 2930         utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
 2931             &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
 2932             &en_utopia_methods);
 2933         utopia_init_media(&sc->utopia);
 2934 
 2935         MGET(sc->padbuf, M_TRYWAIT, MT_DATA);
 2936         if (sc->padbuf == NULL)
 2937                 goto fail;
 2938         bzero(sc->padbuf->m_data, MLEN);
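        /*
         * padbuf is a zero-filled mbuf kept around for the life of the
         * driver, presumably so the transmit path can borrow it to pad
         * outgoing PDUs without allocating a fresh buffer each time.
         */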
 2939 
 2940         if (bus_dma_tag_create(NULL, 1, 0,
 2941             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2942             EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
 2943             NULL, NULL, &sc->txtag))
 2944                 goto fail;
 2945 
 2946         sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
 2947             en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
 2948             UMA_ZONE_ZINIT);
 2949         if (sc->map_zone == NULL)
 2950                 goto fail;
 2951         uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
 2952 
 2953         /*
 2954          * init softc
 2955          */
 2956         sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
 2957             M_DEVBUF, M_ZERO | M_WAITOK);
 2958 
 2959         sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
 2960         ptr = sav = MID_BUFOFF;
 2961         ptr = roundup(ptr, EN_TXSZ * 1024);     /* align */
 2962         sz = sz - (ptr - sav);
 2963         if (EN_TXSZ*1024 * EN_NTX > sz) {
 2964                 device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
 2965                 goto fail;
 2966         }
 2967         for (lcv = 0 ;lcv < EN_NTX ;lcv++) {
 2968                 sc->txslot[lcv].mbsize = 0;
 2969                 sc->txslot[lcv].start = ptr;
 2970                 ptr += (EN_TXSZ * 1024);
 2971                 sz -= (EN_TXSZ * 1024);
 2972                 sc->txslot[lcv].stop = ptr;
 2973                 sc->txslot[lcv].nref = 0;
 2974                 DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
 2975                     sc->txslot[lcv].start, sc->txslot[lcv].stop));
 2976         }
 2977 
 2978         sav = ptr;
 2979         ptr = roundup(ptr, EN_RXSZ * 1024);     /* align */
 2980         sz = sz - (ptr - sav);
 2981         sc->en_nrx = sz / (EN_RXSZ * 1024);
 2982         if (sc->en_nrx <= 0) {
 2983                 device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
 2984                 goto fail;
 2985         }
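        /*
         * Rough sketch with hypothetical tuning values: if EN_NTX = 8 and
         * EN_TXSZ = EN_RXSZ = 32, the transmit slots take 8 * 32KB = 256KB
         * of the buffer RAM and en_nrx is however many whole 32KB receive
         * slots fit in the space that remains.
         */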
 2986 
 2987         /* 
 2988          * Ensure that at least one VC slot on the service list is always
 2989          * free so that we can tell a full list from an empty one.
 2990          */
 2991         if (sc->en_nrx >= MID_N_VC)
 2992                 sc->en_nrx = MID_N_VC - 1;
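        /*
         * Keeping one slot free is the usual ring-buffer convention for
         * telling the two states apart: read == write means empty, while
         * (write + 1) % size == read means full, so a list of N slots can
         * hold at most N - 1 entries.
         */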
 2993 
 2994         for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
 2995                 sc->rxslot[lcv].vcc = NULL;
 2996                 midvloc = sc->rxslot[lcv].start = ptr;
 2997                 ptr += (EN_RXSZ * 1024);
 2998                 sz -= (EN_RXSZ * 1024);
 2999                 sc->rxslot[lcv].stop = ptr;
 3000                 midvloc = midvloc - MID_RAMOFF;
 3001                 /* mask, cvt to words */
 3002                 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
 3003                 /* we only want the top 11 bits */
 3004                 midvloc = midvloc >> MIDV_LOCTOPSHFT;
 3005                 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
 3006                 sc->rxslot[lcv].mode = midvloc | 
 3007                     (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
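                /*
                 * In other words: take the slot's byte offset from the
                 * start of on-board RAM, round it down to a slot boundary,
                 * convert it to a word address and keep only the high-order
                 * location bits, which then land in the location field of
                 * the VC mode word next to the size code and the trash bit.
                 */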
 3008 
 3009                 DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
 3010                     sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
 3011                     sc->rxslot[lcv].mode));
 3012         }
 3013 
 3014         device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
 3015             "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
 3016         device_printf(sc->dev, "end station identifier (mac address) "
 3017             "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");
 3018 
 3019         /*
 3020          * Start SUNI stuff. This will call our readregs/writeregs
 3021          * functions and these assume the lock to be held so we must get it
 3022          * here.
 3023          */
 3024         EN_LOCK(sc);
 3025         utopia_start(&sc->utopia);
 3026         utopia_reset(&sc->utopia);
 3027         EN_UNLOCK(sc);
 3028 
 3029         /*
 3030          * final commit
 3031          */
 3032         atm_ifattach(ifp); 
 3033 
 3034 #ifdef ENABLE_BPF
 3035         bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
 3036 #endif
 3037 
 3038         return (0);
 3039 
 3040  fail:
 3041         en_destroy(sc);
 3042         return (-1);
 3043 }
 3044 
 3045 /*
 3046  * Free all internal resources. No access to bus resources here.
 3047  * No locking required here (interrupt is already disabled).
 3048  *
 3049  * LOCK: unlocked, needed (but destroyed)
 3050  */
 3051 void
 3052 en_destroy(struct en_softc *sc)
 3053 {
 3054         u_int i;
 3055 
 3056         if (sc->utopia.state & UTP_ST_ATTACHED) {
 3057                 /* these assume the lock to be held */
 3058                 EN_LOCK(sc);
 3059                 utopia_stop(&sc->utopia);
 3060                 utopia_detach(&sc->utopia);
 3061                 EN_UNLOCK(sc);
 3062         }
 3063 
 3064         if (sc->vccs != NULL) {
 3065                 /* get rid of sticky VCCs */
 3066                 for (i = 0; i < MID_N_VC; i++)
 3067                         if (sc->vccs[i] != NULL)
 3068                                 uma_zfree(en_vcc_zone, sc->vccs[i]);
 3069                 free(sc->vccs, M_DEVBUF);
 3070         }
 3071 
 3072         if (sc->padbuf != NULL)
 3073                 m_free(sc->padbuf);
 3074 
 3075         /*
 3076          * Destroy the map zone before the tag (the fini function will
 3077          * destroy the DMA maps using the tag)
 3078          */
 3079         if (sc->map_zone != NULL)
 3080                 uma_zdestroy(sc->map_zone);
 3081 
 3082         if (sc->txtag != NULL)
 3083                 bus_dma_tag_destroy(sc->txtag);
 3084 
 3085         (void)sysctl_ctx_free(&sc->sysctl_ctx);
 3086 
 3087         cv_destroy(&sc->cv_close);
 3088         mtx_destroy(&sc->en_mtx);
 3089 }
 3090 
 3091 /*
 3092  * Module loaded/unloaded
 3093  */
 3094 int
 3095 en_modevent(module_t mod __unused, int event, void *arg __unused)
 3096 {
 3097 
 3098         switch (event) {
 3099 
 3100           case MOD_LOAD:
 3101                 en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
 3102                     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 3103                 if (en_vcc_zone == NULL)
 3104                         return (ENOMEM);
 3105                 break;
 3106 
 3107           case MOD_UNLOAD:
 3108                 uma_zdestroy(en_vcc_zone);
 3109                 break;
 3110         }
 3111         return (0);
 3112 }
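/*
 * A minimal sketch (with hypothetical front-end names) of how a bus
 * attachment is expected to hand this handler to newbus, so that the
 * VCC zone exists before any instance attaches:
 *
 *      DRIVER_MODULE(en, pci, en_pci_driver, en_devclass, en_modevent, 0);
 */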
 3113 
 3114 /*********************************************************************/
 3115 /*
 3116  * Debugging support
 3117  */
 3118 
 3119 #ifdef EN_DDBHOOK
 3120 /*
 3121  * functions we can call from ddb
 3122  */
 3123 
 3124 /*
 3125  * en_dump: dump the state
 3126  */
 3127 #define END_SWSL        0x00000040              /* swsl state */
 3128 #define END_DRQ         0x00000020              /* drq state */
 3129 #define END_DTQ         0x00000010              /* dtq state */
 3130 #define END_RX          0x00000008              /* rx state */
 3131 #define END_TX          0x00000004              /* tx state */
 3132 #define END_MREGS       0x00000002              /* registers */
 3133 #define END_STATS       0x00000001              /* dump stats */
 3134 
 3135 #define END_BITS "\2\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
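/*
 * The level argument of en_dump() below is a bit mask of the END_* values
 * above, e.g. (END_TX | END_RX) = 0x0c dumps only the transmit and receive
 * slot state and 0x7f selects everything; from ddb this would typically be
 * invoked as something like "call en_dump(0, 0x7f)".
 */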
 3136 
 3137 static void
 3138 en_dump_stats(const struct en_stats *s)
 3139 {
 3140         printf("en_stats:\n");
 3141         printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
 3142             s->mfixfail);
 3143         printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
 3144         printf("\t%d times out of TX space and stalled\n", s->txoutspace);
 3145         printf("\t%d times out of DTQs\n", s->txdtqout);
 3146         printf("\t%d times launched a packet\n", s->launch);
 3147         printf("\t%d times pulled the hw service list\n", s->hwpull);
 3148         printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
 3149         printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
 3150             s->rxqnotus);
 3151         printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
 3152         printf("\t%d times ran out of DRQs\n", s->rxdrqout);
 3153         printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
 3154         printf("\t%d cells trashed due to a turned-off rxvc\n", s->vtrash);
 3155         printf("\t%d cells trashed due to a totally full buffer\n", s->otrash);
 3156         printf("\t%d cells trashed due to an almost-full buffer\n", s->ttrash);
 3157         printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
 3158         printf("\t%d times out of tx maps\n", s->txnomap);
 3159 #ifdef NATM
 3160 #ifdef NATM_STAT
 3161         printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
 3162             natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
 3163 #endif
 3164 #endif
 3165 }
 3166 
 3167 static void
 3168 en_dump_mregs(struct en_softc *sc)
 3169 {
 3170         u_int cnt;
 3171 
 3172         printf("mregs:\n");
 3173         printf("resid = 0x%x\n", en_read(sc, MID_RESID));
 3174         printf("interrupt status = 0x%b\n",
 3175             (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
 3176         printf("interrupt enable = 0x%b\n", 
 3177              (int)en_read(sc, MID_INTENA), MID_INTBITS);
 3178         printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
 3179         printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
 3180              MID_SL_A2REG(sc->hwslistp));
 3181         printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
 3182         printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
 3183             MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)), 
 3184             MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
 3185         printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
 3186             MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)), 
 3187             MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
 3188 
 3189         printf("  unusual txspeeds:");
 3190         for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
 3191                 if (sc->vccs[cnt]->txspeed)
 3192                         printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
 3193         printf("\n");
 3194 
 3195         printf("  rxvc slot mappings:");
 3196         for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
 3197                 if (sc->vccs[cnt]->rxslot != NULL)
 3198                         printf("  %d->%td", cnt,
 3199                             sc->vccs[cnt]->rxslot - sc->rxslot);
 3200         printf("\n");
 3201 }
 3202 
 3203 static void
 3204 en_dump_tx(struct en_softc *sc)
 3205 {
 3206         u_int slot;
 3207 
 3208         printf("tx:\n");
 3209         for (slot = 0 ; slot < EN_NTX; slot++) {
 3210                 printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d]  ", slot,
 3211                     sc->txslot[slot].start, sc->txslot[slot].stop,
 3212                     sc->txslot[slot].cur,
 3213                     (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
 3214                 printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
 3215                     sc->txslot[slot].bfree);
 3216                 printf("txhw: base_address=0x%x, size=%u, read=%u, "
 3217                     "descstart=%u\n",
 3218                     (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))), 
 3219                     MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
 3220                     en_read(sc, MIDX_READPTR(slot)),
 3221                     en_read(sc, MIDX_DESCSTART(slot)));
 3222         }
 3223 }
 3224 
 3225 static void
 3226 en_dump_rx(struct en_softc *sc)
 3227 {
 3228         struct en_rxslot *slot;
 3229 
 3230         printf("  recv slots:\n");
 3231         for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
 3232                 printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
 3233                     slot - sc->rxslot, slot->start, slot->stop, slot->cur,
 3234                     slot->mode);
 3235                 if (slot->vcc != NULL) {
 3236                         printf("vci=%u\n", slot->vcc->vcc.vci);
 3237                         printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
 3238                             en_read(sc, MID_VC(slot->vcc->vcc.vci)),
 3239                             en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
 3240                             en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
 3241                 }
 3242         }
 3243 }
 3244 
 3245 /*
 3246  * This is only correct for non-Adaptec adapters.
 3247  */
 3248 static void
 3249 en_dump_dtqs(struct en_softc *sc)
 3250 {
 3251         uint32_t ptr, reg;
 3252 
 3253         printf("  dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
 3254             sc->dtq_free);
 3255         ptr = sc->dtq_chip;
 3256         while (ptr != sc->dtq_us) {
 3257                 reg = en_read(sc, ptr);
 3258                 printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n", 
 3259                     sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
 3260                     MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
 3261                     MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
 3262                 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
 3263         }
 3264 }
 3265 
 3266 static void
 3267 en_dump_drqs(struct en_softc *sc)
 3268 {
 3269         uint32_t ptr, reg;
 3270 
 3271         printf("  drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
 3272             sc->drq_free);
 3273         ptr = sc->drq_chip;
 3274         while (ptr != sc->drq_us) {
 3275                 reg = en_read(sc, ptr);
 3276                 printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n", 
 3277                     sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
 3278                     MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
 3279                     MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
 3280                 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
 3281         }
 3282 }
 3283 
 3284 /* Do not staticize - meant for calling from DDB! */
 3285 int
 3286 en_dump(int unit, int level)
 3287 {
 3288         struct en_softc *sc;
 3289         int lcv, cnt;
 3290         devclass_t dc;
 3291         int maxunit;
 3292 
 3293         dc = devclass_find("en");
 3294         if (dc == NULL) {
 3295                 printf("%s: can't find devclass!\n", __func__);
 3296                 return (0);
 3297         }
 3298         maxunit = devclass_get_maxunit(dc);
 3299         for (lcv = 0 ; lcv < maxunit ; lcv++) {
 3300                 sc = devclass_get_softc(dc, lcv);
 3301                 if (sc == NULL)
 3302                         continue;
 3303                 if (unit != -1 && unit != lcv)
 3304                         continue;
 3305 
 3306                 device_printf(sc->dev, "dumping device at level 0x%b\n",
 3307                     level, END_BITS);
 3308 
 3309                 if (sc->dtq_us == 0) {
 3310                         printf("<hasn't been en_init'd yet>\n");
 3311                         continue;
 3312                 }
 3313 
 3314                 if (level & END_STATS)
 3315                         en_dump_stats(&sc->stats);
 3316                 if (level & END_MREGS)
 3317                         en_dump_mregs(sc);
 3318                 if (level & END_TX)
 3319                         en_dump_tx(sc);
 3320                 if (level & END_RX)
 3321                         en_dump_rx(sc);
 3322                 if (level & END_DTQ)
 3323                         en_dump_dtqs(sc);
 3324                 if (level & END_DRQ)
 3325                         en_dump_drqs(sc);
 3326 
 3327                 if (level & END_SWSL) {
 3328                         printf(" swslist [size=%d]: ", sc->swsl_size);
 3329                         for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ; 
 3330                             cnt = (cnt + 1) % MID_SL_N)
 3331                                 printf("0x%x ", sc->swslist[cnt]);
 3332                         printf("\n");
 3333                 }
 3334         }
 3335         return (0);
 3336 }
 3337 
 3338 /*
 3339  * en_dumpmem: dump the memory
 3340  *
 3341  * Do not staticize - meant for calling from DDB!
 3342  */
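/*
 * From ddb, a typical invocation might look like
 * "call en_dumpmem(0, 0x205000, 16)" to print 16 words of unit 0's
 * on-board RAM; the address is a card offset and must lie between
 * MID_RAMOFF and MID_MAXOFF.
 */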
 3343 int
 3344 en_dumpmem(int unit, int addr, int len)
 3345 {
 3346         struct en_softc *sc;
 3347         uint32_t reg;
 3348         devclass_t dc;
 3349 
 3350         dc = devclass_find("en");
 3351         if (dc == NULL) {
 3352                 printf("%s: can't find devclass\n", __func__);
 3353                 return (0);
 3354         }
 3355         sc = devclass_get_softc(dc, unit);
 3356         if (sc == NULL) {
 3357                 printf("%s: invalid unit number: %d\n", __func__, unit);
 3358                 return (0);
 3359         }
 3360 
 3361         addr = addr & ~3;
 3362         if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
 3363                 printf("invalid addr/len: %#x, %d\n", addr, len);
 3364                 return (0);
 3365         }
 3366         printf("dumping %d words starting at offset 0x%x\n", len, addr);
 3367         while (len--) {
 3368                 reg = en_read(sc, addr);
 3369                 printf("mem[0x%x] = 0x%x\n", addr, reg);
 3370                 addr += 4;
 3371         }
 3372         return (0);
 3373 }
 3374 #endif
