FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_sge.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15  
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 #define DEBUG_BUFRING
   30 
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/module.h>
   39 #include <sys/bus.h>
   40 #include <sys/conf.h>
   41 #include <machine/bus.h>
   42 #include <machine/resource.h>
   43 #include <sys/bus_dma.h>
   44 #include <sys/rman.h>
   45 #include <sys/queue.h>
   46 #include <sys/sysctl.h>
   47 #include <sys/taskqueue.h>
   48 
   49 #include <sys/proc.h>
   50 #include <sys/sbuf.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/systm.h>
   54 #include <sys/syslog.h>
   55 
   56 #include <netinet/in_systm.h>
   57 #include <netinet/in.h>
   58 #include <netinet/ip.h>
   59 #include <netinet/tcp.h>
   60 
   61 #include <dev/pci/pcireg.h>
   62 #include <dev/pci/pcivar.h>
   63 
   64 #include <vm/vm.h>
   65 #include <vm/pmap.h>
   66 
   67 #ifdef CONFIG_DEFINED
   68 #include <cxgb_include.h>
   69 #include <sys/mvec.h>
   70 #else
   71 #include <dev/cxgb/cxgb_include.h>
   72 #include <dev/cxgb/sys/mvec.h>
   73 #endif
   74 
   75 int      txq_fills = 0;
   76 /*
   77  * XXX don't re-enable this until TOE stops assuming
   78  * we have an m_ext
   79  */
   80 static int recycle_enable = 0;
   81 extern int cxgb_txq_buf_ring_size;
   82 int cxgb_cached_allocations;
   83 int cxgb_cached;
   84 int cxgb_ext_freed = 0;
   85 int cxgb_ext_inited = 0;
   86 int fl_q_size = 0;
   87 int jumbo_q_size = 0;
   88 
   89 extern int cxgb_use_16k_clusters;
   90 extern int cxgb_pcpu_cache_enable;
   91 extern int nmbjumbop;
   92 extern int nmbjumbo9;
   93 extern int nmbjumbo16;
   94 
   95 
   96 
   97 
   98 #define USE_GTS 0
   99 
  100 #define SGE_RX_SM_BUF_SIZE      1536
  101 #define SGE_RX_DROP_THRES       16
  102 #define SGE_RX_COPY_THRES       128
  103 
  104 /*
  105  * Period of the Tx buffer reclaim timer.  This timer does not need to run
  106  * frequently as Tx buffers are usually reclaimed by new Tx packets.
  107  */
  108 #define TX_RECLAIM_PERIOD       (hz >> 1)
  109 
  110 /* 
  111  * Values for sge_txq.flags
  112  */
  113 enum {
  114         TXQ_RUNNING     = 1 << 0,  /* fetch engine is running */
  115         TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
  116 };
  117 
  118 struct tx_desc {
  119         uint64_t        flit[TX_DESC_FLITS];
  120 } __packed;
  121 
  122 struct rx_desc {
  123         uint32_t        addr_lo;
  124         uint32_t        len_gen;
  125         uint32_t        gen2;
  126         uint32_t        addr_hi;
  127 } __packed;
  128 
  129 struct rsp_desc {               /* response queue descriptor */
  130         struct rss_header       rss_hdr;
  131         uint32_t                flags;
  132         uint32_t                len_cq;
  133         uint8_t                 imm_data[47];
  134         uint8_t                 intr_gen;
  135 } __packed;
  136 
  137 #define RX_SW_DESC_MAP_CREATED  (1 << 0)
  138 #define TX_SW_DESC_MAP_CREATED  (1 << 1)
  139 #define RX_SW_DESC_INUSE        (1 << 3)
  140 #define TX_SW_DESC_MAPPED       (1 << 4)
  141 
  142 #define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
  143 #define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
  144 #define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
  145 #define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
  146 
  147 struct tx_sw_desc {                /* SW state per Tx descriptor */
  148         struct mbuf_iovec mi;
  149         bus_dmamap_t    map;
  150         int             flags;
  151 };
  152 
  153 struct rx_sw_desc {                /* SW state per Rx descriptor */
  154         caddr_t          rxsd_cl;
  155         caddr_t          data;
  156         bus_dmamap_t      map;
  157         int               flags;
  158 };
  159 
  160 struct txq_state {
  161         unsigned int compl;
  162         unsigned int gen;
  163         unsigned int pidx;
  164 };
  165 
  166 struct refill_fl_cb_arg {
  167         int               error;
  168         bus_dma_segment_t seg;
  169         int               nseg;
  170 };
  171 
  172 /*
  173  * Maps a number of flits to the number of Tx descriptors that can hold them.
  174  * The formula is
  175  *
  176  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
  177  *
  178  * HW allows up to 4 descriptors to be combined into a WR.
  179  */
  180 static uint8_t flit_desc_map[] = {
  181         0,
  182 #if SGE_NUM_GENBITS == 1
  183         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  184         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  185         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  186         4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
  187 #elif SGE_NUM_GENBITS == 2
  188         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  189         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  190         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  191         4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  192 #else
  193 # error "SGE_NUM_GENBITS must be 1 or 2"
  194 #endif
  195 };
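      /*
       * Illustrative sketch (editor's annotation, not part of the original
       * driver): the table above is a precomputed form of the formula quoted
       * in the comment, assuming WR_FLITS is the number of usable flits per
       * work request once the generation flit(s) are excluded.  A direct
       * computation with a hypothetical helper would look roughly like:
       *
       *	static __inline u_int
       *	flits_to_ndesc(u_int flits)
       *	{
       *		return (flits <= 2 ? 1 : 1 + (flits - 2) / (WR_FLITS - 1));
       *	}
       *
       * The lookup table keeps an integer divide out of the transmit hot path.
       */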
  196 
  197 
  198 int cxgb_debug = 0;
  199 
  200 static void sge_timer_cb(void *arg);
  201 static void sge_timer_reclaim(void *arg, int ncount);
  202 static void sge_txq_reclaim_handler(void *arg, int ncount);
  203 
  204 /**
  205  *      reclaim_completed_tx - reclaims completed Tx descriptors
  206  *      @q: the Tx queue to reclaim completed descriptors from
  207  *      @reclaim_min: do nothing unless at least this many descriptors are reclaimable
  208  *
  209  *      Reclaims Tx descriptors that the SGE has indicated it has processed,
  210  *      and frees the associated buffers if possible.  Called with the Tx
  211  *      queue's lock held.
  212  */
  213 static __inline int
  214 reclaim_completed_tx_(struct sge_txq *q, int reclaim_min)
  215 {
  216         int reclaim = desc_reclaimable(q);
  217 
  218         if (reclaim < reclaim_min)
  219                 return (0);
  220         
  221         mtx_assert(&q->lock, MA_OWNED);
  222         if (reclaim > 0) {
  223                 t3_free_tx_desc(q, reclaim);
  224                 q->cleaned += reclaim;
  225                 q->in_use -= reclaim;
  226         } 
  227         return (reclaim);
  228 }
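      /*
       * Editor's annotation: callers must hold the queue lock, as the
       * mtx_assert() above enforces.  sge_txq_reclaim_() later in this file
       * shows the typical pattern: trylock the queue, call
       * reclaim_completed_tx_(txq, 16), and drop the lock, skipping the
       * reclaim entirely when the lock is contended.
       */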
  229 
  230 /**
  231  *      should_restart_tx - are there enough resources to restart a Tx queue?
  232  *      @q: the Tx queue
  233  *
  234  *      Checks if there are enough descriptors to restart a suspended Tx queue.
  235  */
  236 static __inline int
  237 should_restart_tx(const struct sge_txq *q)
  238 {
  239         unsigned int r = q->processed - q->cleaned;
  240 
  241         return q->in_use - r < (q->size >> 1);
  242 }
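      /*
       * Editor's annotation: q->processed - q->cleaned counts descriptors the
       * hardware has completed but the driver has not yet reclaimed, so
       * q->in_use - r is the number still genuinely outstanding.  The queue is
       * considered restartable once that count drops below half the ring.
       */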
  243 
  244 /**
  245  *      t3_sge_init - initialize SGE
  246  *      @adap: the adapter
  247  *      @p: the SGE parameters
  248  *
  249  *      Performs SGE initialization needed every time after a chip reset.
  250  *      We do not initialize any of the queue sets here; instead, the driver's
  251  *      top level must request them individually.  We also do not enable DMA
  252  *      here; that should be done after the queues have been set up.
  253  */
  254 void
  255 t3_sge_init(adapter_t *adap, struct sge_params *p)
  256 {
  257         u_int ctrl, ups;
  258 
  259         ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
  260 
  261         ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
  262                F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
  263                V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
  264                V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
  265 #if SGE_NUM_GENBITS == 1
  266         ctrl |= F_EGRGENCTRL;
  267 #endif
  268         if (adap->params.rev > 0) {
  269                 if (!(adap->flags & (USING_MSIX | USING_MSI)))
  270                         ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
  271         }
  272         t3_write_reg(adap, A_SG_CONTROL, ctrl);
  273         t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
  274                      V_LORCQDRBTHRSH(512));
  275         t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
  276         t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
  277                      V_TIMEOUT(200 * core_ticks_per_usec(adap)));
  278         t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
  279                      adap->params.rev < T3_REV_C ? 1000 : 500);
  280         t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
  281         t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
  282         t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
  283         t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
  284         t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
  285 }
  286 
  287 
  288 /**
  289  *      sgl_len - calculates the size of an SGL of the given capacity
  290  *      @n: the number of SGL entries
  291  *
  292  *      Calculates the number of flits needed for a scatter/gather list that
  293  *      can hold the given number of entries.
  294  */
  295 static __inline unsigned int
  296 sgl_len(unsigned int n)
  297 {
  298         return ((3 * n) / 2 + (n & 1));
  299 }
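      /*
       * Editor's annotation: SGL entries are stored in pairs (see make_sgl()
       * below); a full pair of two 32-bit lengths followed by two 64-bit
       * addresses occupies 3 flits, and a trailing unpaired entry needs 2
       * more (its length word and its address), which is exactly
       * (3 * n) / 2 + (n & 1).
       */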
  300 
  301 /**
  302  *      get_imm_packet - copy the immediate data of a response into an mbuf
  303  *      @resp: the response descriptor containing the packet data
  304  *
  305  *      Copies the immediate data of the given response into the supplied mbuf.
  306  */
  307 static int
  308 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
  309 {
  310 
  311         m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE;
  312         m->m_ext.ext_buf = NULL;
  313         m->m_ext.ext_type = 0;
  314         memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE); 
  315         return (0);     
  316 }
  317 
  318 static __inline u_int
  319 flits_to_desc(u_int n)
  320 {
  321         return (flit_desc_map[n]);
  322 }
  323 
  324 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
  325                     F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
  326                     V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
  327                     F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
  328                     F_HIRCQPARITYERROR)
  329 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
  330 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
  331                       F_RSPQDISABLED)
  332 
  333 /**
  334  *      t3_sge_err_intr_handler - SGE async event interrupt handler
  335  *      @adapter: the adapter
  336  *
  337  *      Interrupt handler for SGE asynchronous (non-data) events.
  338  */
  339 void
  340 t3_sge_err_intr_handler(adapter_t *adapter)
  341 {
  342         unsigned int v, status;
  343 
  344         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
  345         if (status & SGE_PARERR)
  346                 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
  347                          status & SGE_PARERR);
  348         if (status & SGE_FRAMINGERR)
  349                 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
  350                          status & SGE_FRAMINGERR);
  351         if (status & F_RSPQCREDITOVERFOW)
  352                 CH_ALERT(adapter, "SGE response queue credit overflow\n");
  353 
  354         if (status & F_RSPQDISABLED) {
  355                 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
  356 
  357                 CH_ALERT(adapter,
  358                          "packet delivered to disabled response queue (0x%x)\n",
  359                          (v >> S_RSPQ0DISABLED) & 0xff);
  360         }
  361 
  362         t3_write_reg(adapter, A_SG_INT_CAUSE, status);
  363         if (status & SGE_FATALERR)
  364                 t3_fatal_err(adapter);
  365 }
  366 
  367 void
  368 t3_sge_prep(adapter_t *adap, struct sge_params *p)
  369 {
  370         int i, nqsets;
  371 
  372         nqsets = min(SGE_QSETS, mp_ncpus*4);
  373 
  374         fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
  375 
  376         while (!powerof2(fl_q_size))
  377                 fl_q_size--;
  378 #if __FreeBSD_version > 700000
  379         if (cxgb_use_16k_clusters) 
  380                 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
  381         else
  382                 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
  383 #else
  384         jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
  385 #endif
  386         while (!powerof2(jumbo_q_size))
  387                 jumbo_q_size--;         
  388         
  389         /* XXX Does ETHER_ALIGN need to be accounted for here? */
  390         p->max_pkt_size = adap->sge.qs[0].fl[1].buf_size - sizeof(struct cpl_rx_data);
  391 
  392         for (i = 0; i < SGE_QSETS; ++i) {
  393                 struct qset_params *q = p->qset + i;
  394 
  395                 if (adap->params.nports > 2) {
  396                         q->coalesce_usecs = 50;
  397                 } else {
  398 #ifdef INVARIANTS                       
  399                         q->coalesce_usecs = 10;
  400 #else
  401                         q->coalesce_usecs = 5;
  402 #endif                  
  403                 }
  404                 q->polling = 0;
  405                 q->rspq_size = RSPQ_Q_SIZE;
  406                 q->fl_size = fl_q_size;
  407                 q->jumbo_size = jumbo_q_size;
  408                 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
  409                 q->txq_size[TXQ_OFLD] = 1024;
  410                 q->txq_size[TXQ_CTRL] = 256;
  411                 q->cong_thres = 0;
  412         }
  413 }
  414 
  415 int
  416 t3_sge_alloc(adapter_t *sc)
  417 {
  418 
  419         /* The parent tag. */
  420         if (bus_dma_tag_create( NULL,                   /* parent */
  421                                 1, 0,                   /* algnmnt, boundary */
  422                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  423                                 BUS_SPACE_MAXADDR,      /* highaddr */
  424                                 NULL, NULL,             /* filter, filterarg */
  425                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  426                                 BUS_SPACE_UNRESTRICTED, /* nsegments */
  427                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  428                                 0,                      /* flags */
  429                                 NULL, NULL,             /* lock, lockarg */
  430                                 &sc->parent_dmat)) {
  431                 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
  432                 return (ENOMEM);
  433         }
  434 
  435         /*
  436          * DMA tag for normal sized RX frames
  437          */
  438         if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
  439                 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
  440                 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
  441                 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
  442                 return (ENOMEM);
  443         }
  444 
  445         /* 
  446          * DMA tag for jumbo sized RX frames.
  447          */
  448         if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
  449                 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
  450                 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
  451                 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
  452                 return (ENOMEM);
  453         }
  454 
  455         /* 
  456          * DMA tag for TX frames.
  457          */
  458         if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
  459                 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
  460                 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
  461                 NULL, NULL, &sc->tx_dmat)) {
  462                 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
  463                 return (ENOMEM);
  464         }
  465 
  466         return (0);
  467 }
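      /*
       * Editor's annotation: the four DMA tags form a small hierarchy.  The
       * unrestricted parent tag is created first and the regular RX
       * (MCLBYTES), jumbo RX (MJUM16BYTES) and TX (TX_MAX_SIZE/TX_MAX_SEGS)
       * tags are derived from it; t3_sge_free() below tears them down in the
       * opposite order, children first and the parent last.
       */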
  468 
  469 int
  470 t3_sge_free(struct adapter * sc)
  471 {
  472 
  473         if (sc->tx_dmat != NULL)
  474                 bus_dma_tag_destroy(sc->tx_dmat);
  475 
  476         if (sc->rx_jumbo_dmat != NULL)
  477                 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
  478 
  479         if (sc->rx_dmat != NULL)
  480                 bus_dma_tag_destroy(sc->rx_dmat);
  481 
  482         if (sc->parent_dmat != NULL)
  483                 bus_dma_tag_destroy(sc->parent_dmat);
  484 
  485         return (0);
  486 }
  487 
  488 void
  489 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  490 {
  491 
  492         qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
  493         qs->rspq.polling = 0 /* p->polling */;
  494 }
  495 
  496 #if !defined(__i386__) && !defined(__amd64__)
  497 static void
  498 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  499 {
  500         struct refill_fl_cb_arg *cb_arg = arg;
  501         
  502         cb_arg->error = error;
  503         cb_arg->seg = segs[0];
  504         cb_arg->nseg = nseg;
  505 
  506 }
  507 #endif
  508 /**
  509  *      refill_fl - refill an SGE free-buffer list
  510  *      @sc: the controller softc
  511  *      @q: the free-list to refill
  512  *      @n: the number of new buffers to allocate
  513  *
  514  *      (Re)populate an SGE free-buffer list with up to @n new packet buffers.
  515  *      The caller must ensure that @n does not exceed the queue's capacity.
  516  */
  517 static void
  518 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
  519 {
  520         struct rx_sw_desc *sd = &q->sdesc[q->pidx];
  521         struct rx_desc *d = &q->desc[q->pidx];
  522         struct refill_fl_cb_arg cb_arg;
  523         caddr_t cl;
  524         int err, count = 0;
  525         int header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
  526         
  527         cb_arg.error = 0;
  528         while (n--) {
  529                 /*
  530                  * We only allocate a cluster, mbuf allocation happens after rx
  531                  */
  532                 if ((cl = cxgb_cache_get(q->zone)) == NULL) {
  533                         log(LOG_WARNING, "Failed to allocate cluster\n");
  534                         goto done;
  535                 }
  536                 
  537                 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
  538                         if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
  539                                 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
  540                                 uma_zfree(q->zone, cl);
  541                                 goto done;
  542                         }
  543                         sd->flags |= RX_SW_DESC_MAP_CREATED;
  544                 }
  545 #if !defined(__i386__) && !defined(__amd64__)
  546                 err = bus_dmamap_load(q->entry_tag, sd->map,
  547                     cl + header_size, q->buf_size,
  548                     refill_fl_cb, &cb_arg, 0);
  549                 
  550                 if (err != 0 || cb_arg.error) {
  551                         log(LOG_WARNING, "failure in refill_fl %d\n", cb_arg.error);
  552                         /*
  553                          * XXX free cluster
  554                          */
  555                         return;
  556                 }
  557 #else
  558                 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)(cl + header_size));
  559 #endif          
  560                 sd->flags |= RX_SW_DESC_INUSE;
  561                 sd->rxsd_cl = cl;
  562                 sd->data = cl + header_size;
  563                 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
  564                 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
  565                 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
  566                 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
  567 
  568                 d++;
  569                 sd++;
  570 
  571                 if (++q->pidx == q->size) {
  572                         q->pidx = 0;
  573                         q->gen ^= 1;
  574                         sd = q->sdesc;
  575                         d = q->desc;
  576                 }
  577                 q->credits++;
  578                 count++;
  579         }
  580 
  581 done:
  582         if (count)
  583                 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
  584 }
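      /*
       * Editor's annotation: the free list hands bare clusters to the
       * hardware.  Space for the mbuf header, packet header, external-storage
       * descriptor and a reference count (header_size above) is reserved at
       * the front of each cluster, and only the region starting at
       * cl + header_size is mapped for DMA; the mbuf itself is only set up
       * once the buffer is received, per the comment at the top of the loop.
       */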
  585 
  586 
  587 /**
  588  *      free_rx_bufs - free the Rx buffers on an SGE free list
  589  *      @sc: the controller softc
  590  *      @q: the SGE free list to clean up
  591  *
  592  *      Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
  593  *      this queue should be stopped before calling this function.
  594  */
  595 static void
  596 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
  597 {
  598         u_int cidx = q->cidx;
  599 
  600         while (q->credits--) {
  601                 struct rx_sw_desc *d = &q->sdesc[cidx];
  602 
  603                 if (d->flags & RX_SW_DESC_INUSE) {
  604                         bus_dmamap_unload(q->entry_tag, d->map);
  605                         bus_dmamap_destroy(q->entry_tag, d->map);
  606                         uma_zfree(q->zone, d->rxsd_cl);
  607                 }
  608                 d->rxsd_cl = NULL;
  609                 if (++cidx == q->size)
  610                         cidx = 0;
  611         }
  612 }
  613 
  614 static __inline void
  615 __refill_fl(adapter_t *adap, struct sge_fl *fl)
  616 {
  617         refill_fl(adap, fl, min(16U, fl->size - fl->credits));
  618 }
  619 
  620 static __inline void
  621 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
  622 {
  623         if ((fl->size - fl->credits) < max)
  624                 refill_fl(adap, fl, min(max, fl->size - fl->credits));
  625 }
  626 
  627 void
  628 refill_fl_service(adapter_t *adap, struct sge_fl *fl)
  629 {
  630         __refill_fl_lt(adap, fl, 512);
  631 }
  632 
  633 /**
  634  *      recycle_rx_buf - recycle a receive buffer
  635  *      @adapter: the adapter
  636  *      @q: the SGE free list
  637  *      @idx: index of buffer to recycle
  638  *
  639  *      Recycles the specified buffer on the given free list by adding it at
  640  *      the next available slot on the list.
  641  */
  642 static void
  643 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
  644 {
  645         struct rx_desc *from = &q->desc[idx];
  646         struct rx_desc *to   = &q->desc[q->pidx];
  647 
  648         q->sdesc[q->pidx] = q->sdesc[idx];
  649         to->addr_lo = from->addr_lo;        // already big endian
  650         to->addr_hi = from->addr_hi;        // likewise
  651         wmb();
  652         to->len_gen = htobe32(V_FLD_GEN1(q->gen));
  653         to->gen2 = htobe32(V_FLD_GEN2(q->gen));
  654         q->credits++;
  655 
  656         if (++q->pidx == q->size) {
  657                 q->pidx = 0;
  658                 q->gen ^= 1;
  659         }
  660         t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
  661 }
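      /*
       * Editor's annotation: only the bus address words are reused here (they
       * are already big-endian, as noted above).  The wmb() guarantees the
       * address is visible before the freshly written generation bits in
       * len_gen/gen2 make the descriptor valid to the hardware, and the
       * doorbell is rung for every recycled buffer.
       */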
  662 
  663 static void
  664 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  665 {
  666         uint32_t *addr;
  667 
  668         addr = arg;
  669         *addr = segs[0].ds_addr;
  670 }
  671 
  672 static int
  673 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
  674     bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
  675     bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
  676 {
  677         size_t len = nelem * elem_size;
  678         void *s = NULL;
  679         void *p = NULL;
  680         int err;
  681 
  682         if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
  683                                       BUS_SPACE_MAXADDR_32BIT,
  684                                       BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
  685                                       len, 0, NULL, NULL, tag)) != 0) {
  686                 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
  687                 return (ENOMEM);
  688         }
  689 
  690         if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
  691                                     map)) != 0) {
  692                 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
  693                 return (ENOMEM);
  694         }
  695 
  696         bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
  697         bzero(p, len);
  698         *(void **)desc = p;
  699 
  700         if (sw_size) {
  701                 len = nelem * sw_size;
  702                 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
  703                 *(void **)sdesc = s;
  704         }
  705         if (parent_entry_tag == NULL)
  706                 return (0);
  707             
  708         if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
  709                                       BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
  710                                       NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
  711                                       TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
  712                                       NULL, NULL, entry_tag)) != 0) {
  713                 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
  714                 return (ENOMEM);
  715         }
  716         return (0);
  717 }
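      /*
       * Editor's annotation: alloc_ring() produces up to three things: a
       * DMA-able descriptor ring of nelem * elem_size bytes (returned through
       * *desc, with its bus address written to *phys by alloc_ring_cb), an
       * optional zeroed software descriptor array of nelem * sw_size bytes
       * (*sdesc), and, when parent_entry_tag is supplied, a per-entry DMA tag
       * (*entry_tag, created with TX_MAX_SIZE/TX_MAX_SEGS limits) for mapping
       * the individual buffers attached to the ring later.
       */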
  718 
  719 static void
  720 sge_slow_intr_handler(void *arg, int ncount)
  721 {
  722         adapter_t *sc = arg;
  723 
  724         t3_slow_intr_handler(sc);
  725 }
  726 
  727 /**
  728  *      sge_timer_cb - perform periodic maintenance of an SGE qset
  729  *      @data: the SGE queue set to maintain
  730  *
  731  *      Runs periodically from a timer to perform maintenance of an SGE queue
  732  *      set.  It performs the following tasks:
  733  *
  734  *      a) Cleans up any completed Tx descriptors that may still be pending.
  735  *      Normal descriptor cleanup happens when new packets are added to a Tx
  736  *      queue so this timer is relatively infrequent and does any cleanup only
  737  *      if the Tx queue has not seen any new packets in a while.  We make a
  738  *      best effort attempt to reclaim descriptors, in that we don't wait
  739  *      around if we cannot get a queue's lock (which most likely is because
  740  *      someone else is queueing new packets and so will also handle the clean
  741  *      up).  Since control queues use immediate data exclusively we don't
  742  *      bother cleaning them up here.
  743  *
  744  *      b) Replenishes Rx queues that have run out due to memory shortage.
  745  *      Normally new Rx buffers are added when existing ones are consumed but
  746  *      when out of memory a queue can become empty.  We try to add only a few
  747  *      buffers here; the queue will be replenished fully as these new buffers
  748  *      are used up if memory shortage has subsided.
  749  *      
  750  *      c) Return coalesced response queue credits in case a response queue is
  751  *      starved.
  752  *
  753  *      d) Ring doorbells for T304 tunnel queues since we have seen doorbell 
  754  *      fifo overflows and the FW doesn't implement any recovery scheme yet.
  755  */
  756 static void
  757 sge_timer_cb(void *arg)
  758 {
  759         adapter_t *sc = arg;
  760 #ifndef IFNET_MULTIQUEUE        
  761         struct port_info *pi;
  762         struct sge_qset *qs;
  763         struct sge_txq  *txq;
  764         int i, j;
  765         int reclaim_ofl, refill_rx;
  766 
  767         for (i = 0; i < sc->params.nports; i++) {
  768                 pi = &sc->port[i];
  769                 for (j = 0; j < pi->nqsets; j++) {
  770                         qs = &sc->sge.qs[pi->first_qset + j];
  771                         txq = &qs->txq[0];
  772                         reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
  773                         refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || 
  774                             (qs->fl[1].credits < qs->fl[1].size));
  775                         if (reclaim_ofl || refill_rx) {
  776                                 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
  777                                 break;
  778                         }
  779                 }
  780         }
  781 #endif
  782         if (sc->params.nports > 2) {
  783                 int i;
  784 
  785                 for_each_port(sc, i) {
  786                         struct port_info *pi = &sc->port[i];
  787 
  788                         t3_write_reg(sc, A_SG_KDOORBELL, 
  789                                      F_SELEGRCNTX | 
  790                                      (FW_TUNNEL_SGEEC_START + pi->first_qset));
  791                 }
  792         }       
  793         if (sc->open_device_map != 0) 
  794                 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
  795 }
  796 
  797 /*
  798  * This is meant to be a catch-all function to keep sge state private
  799  * to sge.c
  800  *
  801  */
  802 int
  803 t3_sge_init_adapter(adapter_t *sc)
  804 {
  805         callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
  806         callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
  807         TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
  808         mi_init();
  809         cxgb_cache_init();
  810         return (0);
  811 }
  812 
  813 int
  814 t3_sge_reset_adapter(adapter_t *sc)
  815 {
  816         callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
  817         return (0);
  818 }
  819 
  820 int
  821 t3_sge_init_port(struct port_info *pi)
  822 {
  823         TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
  824         return (0);
  825 }
  826 
  827 void
  828 t3_sge_deinit_sw(adapter_t *sc)
  829 {
  830 
  831         mi_deinit();
  832 }
  833 
  834 /**
  835  *      refill_rspq - replenish an SGE response queue
  836  *      @adapter: the adapter
  837  *      @q: the response queue to replenish
  838  *      @credits: how many new responses to make available
  839  *
  840  *      Replenishes a response queue by making the supplied number of responses
  841  *      available to HW.
  842  */
  843 static __inline void
  844 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
  845 {
  846 
  847         /* mbufs are allocated on demand when a rspq entry is processed. */
  848         t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
  849                      V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
  850 }
  851 
  852 static __inline void
  853 sge_txq_reclaim_(struct sge_txq *txq, int force)
  854 {
  855 
  856         if (desc_reclaimable(txq) < 16)
  857                 return;
  858         if (mtx_trylock(&txq->lock) == 0) 
  859                 return;
  860         reclaim_completed_tx_(txq, 16);
  861         mtx_unlock(&txq->lock);
  862 
  863 }
  864 
  865 static void
  866 sge_txq_reclaim_handler(void *arg, int ncount)
  867 {
  868         struct sge_txq *q = arg;
  869 
  870         sge_txq_reclaim_(q, TRUE);
  871 }
  872 
  873 
  874 
  875 static void
  876 sge_timer_reclaim(void *arg, int ncount)
  877 {
  878         struct port_info *pi = arg;
  879         int i, nqsets = pi->nqsets;
  880         adapter_t *sc = pi->adapter;
  881         struct sge_qset *qs;
  882         struct sge_txq *txq;
  883         struct mtx *lock;
  884 
  885 #ifdef IFNET_MULTIQUEUE
  886         panic("%s should not be called with multiqueue support\n", __FUNCTION__);
  887 #endif 
  888         for (i = 0; i < nqsets; i++) {
  889                 qs = &sc->sge.qs[pi->first_qset + i];
  890 
  891                 txq = &qs->txq[TXQ_OFLD];
  892                 sge_txq_reclaim_(txq, FALSE);
  893                 
  894                 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
  895                             &sc->sge.qs[0].rspq.lock;
  896 
  897                 if (mtx_trylock(lock)) {
  898                         /* XXX currently assume that we are *NOT* polling */
  899                         uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
  900 
  901                         if (qs->fl[0].credits < qs->fl[0].size - 16)
  902                                 __refill_fl(sc, &qs->fl[0]);
  903                         if (qs->fl[1].credits < qs->fl[1].size - 16)
  904                                 __refill_fl(sc, &qs->fl[1]);
  905                         
  906                         if (status & (1 << qs->rspq.cntxt_id)) {
  907                                 if (qs->rspq.credits) {
  908                                         refill_rspq(sc, &qs->rspq, 1);
  909                                         qs->rspq.credits--;
  910                                         t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, 
  911                                             1 << qs->rspq.cntxt_id);
  912                                 }
  913                         }
  914                         mtx_unlock(lock);
  915                 }
  916         }
  917 }
  918 
  919 /**
  920  *      init_qset_cntxt - initialize an SGE queue set context info
  921  *      @qs: the queue set
  922  *      @id: the queue set id
  923  *
  924  *      Initializes the TIDs and context ids for the queues of a queue set.
  925  */
  926 static void
  927 init_qset_cntxt(struct sge_qset *qs, u_int id)
  928 {
  929 
  930         qs->rspq.cntxt_id = id;
  931         qs->fl[0].cntxt_id = 2 * id;
  932         qs->fl[1].cntxt_id = 2 * id + 1;
  933         qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
  934         qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
  935         qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
  936         qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
  937         qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
  938 
  939         mbufq_init(&qs->txq[TXQ_ETH].sendq);
  940         mbufq_init(&qs->txq[TXQ_OFLD].sendq);
  941         mbufq_init(&qs->txq[TXQ_CTRL].sendq);
  942 }
  943 
  944 
  945 static void
  946 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
  947 {
  948         txq->in_use += ndesc;
  949         /*
  950          * XXX we don't handle stopping of queue
  951          * presumably start handles this when we bump against the end
  952          */
  953         txqs->gen = txq->gen;
  954         txq->unacked += ndesc;
  955         txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
  956         txq->unacked &= 31;
  957         txqs->pidx = txq->pidx;
  958         txq->pidx += ndesc;
  959 #ifdef INVARIANTS
  960         if (((txqs->pidx > txq->cidx) &&
  961                 (txq->pidx < txqs->pidx) &&
  962                 (txq->pidx >= txq->cidx)) ||
  963             ((txqs->pidx < txq->cidx) &&
  964                 (txq->pidx >= txq-> cidx)) ||
  965             ((txqs->pidx < txq->cidx) &&
  966                 (txq->cidx < txqs->pidx)))
  967                 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
  968                     txqs->pidx, txq->pidx, txq->cidx);
  969 #endif
  970         if (txq->pidx >= txq->size) {
  971                 txq->pidx -= txq->size;
  972                 txq->gen ^= 1;
  973         }
  974 
  975 }
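      /*
       * Editor's annotation: besides advancing the producer index and
       * generation bit, txq_prod() decides whether this work request should
       * ask the hardware for a completion: txq->unacked accumulates posted
       * descriptors, and whenever bit 5 (value 32) becomes set it is shifted
       * into the WR completion-request position via txqs->compl and the
       * counter is masked back below 32.
       */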
  976 
  977 /**
  978  *      calc_tx_descs - calculate the number of Tx descriptors for a packet
  979  *      @m: the packet mbufs
  980  *      @nsegs: the number of segments 
  981  *
  982  *      Returns the number of Tx descriptors needed for the given Ethernet
  983  *      packet.  Ethernet packets require addition of WR and CPL headers.
  984  */
  985 static __inline unsigned int
  986 calc_tx_descs(const struct mbuf *m, int nsegs)
  987 {
  988         unsigned int flits;
  989 
  990         if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt))
  991                 return 1;
  992 
  993         flits = sgl_len(nsegs) + 2;
  994 #ifdef TSO_SUPPORTED
  995         if (m->m_pkthdr.csum_flags & CSUM_TSO)
  996                 flits++;
  997 #endif  
  998         return flits_to_desc(flits);
  999 }
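      /*
       * Editor's annotation / worked example: a packet that fits together
       * with the CPL header inside a single work request is sent as immediate
       * data and needs exactly one descriptor.  Otherwise the WR + CPL header
       * costs 2 flits (one more with TSO) and the SGL costs sgl_len(nsegs)
       * flits; e.g. a 4-segment packet without TSO needs sgl_len(4) + 2 = 8
       * flits, which flit_desc_map[] maps to a single descriptor.
       */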
 1000 
 1001 static unsigned int
 1002 busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq,
 1003     struct tx_sw_desc *txsd, bus_dma_segment_t *segs, int *nsegs)
 1004 {
 1005         struct mbuf *m0;
 1006         int err, pktlen, pass = 0;
 1007         
 1008 retry:
 1009         err = 0;
 1010         m0 = *m;
 1011         pktlen = m0->m_pkthdr.len;
 1012 #if defined(__i386__) || defined(__amd64__)
 1013         if (busdma_map_sg_collapse(m, segs, nsegs) == 0) {
 1014                 goto done;
 1015         } else
 1016 #endif
 1017                 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, txsd->map, m0, segs, nsegs, 0);
 1018 
 1019         if (err == 0) {
 1020                 goto done;
 1021         }
 1022         if (err == EFBIG && pass == 0) {
 1023                 pass = 1;
 1024                 /* Too many segments, try to defrag */
 1025                 m0 = m_defrag(m0, M_DONTWAIT);
 1026                 if (m0 == NULL) {
 1027                         m_freem(*m);
 1028                         *m = NULL;
 1029                         return (ENOBUFS);
 1030                 }
 1031                 *m = m0;
 1032                 goto retry;
 1033         } else if (err == ENOMEM) {
 1034                 return (err);
 1035         } else if (err) {
 1036                 if (cxgb_debug)
 1037                         printf("map failure err=%d pktlen=%d\n", err, pktlen);
 1038                 m_freem(m0);
 1039                 *m = NULL;
 1040                 return (err);
 1041         }
 1042 done:
 1043 #if !defined(__i386__) && !defined(__amd64__)
 1044         bus_dmamap_sync(txq->entry_tag, txsd->map, BUS_DMASYNC_PREWRITE);
 1045 #endif  
 1046         txsd->flags |= TX_SW_DESC_MAPPED;
 1047 
 1048         return (0);
 1049 }
 1050 
 1051 /**
 1052  *      make_sgl - populate a scatter/gather list for a packet
 1053  *      @sgp: the SGL to populate
 1054  *      @segs: the packet dma segments
 1055  *      @nsegs: the number of segments
 1056  *
 1057  *      Generates a scatter/gather list for the buffers that make up a packet
 1058  *      in the SGL storage provided by the caller, who must size it
 1059  *      appropriately (see sgl_len()).
 1060  */
 1061 static __inline void
 1062 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
 1063 {
 1064         int i, idx;
 1065         
 1066         for (idx = 0, i = 0; i < nsegs; i++) {
 1067                 /*
 1068                  * firmware doesn't like empty segments
 1069                  */
 1070                 if (segs[i].ds_len == 0)
 1071                         continue;
 1072                 if (i && idx == 0) 
 1073                         ++sgp;
 1074                 
 1075                 sgp->len[idx] = htobe32(segs[i].ds_len);
 1076                 sgp->addr[idx] = htobe64(segs[i].ds_addr);
 1077                 idx ^= 1;
 1078         }
 1079         
 1080         if (idx) {
 1081                 sgp->len[idx] = 0;
 1082                 sgp->addr[idx] = 0;
 1083         }
 1084 }
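      /*
       * Editor's annotation: entries are packed two per struct sg_ent (idx
       * toggles between the two len/addr slots and sgp advances after each
       * full pair).  Zero-length DMA segments are skipped because the
       * firmware rejects them, and the unused slot of an odd final pair is
       * cleared so the list is properly terminated.
       */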
 1085         
 1086 /**
 1087  *      check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 1088  *      @adap: the adapter
 1089  *      @q: the Tx queue
 1090  *
 1091  *      Ring the doorbell if a Tx queue is asleep.  There is a natural race
 1092  *      where the HW may go to sleep just after we check; in that case the
 1093  *      interrupt handler will detect the outstanding TX packet
 1094  *      and ring the doorbell for us.
 1095  *
 1096  *      When GTS is disabled we unconditionally ring the doorbell.
 1097  */
 1098 static __inline void
 1099 check_ring_tx_db(adapter_t *adap, struct sge_txq *q)
 1100 {
 1101 #if USE_GTS
 1102         clear_bit(TXQ_LAST_PKT_DB, &q->flags);
 1103         if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
 1104                 set_bit(TXQ_LAST_PKT_DB, &q->flags);
 1105 #ifdef T3_TRACE
 1106                 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
 1107                           q->cntxt_id);
 1108 #endif
 1109                 t3_write_reg(adap, A_SG_KDOORBELL,
 1110                              F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1111         }
 1112 #else
 1113         wmb();            /* write descriptors before telling HW */
 1114         t3_write_reg(adap, A_SG_KDOORBELL,
 1115                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1116 #endif
 1117 }
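      /*
       * Editor's annotation: with USE_GTS enabled the doorbell is rung only
       * when the queue was not already marked running, relying on the
       * interrupt handler to close the race described above.  USE_GTS is
       * defined to 0 earlier in this file, so the default path simply issues
       * a write barrier and rings the doorbell on every call.
       */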
 1118 
 1119 static __inline void
 1120 wr_gen2(struct tx_desc *d, unsigned int gen)
 1121 {
 1122 #if SGE_NUM_GENBITS == 2
 1123         d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
 1124 #endif
 1125 }
 1126 
 1127 /**
 1128  *      write_wr_hdr_sgl - write a WR header and, optionally, SGL
 1129  *      @ndesc: number of Tx descriptors spanned by the SGL
 1130  *      @txd: first Tx descriptor to be written
 1131  *      @txqs: txq state (generation and producer index)
 1132  *      @txq: the SGE Tx queue
 1133  *      @sgl: the SGL
 1134  *      @flits: number of flits to the start of the SGL in the first descriptor
 1135  *      @sgl_flits: the SGL size in flits
 1136  *      @wr_hi: top 32 bits of WR header based on WR type (big endian)
 1137  *      @wr_lo: low 32 bits of WR header based on WR type (big endian)
 1138  *
 1139  *      Write a work request header and an associated SGL.  If the SGL is
 1140  *      small enough to fit into one Tx descriptor it has already been written
 1141  *      and we just need to write the WR header.  Otherwise we distribute the
 1142  *      SGL across the number of descriptors it spans.
 1143  */
 1144 static void
 1145 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
 1146     const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
 1147     unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
 1148 {
 1149 
 1150         struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
 1151         struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
 1152         
 1153         if (__predict_true(ndesc == 1)) {
 1154                 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
 1155                     V_WR_SGLSFLT(flits)) | wr_hi;
 1156                 wmb();
 1157                 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
 1158                     V_WR_GEN(txqs->gen)) | wr_lo;
 1159                 /* XXX gen? */
 1160                 wr_gen2(txd, txqs->gen);
 1161                 
 1162         } else {
 1163                 unsigned int ogen = txqs->gen;
 1164                 const uint64_t *fp = (const uint64_t *)sgl;
 1165                 struct work_request_hdr *wp = wrp;
 1166                 
 1167                 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
 1168                     V_WR_SGLSFLT(flits)) | wr_hi;
 1169                 
 1170                 while (sgl_flits) {
 1171                         unsigned int avail = WR_FLITS - flits;
 1172 
 1173                         if (avail > sgl_flits)
 1174                                 avail = sgl_flits;
 1175                         memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
 1176                         sgl_flits -= avail;
 1177                         ndesc--;
 1178                         if (!sgl_flits)
 1179                                 break;
 1180                         
 1181                         fp += avail;
 1182                         txd++;
 1183                         txsd++;
 1184                         if (++txqs->pidx == txq->size) {
 1185                                 txqs->pidx = 0;
 1186                                 txqs->gen ^= 1;
 1187                                 txd = txq->desc;
 1188                                 txsd = txq->sdesc;
 1189                         }
 1190                         
 1191                         /*
 1192                          * when the head of the mbuf chain
 1193                          * is freed all clusters will be freed
 1194                          * with it
 1195                          */
 1196                         KASSERT(txsd->mi.mi_base == NULL,
 1197                             ("overwriting valid entry mi_base==%p", txsd->mi.mi_base));
 1198                         wrp = (struct work_request_hdr *)txd;
 1199                         wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
 1200                             V_WR_SGLSFLT(1)) | wr_hi;
 1201                         wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
 1202                                     sgl_flits + 1)) |
 1203                             V_WR_GEN(txqs->gen)) | wr_lo;
 1204                         wr_gen2(txd, txqs->gen);
 1205                         flits = 1;
 1206                 }
 1207                 wrp->wr_hi |= htonl(F_WR_EOP);
 1208                 wmb();
 1209                 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
 1210                 wr_gen2((struct tx_desc *)wp, ogen);
 1211         }
 1212 }
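      /*
       * Editor's annotation: in the multi-descriptor case every descriptor
       * after the first carries its own small WR header (flits restarts at 1)
       * and receives at most WR_FLITS - flits flits of SGL data.  F_WR_EOP
       * and the first header's wr_lo, carrying the original generation ogen,
       * are written only after the whole chain is in place and behind a write
       * barrier, so the hardware never starts on a partially written request.
       */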
 1213 
 1214 /* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */
 1215 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20)
 1216 
 1217 #ifdef VLAN_SUPPORTED
 1218 #define GET_VTAG(cntrl, m) \
 1219 do { \
 1220         if ((m)->m_flags & M_VLANTAG)                                               \
 1221                 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
 1222 } while (0)
 1223 
 1224 #define GET_VTAG_MI(cntrl, mi) \
 1225 do { \
 1226         if ((mi)->mi_flags & M_VLANTAG)                                 \
 1227                 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((mi)->mi_ether_vtag); \
 1228 } while (0)
 1229 #else
 1230 #define GET_VTAG(cntrl, m)
 1231 #define GET_VTAG_MI(cntrl, m)
 1232 #endif
 1233 
 1234 int
 1235 t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
 1236 {
 1237         adapter_t *sc;
 1238         struct mbuf *m0;
 1239         struct sge_txq *txq;
 1240         struct txq_state txqs;
 1241         struct port_info *pi;
 1242         unsigned int ndesc, flits, cntrl, mlen;
 1243         int err, nsegs, tso_info = 0;
 1244 
 1245         struct work_request_hdr *wrp;
 1246         struct tx_sw_desc *txsd;
 1247         struct sg_ent *sgp, *sgl;
 1248         uint32_t wr_hi, wr_lo, sgl_flits; 
 1249         bus_dma_segment_t segs[TX_MAX_SEGS];
 1250 
 1251         struct tx_desc *txd;
 1252         struct mbuf_vec *mv;
 1253         struct mbuf_iovec *mi;
 1254                 
 1255         DPRINTF("t3_encap cpu=%d ", curcpu);
 1256 
 1257         mi = NULL;
 1258         pi = qs->port;
 1259         sc = pi->adapter;
 1260         txq = &qs->txq[TXQ_ETH];
 1261         txd = &txq->desc[txq->pidx];
 1262         txsd = &txq->sdesc[txq->pidx];
 1263         sgl = txq->txq_sgl;
 1264         m0 = *m;
 1265         
 1266         DPRINTF("t3_encap port_id=%d qsidx=%d ", pi->port_id, pi->first_qset);
 1267         DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", m[0]->m_pkthdr.len, pi->txpkt_intf, pi->tx_chan);
 1268         if (cxgb_debug)
 1269                 printf("mi_base=%p cidx=%d pidx=%d\n\n", txsd->mi.mi_base, txq->cidx, txq->pidx);
 1270         
 1271         mtx_assert(&txq->lock, MA_OWNED);
 1272         cntrl = V_TXPKT_INTF(pi->txpkt_intf);
 1273 /*
 1274  * XXX need to add VLAN support for 6.x
 1275  */
 1276 #ifdef VLAN_SUPPORTED
 1277         if  (m0->m_pkthdr.csum_flags & (CSUM_TSO))
 1278                 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
 1279 #endif
 1280         KASSERT(txsd->mi.mi_base == NULL,
 1281             ("overwriting valid entry mi_base==%p", txsd->mi.mi_base));
 1282         if (count > 1) {
 1283                 panic("count > 1 not support in CVS\n");
 1284                 if ((err = busdma_map_sg_vec(m, &m0, segs, count)))
 1285                         return (err);
 1286                 nsegs = count;
 1287         } else if ((err = busdma_map_sg_collapse(&m0, segs, &nsegs))) {
 1288                 if (cxgb_debug)
 1289                         printf("failed ... err=%d\n", err);
 1290                 return (err);
 1291         } 
 1292         KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d count=%d", nsegs, count));
 1293 
 1294         if (!(m0->m_pkthdr.len <= PIO_LEN)) {
 1295                 mi_collapse_mbuf(&txsd->mi, m0);
 1296                 mi = &txsd->mi;
 1297         }
 1298         if (count > 1) {
 1299                 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
 1300                 int i, fidx;
 1301                 struct mbuf_iovec *batchmi;
 1302 
 1303                 mv = mtomv(m0);
 1304                 batchmi = mv->mv_vec;
 1305                 
 1306                 wrp = (struct work_request_hdr *)txd;
 1307 
 1308                 flits = count*2 + 1;
 1309                 txq_prod(txq, 1, &txqs);
 1310 
 1311                 for (fidx = 1, i = 0; i < count; i++, batchmi++, fidx += 2) {
 1312                         struct cpl_tx_pkt_batch_entry *cbe = &cpl_batch->pkt_entry[i];
 1313 
 1314                         cntrl = V_TXPKT_INTF(pi->txpkt_intf);
 1315                         GET_VTAG_MI(cntrl, batchmi);
 1316                         cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
 1317                         if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
 1318                                 cntrl |= F_TXPKT_IPCSUM_DIS;
 1319                         if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
 1320                                 cntrl |= F_TXPKT_L4CSUM_DIS;
 1321                         cbe->cntrl = htonl(cntrl);
 1322                         cbe->len = htonl(batchmi->mi_len | 0x80000000);
 1323                         cbe->addr = htobe64(segs[i].ds_addr);
 1324                         txd->flit[fidx] |= htobe64(1 << 24);
 1325                 }
 1326 
 1327                 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
 1328                     V_WR_SGLSFLT(flits)) | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
 1329                 wmb();
 1330                 wrp->wr_lo = htonl(V_WR_LEN(flits) |
 1331                     V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
 1332                 /* XXX gen? */
 1333                 wr_gen2(txd, txqs.gen);
 1334                 check_ring_tx_db(sc, txq);
 1335                 
 1336                 return (0);             
 1337         } else if (tso_info) {
 1338                 int min_size = TCPPKTHDRSIZE, eth_type, tagged;
 1339                 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
 1340                 struct ip *ip;
 1341                 struct tcphdr *tcp;
 1342                 char *pkthdr;
 1343 
 1344                 txd->flit[2] = 0;
 1345                 GET_VTAG(cntrl, m0);
 1346                 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
 1347                 hdr->cntrl = htonl(cntrl);
 1348                 mlen = m0->m_pkthdr.len;
 1349                 hdr->len = htonl(mlen | 0x80000000);
 1350 
 1351                 DPRINTF("tso buf len=%d\n", mlen);
 1352 
 1353                 tagged = m0->m_flags & M_VLANTAG;
 1354                 if (!tagged)
 1355                         min_size -= ETHER_VLAN_ENCAP_LEN;
 1356 
 1357                 if (__predict_false(mlen < min_size)) {
 1358                         printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
 1359                             m0, mlen, m0->m_pkthdr.tso_segsz,
 1360                             m0->m_pkthdr.csum_flags, m0->m_flags);
 1361                         panic("tx tso packet too small");
 1362                 }
 1363 
 1364                 /* Make sure that ether, ip, tcp headers are all in m0 */
 1365                 if (__predict_false(m0->m_len < min_size)) {
 1366                         m0 = m_pullup(m0, min_size);
 1367                         if (__predict_false(m0 == NULL)) {
 1368                                 /* XXX panic probably an overreaction */
 1369                                 panic("couldn't fit header into mbuf");
 1370                         }
 1371                 }
 1372                 pkthdr = m0->m_data;
 1373 
 1374                 if (tagged) {
 1375                         eth_type = CPL_ETH_II_VLAN;
 1376                         ip = (struct ip *)(pkthdr + ETHER_HDR_LEN +
 1377                             ETHER_VLAN_ENCAP_LEN);
 1378                 } else {
 1379                         eth_type = CPL_ETH_II;
 1380                         ip = (struct ip *)(pkthdr + ETHER_HDR_LEN);
 1381                 }
 1382                 tcp = (struct tcphdr *)((uint8_t *)ip +
 1383                     sizeof(*ip)); 
 1384 
 1385                 tso_info |= V_LSO_ETH_TYPE(eth_type) |
 1386                             V_LSO_IPHDR_WORDS(ip->ip_hl) |
 1387                             V_LSO_TCPHDR_WORDS(tcp->th_off);
 1388                 hdr->lso_info = htonl(tso_info);
 1389 
 1390                 if (__predict_false(mlen <= PIO_LEN)) {
 1391                         /* pkt not undersized but fits in PIO_LEN
 1392                          * Indicates a TSO bug at the higher levels.
 1393                          */
 1394                         DPRINTF("**5592 Fix** mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
 1395                             m0, mlen, m0->m_pkthdr.tso_segsz, m0->m_pkthdr.csum_flags, m0->m_flags);
 1396                         txq_prod(txq, 1, &txqs);
 1397                         m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
 1398                         m_freem(m0);
 1399                         m0 = NULL;
 1400                         flits = (mlen + 7) / 8 + 3;
 1401                         hdr->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
 1402                                           V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
 1403                                           F_WR_SOP | F_WR_EOP | txqs.compl);
 1404                         wmb();
 1405                         hdr->wr.wr_lo = htonl(V_WR_LEN(flits) |
 1406                             V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
 1407 
 1408                         wr_gen2(txd, txqs.gen);
 1409                         check_ring_tx_db(sc, txq);
 1410                         return (0);
 1411                 }
 1412                 flits = 3;      
 1413         } else {
 1414                 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
 1415 
 1416                 GET_VTAG(cntrl, m0);
 1417                 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
 1418                 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
 1419                         cntrl |= F_TXPKT_IPCSUM_DIS;
 1420                 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
 1421                         cntrl |= F_TXPKT_L4CSUM_DIS;
 1422                 cpl->cntrl = htonl(cntrl);
 1423                 mlen = m0->m_pkthdr.len;
 1424                 cpl->len = htonl(mlen | 0x80000000);
 1425 
 1426                 if (mlen <= PIO_LEN) {
 1427                         txq_prod(txq, 1, &txqs);
 1428                         m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
 1429                         m_freem(m0);
 1430                         m0 = NULL;
 1431                         flits = (mlen + 7) / 8 + 2;
 1432                         cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
 1433                                           V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
 1434                                           F_WR_SOP | F_WR_EOP | txqs.compl);
 1435                         wmb();
 1436                         cpl->wr.wr_lo = htonl(V_WR_LEN(flits) |
 1437                             V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
 1438 
 1439                         wr_gen2(txd, txqs.gen);
 1440                         check_ring_tx_db(sc, txq);
 1441                         DPRINTF("pio buf\n");
 1442                         return (0);
 1443                 }
 1444                 DPRINTF("regular buf\n");
 1445                 flits = 2;
 1446         }
 1447         wrp = (struct work_request_hdr *)txd;
 1448 
 1449 #ifdef  nomore
 1450         /*
 1451          * XXX need to move into one of the helper routines above
 1452          *
 1453          */
 1454         if ((err = busdma_map_mbufs(m, txq, txsd, segs, &nsegs)) != 0) 
 1455                 return (err);
 1456         m0 = *m;
 1457 #endif
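              /*
               * Scatter/gather path: compute how many Tx descriptors the
               * packet needs, build the SGL from the DMA segments (inline in
               * the descriptor when one descriptor suffices, otherwise in the
               * on-stack sgl[] passed to write_wr_hdr_sgl()), then write the
               * work request header and ring the doorbell.
               */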
 1458         ndesc = calc_tx_descs(m0, nsegs);
 1459         
 1460         sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
 1461         make_sgl(sgp, segs, nsegs);
 1462 
 1463         sgl_flits = sgl_len(nsegs);
 1464 
 1465         DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc);
 1466         txq_prod(txq, ndesc, &txqs);
 1467         wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
 1468         wr_lo = htonl(V_WR_TID(txq->token));
 1469         write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits, wr_hi, wr_lo);
 1470         check_ring_tx_db(pi->adapter, txq);
 1471 
 1472         if ((m0->m_type == MT_DATA) &&
 1473             ((m0->m_flags & (M_EXT|M_NOFREE)) == M_EXT) &&
 1474             (m0->m_ext.ext_type != EXT_PACKET)) {
 1475                 m0->m_flags &= ~M_EXT ;
 1476                 cxgb_mbufs_outstanding--;
 1477                 m_free(m0);
 1478         }
 1479         
 1480         return (0);
 1481 }
 1482 
 1483 
 1484 /**
 1485  *      write_imm - write a packet into a Tx descriptor as immediate data
 1486  *      @d: the Tx descriptor to write
 1487  *      @m: the packet
 1488  *      @len: the length of packet data to write as immediate data
 1489  *      @gen: the generation bit value to write
 1490  *
 1491  *      Writes a packet as immediate data into a Tx descriptor.  The packet
 1492  *      contains a work request at its beginning.  We must write the packet
 1493  *      carefully so the SGE doesn't read it accidentally before it has been
 1494  *      written in its entirety.
 1495  */
 1496 static __inline void
 1497 write_imm(struct tx_desc *d, struct mbuf *m,
 1498           unsigned int len, unsigned int gen)
 1499 {
 1500         struct work_request_hdr *from = mtod(m, struct work_request_hdr *);
 1501         struct work_request_hdr *to = (struct work_request_hdr *)d;
 1502 
 1503         if (len > WR_LEN)
 1504                 panic("len too big %d\n", len);
 1505         if (len < sizeof(*from))
 1506                 panic("len too small %d", len);
 1507         
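              /*
               * Copy the body of the work request first; wr_hi and then
               * wr_lo (which carries the generation bit) are written last,
               * with wr_lo going out only after the write memory barrier,
               * so the SGE never sees a valid generation on a partially
               * written descriptor.
               */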
 1508         memcpy(&to[1], &from[1], len - sizeof(*from));
 1509         to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
 1510                                         V_WR_BCNTLFLT(len & 7));
 1511         wmb();
 1512         to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
 1513                                         V_WR_LEN((len + 7) / 8));
 1514         wr_gen2(d, gen);
 1515 
 1516         /*
 1517          * This check is a hack; we should really fix the logic so
 1518          * that this can't happen.
 1519          */
 1520         if (m->m_type != MT_DONTFREE)
 1521                 m_freem(m);
 1522         
 1523 }
 1524 
 1525 /**
 1526  *      check_desc_avail - check descriptor availability on a send queue
 1527  *      @adap: the adapter
 1528  *      @q: the TX queue
 1529  *      @m: the packet needing the descriptors
 1530  *      @ndesc: the number of Tx descriptors needed
 1531  *      @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 1532  *
 1533  *      Checks if the requested number of Tx descriptors is available on an
 1534  *      SGE send queue.  If the queue is already suspended or not enough
 1535  *      descriptors are available, the packet is queued for later transmission.
 1536  *      Must be called with the Tx queue locked.
 1537  *
 1538  *      Returns 0 if enough descriptors are available, 1 if there aren't
 1539  *      enough descriptors and the packet has been queued, and 2 if the caller
 1540  *      needs to retry because there weren't enough descriptors at the
 1541  *      beginning of the call but some freed up in the mean time.
 1542  */
 1543 static __inline int
 1544 check_desc_avail(adapter_t *adap, struct sge_txq *q,
 1545                  struct mbuf *m, unsigned int ndesc,
 1546                  unsigned int qid)
 1547 {
 1548         /* 
 1549          * XXX We currently only use this for checking the control queue;
 1550          * the control queue is only used for binding qsets, which happens
 1551          * at init time, so we are guaranteed enough descriptors.
 1552          */
 1553         if (__predict_false(!mbufq_empty(&q->sendq))) {
 1554 addq_exit:      mbufq_tail(&q->sendq, m);
 1555                 return 1;
 1556         }
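              /*
               * Not enough free descriptors: mark the queue stopped, then
               * re-check after the memory barrier.  If completions freed
               * enough descriptors in the meantime, clear the stopped bit
               * and ask the caller to retry (return 2); otherwise leave the
               * packet on the send queue (return 1).
               */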
 1557         if (__predict_false(q->size - q->in_use < ndesc)) {
 1558 
 1559                 struct sge_qset *qs = txq_to_qset(q, qid);
 1560 
 1561                 printf("stopping q\n");
 1562                 
 1563                 setbit(&qs->txq_stopped, qid);
 1564                 smp_mb();
 1565 
 1566                 if (should_restart_tx(q) &&
 1567                     test_and_clear_bit(qid, &qs->txq_stopped))
 1568                         return 2;
 1569 
 1570                 q->stops++;
 1571                 goto addq_exit;
 1572         }
 1573         return 0;
 1574 }
 1575 
 1576 
 1577 /**
 1578  *      reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 1579  *      @q: the SGE control Tx queue
 1580  *
 1581  *      This is a variant of reclaim_completed_tx() that is used for Tx queues
 1582  *      that send only immediate data (presently just the control queues) and
 1583  *      thus do not have any mbufs.
 1584  */
 1585 static __inline void
 1586 reclaim_completed_tx_imm(struct sge_txq *q)
 1587 {
 1588         unsigned int reclaim = q->processed - q->cleaned;
 1589 
 1590         mtx_assert(&q->lock, MA_OWNED);
 1591         
 1592         q->in_use -= reclaim;
 1593         q->cleaned += reclaim;
 1594 }
 1595 
 1596 static __inline int
 1597 immediate(const struct mbuf *m)
 1598 {
 1599         return (m->m_len <= WR_LEN && m->m_pkthdr.len <= WR_LEN);
 1600 }
 1601 
 1602 /**
 1603  *      ctrl_xmit - send a packet through an SGE control Tx queue
 1604  *      @adap: the adapter
 1605  *      @q: the control queue
 1606  *      @m: the packet
 1607  *
 1608  *      Send a packet through an SGE control Tx queue.  Packets sent through
 1609  *      a control queue must fit entirely as immediate data in a single Tx
 1610  *      descriptor and have no page fragments.
 1611  */
 1612 static int
 1613 ctrl_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
 1614 {
 1615         int ret;
 1616         struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
 1617 
 1618         if (__predict_false(!immediate(m))) {
 1619                 m_freem(m);
 1620                 return 0;
 1621         }
 1622         
 1623         wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
 1624         wrp->wr_lo = htonl(V_WR_TID(q->token));
 1625 
 1626         mtx_lock(&q->lock);
 1627 again:  reclaim_completed_tx_imm(q);
 1628 
 1629         ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
 1630         if (__predict_false(ret)) {
 1631                 if (ret == 1) {
 1632                         mtx_unlock(&q->lock);
 1633                         log(LOG_ERR, "no desc available\n");
 1634                         return (ENOSPC);
 1635                 }
 1636                 goto again;
 1637         }
 1638         write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
 1639         
 1640         q->in_use++;
 1641         if (++q->pidx >= q->size) {
 1642                 q->pidx = 0;
 1643                 q->gen ^= 1;
 1644         }
 1645         mtx_unlock(&q->lock);
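              /*
               * Make the new descriptor visible before ringing the doorbell
               * for this queue's egress context.
               */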
 1646         wmb();
 1647         t3_write_reg(adap, A_SG_KDOORBELL,
 1648                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1649         return (0);
 1650 }
 1651 
 1652 
 1653 /**
 1654  *      restart_ctrlq - restart a suspended control queue
 1655  *      @qs: the queue set containing the control queue
 1656  *
 1657  *      Resumes transmission on a suspended Tx control queue.
 1658  */
 1659 static void
 1660 restart_ctrlq(void *data, int npending)
 1661 {
 1662         struct mbuf *m;
 1663         struct sge_qset *qs = (struct sge_qset *)data;
 1664         struct sge_txq *q = &qs->txq[TXQ_CTRL];
 1665         adapter_t *adap = qs->port->adapter;
 1666 
 1667         log(LOG_WARNING, "Restart_ctrlq in_use=%d\n", q->in_use);
 1668         
 1669         mtx_lock(&q->lock);
 1670 again:  reclaim_completed_tx_imm(q);
 1671 
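              /*
               * Drain packets that were queued while the control queue was
               * suspended, for as long as descriptors remain available.
               */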
 1672         while (q->in_use < q->size &&
 1673                (m = mbufq_dequeue(&q->sendq)) != NULL) {
 1674 
 1675                 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
 1676 
 1677                 if (++q->pidx >= q->size) {
 1678                         q->pidx = 0;
 1679                         q->gen ^= 1;
 1680                 }
 1681                 q->in_use++;
 1682         }
 1683         if (!mbufq_empty(&q->sendq)) {
 1684                 setbit(&qs->txq_stopped, TXQ_CTRL);
 1685                 smp_mb();
 1686 
 1687                 if (should_restart_tx(q) &&
 1688                     test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
 1689                         goto again;
 1690                 q->stops++;
 1691         }
 1692         mtx_unlock(&q->lock);
 1693         wmb();
 1694         t3_write_reg(adap, A_SG_KDOORBELL,
 1695                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1696 }
 1697 
 1698 
 1699 /*
 1700  * Send a management message through control queue 0
 1701  */
 1702 int
 1703 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
 1704 {
 1705         return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m);
 1706 }
 1707 
 1708 
 1709 /**
 1710  *      free_qset - free the resources of an SGE queue set
 1711  *      @sc: the controller owning the queue set
 1712  *      @q: the queue set
 1713  *
 1714  *      Release the HW and SW resources associated with an SGE queue set, such
 1715  *      as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 1716  *      queue set must be quiesced prior to calling this.
 1717  */
 1718 void
 1719 t3_free_qset(adapter_t *sc, struct sge_qset *q)
 1720 {
 1721         int i;
 1722         
 1723         t3_free_tx_desc_all(&q->txq[TXQ_ETH]);
 1724         
 1725         for (i = 0; i < SGE_TXQ_PER_SET; i++) 
 1726                 if (q->txq[i].txq_mr.br_ring != NULL) {
 1727                         free(q->txq[i].txq_mr.br_ring, M_DEVBUF);
 1728                         mtx_destroy(&q->txq[i].txq_mr.br_lock);
 1729                 }
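              /*
               * Disable each hardware context under the SGE register lock
               * before unloading and freeing its descriptor ring and DMA
               * resources.
               */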
 1730         for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 1731                 if (q->fl[i].desc) {
 1732                         mtx_lock_spin(&sc->sge.reg_lock);
 1733                         t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
 1734                         mtx_unlock_spin(&sc->sge.reg_lock);
 1735                         bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
 1736                         bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
 1737                                         q->fl[i].desc_map);
 1738                         bus_dma_tag_destroy(q->fl[i].desc_tag);
 1739                         bus_dma_tag_destroy(q->fl[i].entry_tag);
 1740                 }
 1741                 if (q->fl[i].sdesc) {
 1742                         free_rx_bufs(sc, &q->fl[i]);
 1743                         free(q->fl[i].sdesc, M_DEVBUF);
 1744                 }
 1745         }
 1746 
 1747         for (i = 0; i < SGE_TXQ_PER_SET; i++) {
 1748                 if (q->txq[i].desc) {
 1749                         mtx_lock_spin(&sc->sge.reg_lock);
 1750                         t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
 1751                         mtx_unlock_spin(&sc->sge.reg_lock);
 1752                         bus_dmamap_unload(q->txq[i].desc_tag,
 1753                                         q->txq[i].desc_map);
 1754                         bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
 1755                                         q->txq[i].desc_map);
 1756                         bus_dma_tag_destroy(q->txq[i].desc_tag);
 1757                         bus_dma_tag_destroy(q->txq[i].entry_tag);
 1758                         MTX_DESTROY(&q->txq[i].lock);
 1759                 }
 1760                 if (q->txq[i].sdesc) {
 1761                         free(q->txq[i].sdesc, M_DEVBUF);
 1762                 }
 1763         }
 1764 
 1765         if (q->rspq.desc) {
 1766                 mtx_lock_spin(&sc->sge.reg_lock);
 1767                 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
 1768                 mtx_unlock_spin(&sc->sge.reg_lock);
 1769                 
 1770                 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
 1771                 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
 1772                                 q->rspq.desc_map);
 1773                 bus_dma_tag_destroy(q->rspq.desc_tag);
 1774                 MTX_DESTROY(&q->rspq.lock);
 1775         }
 1776 
 1777         tcp_lro_free(&q->lro.ctrl);
 1778 
 1779         bzero(q, sizeof(*q));
 1780 }
 1781 
 1782 /**
 1783  *      t3_free_sge_resources - free SGE resources
 1784  *      @sc: the adapter softc
 1785  *
 1786  *      Frees resources used by the SGE queue sets.
 1787  */
 1788 void
 1789 t3_free_sge_resources(adapter_t *sc)
 1790 {
 1791         int i, nqsets;
 1792         
 1793 #ifdef IFNET_MULTIQUEUE
 1794         panic("%s should not be called when IFNET_MULTIQUEUE is defined", __FUNCTION__);
 1795 #endif          
 1796         for (nqsets = i = 0; i < (sc)->params.nports; i++) 
 1797                 nqsets += sc->port[i].nqsets;
 1798 
 1799         for (i = 0; i < nqsets; ++i)
 1800                 t3_free_qset(sc, &sc->sge.qs[i]);
 1801 }
 1802 
 1803 /**
 1804  *      t3_sge_start - enable SGE
 1805  *      @sc: the controller softc
 1806  *
 1807  *      Enables the SGE for DMAs.  This is the last step in starting packet
 1808  *      transfers.
 1809  */
 1810 void
 1811 t3_sge_start(adapter_t *sc)
 1812 {
 1813         t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
 1814 }
 1815 
 1816 /**
 1817  *      t3_sge_stop - disable SGE operation
 1818  *      @sc: the adapter
 1819  *
 1820  *      Disables the DMA engine.  This can be called in emergencies (e.g.,
 1821  *      from error interrupts) or from normal process context.  In the latter
 1822  *      case it also disables any pending queue restart tasklets.  Note that
 1823  *      tasklets as it cannot wait; however, the tasklets will have no effect
 1824  *      tasklets as it cannot wait, however the tasklets will have no effect
 1825  *      since the doorbells are disabled and the driver will call this again
 1826  *      later from process context, at which time the tasklets will be stopped
 1827  *      if they are still running.
 1828  */
 1829 void
 1830 t3_sge_stop(adapter_t *sc)
 1831 {
 1832         int i, nqsets;
 1833         
 1834         t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
 1835 
 1836         if (sc->tq == NULL)
 1837                 return;
 1838         
 1839         for (nqsets = i = 0; i < (sc)->params.nports; i++) 
 1840                 nqsets += sc->port[i].nqsets;
 1841 #ifdef notyet
 1842         /*
 1843          * 
 1844          * XXX
 1845          */
 1846         for (i = 0; i < nqsets; ++i) {
 1847                 struct sge_qset *qs = &sc->sge.qs[i];
 1848                 
 1849                 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
 1850                 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
 1851         }
 1852 #endif
 1853 }
 1854 
 1855 /**
 1856  *      t3_free_tx_desc - reclaims Tx descriptors and their buffers
 1857  *      @q: the Tx queue to reclaim descriptors from
 1858  *      @reclaimable: the number of descriptors to reclaim
 1859  *
 1860  *      Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 1861  *      Tx buffers.  Called with the Tx queue lock held.
 1862  *
 1863  *      Buffers that are freed count toward the interface's output-packet
 1864  *      statistics; descriptors whose software state carries no mbuf are
 1865  *      counted in txq_skipped.
 1866  *
 1867  */
 1868 void
 1869 t3_free_tx_desc(struct sge_txq *q, int reclaimable)
 1870 {
 1871         struct tx_sw_desc *txsd;
 1872         unsigned int cidx;
 1873         
 1874         cidx = q->cidx;
 1875         txsd = &q->sdesc[cidx];
 1876 #ifdef T3_TRACE
 1877         T3_TRACE2(sc->tb[q->cntxt_id & 7],
 1878                   "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
 1879 #endif
 1880         DPRINTF("reclaiming %d WR\n", reclaimable);
 1881         mtx_assert(&q->lock, MA_OWNED);
 1882         while (reclaimable--) {
 1883                 DPRINTF("cidx=%d d=%p\n", cidx, txsd);
 1884                 if (txsd->mi.mi_base != NULL) {
 1885                         if (txsd->flags & TX_SW_DESC_MAPPED) {
 1886                                 bus_dmamap_unload(q->entry_tag, txsd->map);
 1887                                 txsd->flags &= ~TX_SW_DESC_MAPPED;
 1888                         }
 1889                         m_freem_iovec(&txsd->mi);       
 1890                         buf_ring_scan(&q->txq_mr, txsd->mi.mi_base, __FILE__, __LINE__);
 1891                         txsd->mi.mi_base = NULL;
 1892                         /*
 1893                          * XXX check for cache hit rate here
 1894                          *
 1895                          */
 1896                         q->port->ifp->if_opackets++;
 1897 #if defined(DIAGNOSTIC) && 0
 1898                         if (m_get_priority(txsd->m[0]) != cidx) 
 1899                                 printf("pri=%d cidx=%d\n",
 1900                                     (int)m_get_priority(txsd->m[0]), cidx);
 1901 #endif                  
 1902 
 1903                 } else
 1904                         q->txq_skipped++;
 1905                 
 1906                 ++txsd;
 1907                 if (++cidx == q->size) {
 1908                         cidx = 0;
 1909                         txsd = q->sdesc;
 1910                 }
 1911         }
 1912         q->cidx = cidx;
 1913 
 1914 }
 1915 
 1916 void
 1917 t3_free_tx_desc_all(struct sge_txq *q)
 1918 {
 1919         int i;
 1920         struct tx_sw_desc *txsd;
 1921         
 1922         for (i = 0; i < q->size; i++) {
 1923                 txsd = &q->sdesc[i];
 1924                 if (txsd->mi.mi_base != NULL) {
 1925                         if (txsd->flags & TX_SW_DESC_MAPPED) {
 1926                                 bus_dmamap_unload(q->entry_tag, txsd->map);
 1927                                 txsd->flags &= ~TX_SW_DESC_MAPPED;
 1928                         }
 1929                         m_freem_iovec(&txsd->mi);
 1930                         bzero(&txsd->mi, sizeof(txsd->mi));
 1931                 }
 1932         }
 1933 }
 1934 
 1935 /**
 1936  *      is_new_response - check if a response is newly written
 1937  *      @r: the response descriptor
 1938  *      @q: the response queue
 1939  *
 1940  *      Returns true if a response descriptor contains a yet unprocessed
 1941  *      response.
 1942  */
 1943 static __inline int
 1944 is_new_response(const struct rsp_desc *r,
 1945     const struct sge_rspq *q)
 1946 {
 1947         return (r->intr_gen & F_RSPD_GEN2) == q->gen;
 1948 }
 1949 
 1950 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
 1951 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
 1952                         V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
 1953                         V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
 1954                         V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
 1955 
 1956 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
 1957 #define NOMEM_INTR_DELAY 2500
 1958 
 1959 /**
 1960  *      write_ofld_wr - write an offload work request
 1961  *      @adap: the adapter
 1962  *      @m: the packet to send
 1963  *      @q: the Tx queue
 1964  *      @pidx: index of the first Tx descriptor to write
 1965  *      @gen: the generation value to use
 1966  *      @ndesc: number of descriptors the packet will occupy
 1967  *
 1968  *      Write an offload work request to send the supplied packet.  The packet
 1969  *      data already carry the work request with most fields populated.
 1970  */
 1971 static void
 1972 write_ofld_wr(adapter_t *adap, struct mbuf *m,
 1973     struct sge_txq *q, unsigned int pidx,
 1974     unsigned int gen, unsigned int ndesc,
 1975     bus_dma_segment_t *segs, unsigned int nsegs)
 1976 {
 1977         unsigned int sgl_flits, flits;
 1978         struct work_request_hdr *from;
 1979         struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
 1980         struct tx_desc *d = &q->desc[pidx];
 1981         struct txq_state txqs;
 1982         
 1983         if (immediate(m) && nsegs == 0) {
 1984                 write_imm(d, m, m->m_len, gen);
 1985                 return;
 1986         }
 1987 
 1988         /* Only TX_DATA builds SGLs */
 1989         from = mtod(m, struct work_request_hdr *);
 1990         memcpy(&d->flit[1], &from[1], m->m_len - sizeof(*from));
 1991 
 1992         flits = m->m_len / 8;
 1993         sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : sgl;
 1994 
 1995         make_sgl(sgp, segs, nsegs);
 1996         sgl_flits = sgl_len(nsegs);
 1997 
 1998         txqs.gen = gen;
 1999         txqs.pidx = pidx;
 2000         txqs.compl = 0;
 2001 
 2002         write_wr_hdr_sgl(ndesc, d, &txqs, q, sgl, flits, sgl_flits,
 2003             from->wr_hi, from->wr_lo);
 2004 }
 2005 
 2006 /**
 2007  *      calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 2008  *      @m: the packet
 2009  *
 2010  *      Returns the number of Tx descriptors needed for the given offload
 2011  *      packet.  These packets are already fully constructed.
 2012  */
 2013 static __inline unsigned int
 2014 calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs)
 2015 {
 2016         unsigned int flits, cnt = 0;
 2017         int ndescs;
 2018 
 2019         if (m->m_len <= WR_LEN && nsegs == 0)
 2020                 return (1);                 /* packet fits as immediate data */
 2021 
 2022         if (m->m_flags & M_IOVEC)
 2023                 cnt = mtomv(m)->mv_count;
 2024         else
 2025                 cnt = nsegs;
 2026 
 2027         /* headers */
 2028         flits = m->m_len / 8;
 2029 
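              /*
               * Convert the total flit count (8-byte words: header flits
               * plus SGL flits) into Tx descriptors, e.g. a 128-byte header
               * contributes 16 flits.
               */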
 2030         ndescs = flits_to_desc(flits + sgl_len(cnt));
 2031 
 2032         CTR4(KTR_CXGB, "flits=%d sgl_len=%d nsegs=%d ndescs=%d",
 2033             flits, sgl_len(cnt), nsegs, ndescs);
 2034 
 2035         return (ndescs);
 2036 }
 2037 
 2038 /**
 2039  *      ofld_xmit - send a packet through an offload queue
 2040  *      @adap: the adapter
 2041  *      @q: the Tx offload queue
 2042  *      @m: the packet
 2043  *
 2044  *      Send an offload packet through an SGE offload queue.
 2045  */
 2046 static int
 2047 ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
 2048 {
 2049         int ret, nsegs;
 2050         unsigned int ndesc;
 2051         unsigned int pidx, gen;
 2052         bus_dma_segment_t segs[TX_MAX_SEGS], *vsegs;
 2053         struct tx_sw_desc *stx;
 2054 
 2055         nsegs = m_get_sgllen(m);
 2056         vsegs = m_get_sgl(m);
 2057         ndesc = calc_tx_descs_ofld(m, nsegs);
 2058         busdma_map_sgl(vsegs, segs, nsegs);
 2059 
 2060         stx = &q->sdesc[q->pidx];
 2061         KASSERT(stx->mi.mi_base == NULL, ("mi_base set"));
 2062         
 2063         mtx_lock(&q->lock);
 2064 again:  reclaim_completed_tx_(q, 16);
 2065         ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
 2066         if (__predict_false(ret)) {
 2067                 if (ret == 1) {
 2068                         printf("no ofld desc avail\n");
 2069                         
 2070                         m_set_priority(m, ndesc);     /* save for restart */
 2071                         mtx_unlock(&q->lock);
 2072                         return (EINTR);
 2073                 }
 2074                 goto again;
 2075         }
 2076 
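              /*
               * Claim the descriptors under the queue lock: remember the
               * current generation and producer index for this WR, then
               * advance pidx and flip the generation bit if the ring wraps.
               */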
 2077         gen = q->gen;
 2078         q->in_use += ndesc;
 2079         pidx = q->pidx;
 2080         q->pidx += ndesc;
 2081         if (q->pidx >= q->size) {
 2082                 q->pidx -= q->size;
 2083                 q->gen ^= 1;
 2084         }
 2085 #ifdef T3_TRACE
 2086         T3_TRACE5(adap->tb[q->cntxt_id & 7],
 2087                   "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
 2088                   ndesc, pidx, skb->len, skb->len - skb->data_len,
 2089                   skb_shinfo(skb)->nr_frags);
 2090 #endif
 2091         mtx_unlock(&q->lock);
 2092 
 2093         write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
 2094         check_ring_tx_db(adap, q);
 2095         return (0);
 2096 }
 2097 
 2098 /**
 2099  *      restart_offloadq - restart a suspended offload queue
 2100  *      @qs: the queue set containing the offload queue
 2101  *
 2102  *      Resumes transmission on a suspended Tx offload queue.
 2103  */
 2104 static void
 2105 restart_offloadq(void *data, int npending)
 2106 {
 2107         struct mbuf *m;
 2108         struct sge_qset *qs = data;
 2109         struct sge_txq *q = &qs->txq[TXQ_OFLD];
 2110         adapter_t *adap = qs->port->adapter;
 2111         bus_dma_segment_t segs[TX_MAX_SEGS];
 2112         struct tx_sw_desc *stx = &q->sdesc[q->pidx];
 2113         int nsegs, cleaned;
 2114                 
 2115         mtx_lock(&q->lock);
 2116 again:  cleaned = reclaim_completed_tx_(q, 16);
 2117 
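              /*
               * Replay deferred packets from the send queue; each one
               * carries the descriptor count that ofld_xmit() stashed in
               * its priority field.
               */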
 2118         while ((m = mbufq_peek(&q->sendq)) != NULL) {
 2119                 unsigned int gen, pidx;
 2120                 unsigned int ndesc = m_get_priority(m);
 2121 
 2122                 if (__predict_false(q->size - q->in_use < ndesc)) {
 2123                         setbit(&qs->txq_stopped, TXQ_OFLD);
 2124                         smp_mb();
 2125 
 2126                         if (should_restart_tx(q) &&
 2127                             test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
 2128                                 goto again;
 2129                         q->stops++;
 2130                         break;
 2131                 }
 2132 
 2133                 gen = q->gen;
 2134                 q->in_use += ndesc;
 2135                 pidx = q->pidx;
 2136                 q->pidx += ndesc;
 2137                 if (q->pidx >= q->size) {
 2138                         q->pidx -= q->size;
 2139                         q->gen ^= 1;
 2140                 }
 2141                 
 2142                 (void)mbufq_dequeue(&q->sendq);
 2143                 busdma_map_mbufs(&m, q, stx, segs, &nsegs);
 2144                 mtx_unlock(&q->lock);
 2145                 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
 2146                 mtx_lock(&q->lock);
 2147         }
 2148         mtx_unlock(&q->lock);
 2149         
 2150 #if USE_GTS
 2151         set_bit(TXQ_RUNNING, &q->flags);
 2152         set_bit(TXQ_LAST_PKT_DB, &q->flags);
 2153 #endif
 2154         wmb();
 2155         t3_write_reg(adap, A_SG_KDOORBELL,
 2156                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 2157 }
 2158 
 2159 /**
 2160  *      queue_set - return the queue set a packet should use
 2161  *      @m: the packet
 2162  *
 2163  *      Maps a packet to the SGE queue set it should use.  The desired queue
 2164  *      set is carried in bits 1-3 in the packet's priority.
 2165  */
 2166 static __inline int
 2167 queue_set(const struct mbuf *m)
 2168 {
 2169         return m_get_priority(m) >> 1;
 2170 }
 2171 
 2172 /**
 2173  *      is_ctrl_pkt - return whether an offload packet is a control packet
 2174  *      @m: the packet
 2175  *
 2176  *      Determines whether an offload packet should use an OFLD or a CTRL
 2177  *      Tx queue.  This is indicated by bit 0 in the packet's priority.
 2178  */
 2179 static __inline int
 2180 is_ctrl_pkt(const struct mbuf *m)
 2181 {
 2182         return m_get_priority(m) & 1;
 2183 }
 2184 
 2185 /**
 2186  *      t3_offload_tx - send an offload packet
 2187  *      @tdev: the offload device to send to
 2188  *      @m: the packet
 2189  *
 2190  *      Sends an offload packet.  We use the packet priority to select the
 2191  *      appropriate Tx queue as follows: bit 0 indicates whether the packet
 2192  *      should be sent as regular or control, bits 1-3 select the queue set.
 2193  */
 2194 int
 2195 t3_offload_tx(struct t3cdev *tdev, struct mbuf *m)
 2196 {
 2197         adapter_t *adap = tdev2adap(tdev);
 2198         struct sge_qset *qs = &adap->sge.qs[queue_set(m)];
 2199 
 2200         if (__predict_false(is_ctrl_pkt(m))) 
 2201                 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m);
 2202 
 2203         return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m);
 2204 }
 2205 
 2206 /**
 2207  *      deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 2208  *      @tdev: the offload device that will be receiving the packets
 2209  *      @q: the SGE response queue that assembled the bundle
 2210  *      @m: the partial bundle
 2211  *      @n: the number of packets in the bundle
 2212  *
 2213  *      Delivers a (partial) bundle of Rx offload packets to an offload device.
 2214  */
 2215 static __inline void
 2216 deliver_partial_bundle(struct t3cdev *tdev,
 2217                         struct sge_rspq *q,
 2218                         struct mbuf *mbufs[], int n)
 2219 {
 2220         if (n) {
 2221                 q->offload_bundles++;
 2222                 cxgb_ofld_recv(tdev, mbufs, n);
 2223         }
 2224 }
 2225 
 2226 static __inline int
 2227 rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
 2228     struct mbuf *m, struct mbuf *rx_gather[],
 2229     unsigned int gather_idx)
 2230 {
 2231         
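              /*
               * Gather received offload packets into a bundle and hand a
               * full bundle of RX_BUNDLE_SIZE mbufs to cxgb_ofld_recv() in
               * a single call.
               */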
 2232         rq->offload_pkts++;
 2233         m->m_pkthdr.header = mtod(m, void *);
 2234         rx_gather[gather_idx++] = m;
 2235         if (gather_idx == RX_BUNDLE_SIZE) {
 2236                 cxgb_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE);
 2237                 gather_idx = 0;
 2238                 rq->offload_bundles++;
 2239         }
 2240         return (gather_idx);
 2241 }
 2242 
 2243 static void
 2244 restart_tx(struct sge_qset *qs)
 2245 {
 2246         struct adapter *sc = qs->port->adapter;
 2247         
 2248         
 2249         if (isset(&qs->txq_stopped, TXQ_OFLD) &&
 2250             should_restart_tx(&qs->txq[TXQ_OFLD]) &&
 2251             test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
 2252                 qs->txq[TXQ_OFLD].restarts++;
 2253                 DPRINTF("restarting TXQ_OFLD\n");
 2254                 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
 2255         }
 2256         DPRINTF("stopped=0x%x restart=%d processed=%d cleaned=%d in_use=%d\n",
 2257             qs->txq_stopped, should_restart_tx(&qs->txq[TXQ_CTRL]),
 2258             qs->txq[TXQ_CTRL].processed, qs->txq[TXQ_CTRL].cleaned,
 2259             qs->txq[TXQ_CTRL].in_use);
 2260         
 2261         if (isset(&qs->txq_stopped, TXQ_CTRL) &&
 2262             should_restart_tx(&qs->txq[TXQ_CTRL]) &&
 2263             test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
 2264                 qs->txq[TXQ_CTRL].restarts++;
 2265                 DPRINTF("restarting TXQ_CTRL\n");
 2266                 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
 2267         }
 2268 }
 2269 
 2270 /**
 2271  *      t3_sge_alloc_qset - initialize an SGE queue set
 2272  *      @sc: the controller softc
 2273  *      @id: the queue set id
 2274  *      @nports: how many Ethernet ports will be using this queue set
 2275  *      @irq_vec_idx: the IRQ vector index for response queue interrupts
 2276  *      @p: configuration parameters for this queue set
 2277  *      @ntxq: number of Tx queues for the queue set
 2278  *      @pi: port info for queue set
 2279  *
 2280  *      Allocate resources and initialize an SGE queue set.  A queue set
 2281  *      comprises a response queue, two Rx free-buffer queues, and up to 3
 2282  *      Tx queues.  The Tx queues are assigned roles in the order Ethernet
 2283  *      queue, offload queue, and control queue.
 2284  */
 2285 int
 2286 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
 2287                   const struct qset_params *p, int ntxq, struct port_info *pi)
 2288 {
 2289         struct sge_qset *q = &sc->sge.qs[id];
 2290         int i, header_size, ret = 0;
 2291 
 2292         for (i = 0; i < SGE_TXQ_PER_SET; i++) {
 2293                 if ((q->txq[i].txq_mr.br_ring = malloc(cxgb_txq_buf_ring_size*sizeof(struct mbuf *),
 2294                             M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
 2295                         device_printf(sc->dev, "failed to allocate mbuf ring\n");
 2296                         goto err;
 2297                 }
 2298                 q->txq[i].txq_mr.br_prod = q->txq[i].txq_mr.br_cons = 0;
 2299                 q->txq[i].txq_mr.br_size = cxgb_txq_buf_ring_size;
 2300                 mtx_init(&q->txq[i].txq_mr.br_lock, "txq mbuf ring", NULL, MTX_DEF);
 2301         }
 2302 
 2303         init_qset_cntxt(q, id);
 2304         q->idx = id;
 2305         
 2306         if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
 2307                     sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
 2308                     &q->fl[0].desc, &q->fl[0].sdesc,
 2309                     &q->fl[0].desc_tag, &q->fl[0].desc_map,
 2310                     sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
 2311                 printf("error %d from alloc ring fl0\n", ret);
 2312                 goto err;
 2313         }
 2314 
 2315         if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
 2316                     sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
 2317                     &q->fl[1].desc, &q->fl[1].sdesc,
 2318                     &q->fl[1].desc_tag, &q->fl[1].desc_map,
 2319                     sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
 2320                 printf("error %d from alloc ring fl1\n", ret);
 2321                 goto err;
 2322         }
 2323 
 2324         if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
 2325                     &q->rspq.phys_addr, &q->rspq.desc, NULL,
 2326                     &q->rspq.desc_tag, &q->rspq.desc_map,
 2327                     NULL, NULL)) != 0) {
 2328                 printf("error %d from alloc ring rspq\n", ret);
 2329                 goto err;
 2330         }
 2331 
 2332         for (i = 0; i < ntxq; ++i) {
 2333                 /*
 2334                  * The control queue always uses immediate data so does not
 2335                  * need to keep track of any mbufs.
 2336                  * XXX Placeholder for future TOE support.
 2337                  */
 2338                 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
 2339 
 2340                 if ((ret = alloc_ring(sc, p->txq_size[i],
 2341                             sizeof(struct tx_desc), sz,
 2342                             &q->txq[i].phys_addr, &q->txq[i].desc,
 2343                             &q->txq[i].sdesc, &q->txq[i].desc_tag,
 2344                             &q->txq[i].desc_map,
 2345                             sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
 2346                         printf("error %d from alloc ring tx %i\n", ret, i);
 2347                         goto err;
 2348                 }
 2349                 mbufq_init(&q->txq[i].sendq);
 2350                 q->txq[i].gen = 1;
 2351                 q->txq[i].size = p->txq_size[i];
 2352                 snprintf(q->txq[i].lockbuf, TXQ_NAME_LEN, "t3 txq lock %d:%d:%d",
 2353                     device_get_unit(sc->dev), irq_vec_idx, i);
 2354                 MTX_INIT(&q->txq[i].lock, q->txq[i].lockbuf, NULL, MTX_DEF);
 2355         }
 2356 
 2357         q->txq[TXQ_ETH].port = pi;
 2358         
 2359         TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
 2360         TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
 2361         TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_ETH]);
 2362         TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_OFLD]);
 2363 
 2364         q->fl[0].gen = q->fl[1].gen = 1;
 2365         q->fl[0].size = p->fl_size;
 2366         q->fl[1].size = p->jumbo_size;
 2367 
 2368         q->rspq.gen = 1;
 2369         q->rspq.cidx = 0;
 2370         q->rspq.size = p->rspq_size;
 2371 
 2372 
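              /*
               * Each receive cluster embeds an mbuf header, packet header,
               * m_ext_ descriptor, and a 32-bit reference count at its
               * start (see init_cluster_mbuf()), so the usable buffer size
               * is reduced by header_size.
               */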
 2373         header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
 2374         q->txq[TXQ_ETH].stop_thres = nports *
 2375             flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
 2376 
 2377         q->fl[0].buf_size = (MCLBYTES - header_size);
 2378         q->fl[0].zone = zone_clust;
 2379         q->fl[0].type = EXT_CLUSTER;
 2380 #if __FreeBSD_version > 800000
 2381         if (cxgb_use_16k_clusters) {            
 2382                 q->fl[1].buf_size = MJUM16BYTES - header_size;
 2383                 q->fl[1].zone = zone_jumbo16;
 2384                 q->fl[1].type = EXT_JUMBO16;
 2385         } else {
 2386                 q->fl[1].buf_size = MJUM9BYTES - header_size;
 2387                 q->fl[1].zone = zone_jumbo9;
 2388                 q->fl[1].type = EXT_JUMBO9;             
 2389         }
 2390 #else
 2391         q->fl[1].buf_size = MJUMPAGESIZE - header_size;
 2392         q->fl[1].zone = zone_jumbop;
 2393         q->fl[1].type = EXT_JUMBOP;
 2394 #endif
 2395 
 2396         /*
 2397          * We allocate and set up the lro_ctrl structure irrespective of whether
 2398          * lro is available and/or enabled.
 2399          */
 2400         q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
 2401         ret = tcp_lro_init(&q->lro.ctrl);
 2402         if (ret) {
 2403                 printf("error %d from tcp_lro_init\n", ret);
 2404                 goto err;
 2405         }
 2406         q->lro.ctrl.ifp = pi->ifp;
 2407 
 2408         mtx_lock_spin(&sc->sge.reg_lock);
 2409         ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
 2410                                    q->rspq.phys_addr, q->rspq.size,
 2411                                    q->fl[0].buf_size, 1, 0);
 2412         if (ret) {
 2413                 printf("error %d from t3_sge_init_rspcntxt\n", ret);
 2414                 goto err_unlock;
 2415         }
 2416 
 2417         for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 2418                 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
 2419                                           q->fl[i].phys_addr, q->fl[i].size,
 2420                                           q->fl[i].buf_size, p->cong_thres, 1,
 2421                                           0);
 2422                 if (ret) {
 2423                         printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
 2424                         goto err_unlock;
 2425                 }
 2426         }
 2427 
 2428         ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
 2429                                  SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
 2430                                  q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
 2431                                  1, 0);
 2432         if (ret) {
 2433                 printf("error %d from t3_sge_init_ecntxt\n", ret);
 2434                 goto err_unlock;
 2435         }
 2436 
 2437         if (ntxq > 1) {
 2438                 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
 2439                                          USE_GTS, SGE_CNTXT_OFLD, id,
 2440                                          q->txq[TXQ_OFLD].phys_addr,
 2441                                          q->txq[TXQ_OFLD].size, 0, 1, 0);
 2442                 if (ret) {
 2443                         printf("error %d from t3_sge_init_ecntxt\n", ret);
 2444                         goto err_unlock;
 2445                 }
 2446         }
 2447 
 2448         if (ntxq > 2) {
 2449                 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
 2450                                          SGE_CNTXT_CTRL, id,
 2451                                          q->txq[TXQ_CTRL].phys_addr,
 2452                                          q->txq[TXQ_CTRL].size,
 2453                                          q->txq[TXQ_CTRL].token, 1, 0);
 2454                 if (ret) {
 2455                         printf("error %d from t3_sge_init_ecntxt\n", ret);
 2456                         goto err_unlock;
 2457                 }
 2458         }
 2459         
 2460         snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
 2461             device_get_unit(sc->dev), irq_vec_idx);
 2462         MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
 2463         
 2464         mtx_unlock_spin(&sc->sge.reg_lock);
 2465         t3_update_qset_coalesce(q, p);
 2466         q->port = pi;
 2467         
 2468         refill_fl(sc, &q->fl[0], q->fl[0].size);
 2469         refill_fl(sc, &q->fl[1], q->fl[1].size);
 2470         refill_rspq(sc, &q->rspq, q->rspq.size - 1);
 2471 
 2472         t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
 2473                      V_NEWTIMER(q->rspq.holdoff_tmr));
 2474 
 2475         return (0);
 2476 
 2477 err_unlock:
 2478         mtx_unlock_spin(&sc->sge.reg_lock);
 2479 err:    
 2480         t3_free_qset(sc, q);
 2481 
 2482         return (ret);
 2483 }
 2484 
 2485 /*
 2486  * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
 2487  * Ethernet data.  Hardware assistance with various checksums and any VLAN tag
 2488  * will also be taken into account here.
 2489  */
 2490 void
 2491 t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
 2492 {
 2493         struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
 2494         struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
 2495         struct ifnet *ifp = pi->ifp;
 2496         
 2497         DPRINTF("rx_eth m=%p m->m_data=%p p->iff=%d\n", m, mtod(m, uint8_t *), cpl->iff);
 2498 
 2499         if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
 2500             cpl->csum_valid && cpl->csum == 0xffff) {
 2501                 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 2502                 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID |
 2503                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 2504                 m->m_pkthdr.csum_data = 0xffff;
 2505         }
 2506         /* 
 2507          * XXX need to add VLAN support for 6.x
 2508          */
 2509 #ifdef VLAN_SUPPORTED
 2510         if (__predict_false(cpl->vlan_valid)) {
 2511                 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
 2512                 m->m_flags |= M_VLANTAG;
 2513         } 
 2514 #endif
 2515         
 2516         m->m_pkthdr.rcvif = ifp;
 2517         m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad;
 2518         ifp->if_ipackets++;
 2519 #ifndef DISABLE_MBUF_IOVEC
 2520         m_explode(m);
 2521 #endif  
 2522         /*
 2523          * adjust after conversion to mbuf chain
 2524          */
 2525         m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
 2526         m->m_len -= (sizeof(*cpl) + ethpad);
 2527         m->m_data += (sizeof(*cpl) + ethpad);
 2528 }
 2529 
 2530 static void
 2531 ext_free_handler(void *buf, void * args)
 2532 {
 2533         uintptr_t type = (uintptr_t)args;
 2534         uma_zone_t zone;
 2535         struct mbuf *m;
 2536 
 2537         m = buf;
 2538         zone = m_getzonefromtype(type);
 2539         m->m_ext.ext_type = (int)type;
 2540         cxgb_ext_freed++;
 2541         cxgb_cache_put(zone, m);
 2542 }
 2543 
 2544 static void
 2545 init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone)
 2546 {
 2547         struct mbuf *m;
 2548         int header_size;
 2549         
 2550         header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) +
 2551             sizeof(struct m_ext_) + sizeof(uint32_t);
 2552         
 2553         bzero(cl, header_size);
 2554         m = (struct mbuf *)cl;
 2555         
 2556         cxgb_ext_inited++;
 2557         SLIST_INIT(&m->m_pkthdr.tags);
 2558         m->m_type = MT_DATA;
 2559         m->m_flags = flags | M_NOFREE | M_EXT;
 2560         m->m_data = cl + header_size;
 2561         m->m_ext.ext_buf = cl;
 2562         m->m_ext.ref_cnt = (uint32_t *)(cl + header_size - sizeof(uint32_t));
 2563         m->m_ext.ext_size = m_getsizefromtype(type);
 2564         m->m_ext.ext_free = ext_free_handler;
 2565         m->m_ext.ext_args = (void *)(uintptr_t)type;
 2566         m->m_ext.ext_type = EXT_EXTREF;
 2567         *(m->m_ext.ref_cnt) = 1;
 2568         DPRINTF("data=%p ref_cnt=%p\n", m->m_data, m->m_ext.ref_cnt); 
 2569 }
 2570 
 2571 
 2572 /**
 2573  *      get_packet - return the next ingress packet buffer from a free list
 2574  *      @adap: the adapter that received the packet
 2575  *      @drop_thres: # of remaining buffers before we start dropping packets
 2576  *      @qs: the qset that the SGE free list holding the packet belongs to
 2577  *      @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
 2578  *      @r: response descriptor 
 2579  *
 2580  *      Get the next packet from a free list and complete setup of the
 2581  *      mbuf.  If the packet is small we make a copy and recycle the
 2582  *      original buffer, otherwise we use the original buffer itself.  If a
 2583  *      positive drop threshold is supplied packets are dropped and their
 2584  *      buffers recycled if (a) the number of remaining buffers is under the
 2585  *      threshold and the packet is too big to copy, or (b) the packet should
 2586  *      be copied but there is no memory for the copy.
 2587  */
 2588 #ifdef DISABLE_MBUF_IOVEC
 2589 
 2590 static int
 2591 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
 2592     struct t3_mbuf_hdr *mh, struct rsp_desc *r)
 2593 {
 2594 
 2595         unsigned int len_cq =  ntohl(r->len_cq);
 2596         struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
 2597         struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 2598         uint32_t len = G_RSPD_LEN(len_cq);
 2599         uint32_t flags = ntohl(r->flags);
 2600         uint8_t sopeop = G_RSPD_SOP_EOP(flags);
 2601         caddr_t cl;
 2602         struct mbuf *m, *m0;
 2603         int ret = 0;
 2604         
 2605         prefetch(sd->rxsd_cl);
 2606 
 2607         fl->credits--;
 2608         bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
 2609         
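              /*
               * Small single-descriptor packets are copied into a fresh mbuf
               * so the original cluster can be recycled onto the free list;
               * otherwise the cluster itself is converted into an mbuf in
               * place and passed up the stack.
               */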
 2610         if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) {
 2611                 if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
 2612                         goto skip_recycle;
 2613                 cl = mtod(m0, void *);
 2614                 memcpy(cl, sd->data, len);
 2615                 recycle_rx_buf(adap, fl, fl->cidx);
 2616                 m = m0;
 2617                 m0->m_len = len;
 2618         } else {
 2619         skip_recycle:
 2620 
 2621                 bus_dmamap_unload(fl->entry_tag, sd->map);
 2622                 cl = sd->rxsd_cl;
 2623                 m = m0 = (struct mbuf *)cl;
 2624 
 2625                 if ((sopeop == RSPQ_SOP_EOP) ||
 2626                     (sopeop == RSPQ_SOP))
 2627                         flags = M_PKTHDR;
 2628                 init_cluster_mbuf(cl, flags, fl->type, fl->zone);
 2629                 m0->m_len = len;
 2630         }               
 2631         switch(sopeop) {
 2632         case RSPQ_SOP_EOP:
 2633                 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
 2634                 mh->mh_head = mh->mh_tail = m;
 2635                 m->m_pkthdr.len = len;
 2636                 ret = 1;
 2637                 break;
 2638         case RSPQ_NSOP_NEOP:
 2639                 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
 2640                 if (mh->mh_tail == NULL) {
 2641                         log(LOG_ERR, "discarding intermediate descriptor entry\n");
 2642                         m_freem(m);
 2643                         break;
 2644                 }
 2645                 mh->mh_tail->m_next = m;
 2646                 mh->mh_tail = m;
 2647                 mh->mh_head->m_pkthdr.len += len;
 2648                 ret = 0;
 2649                 break;
 2650         case RSPQ_SOP:
 2651                 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
 2652                 m->m_pkthdr.len = len;
 2653                 mh->mh_head = mh->mh_tail = m;
 2654                 ret = 0;
 2655                 break;
 2656         case RSPQ_EOP:
 2657                 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
 2658                 mh->mh_head->m_pkthdr.len += len;
 2659                 mh->mh_tail->m_next = m;
 2660                 mh->mh_tail = m;
 2661                 ret = 1;
 2662                 break;
 2663         }
 2664         if (++fl->cidx == fl->size)
 2665                 fl->cidx = 0;
 2666 
 2667         return (ret);
 2668 }
 2669 
 2670 #else
 2671 
 2672 static int
 2673 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
 2674     struct mbuf **m, struct rsp_desc *r)
 2675 {
 2676         
 2677         unsigned int len_cq =  ntohl(r->len_cq);
 2678         struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
 2679         struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 2680         uint32_t len = G_RSPD_LEN(len_cq);
 2681         uint32_t flags = ntohl(r->flags);
 2682         uint8_t sopeop = G_RSPD_SOP_EOP(flags);
 2683         void *cl;
 2684         int ret = 0;
 2685         struct mbuf *m0;
 2686 #if 0
 2687         if ((sd + 1 )->rxsd_cl)
 2688                 prefetch((sd + 1)->rxsd_cl);
 2689         if ((sd + 2)->rxsd_cl)
 2690                 prefetch((sd + 2)->rxsd_cl);
 2691 #endif
 2692         DPRINTF("rx cpu=%d\n", curcpu);
 2693         fl->credits--;
 2694         bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
 2695 
 2696         if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) {
 2697                 if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
 2698                         goto skip_recycle;
 2699                 cl = mtod(m0, void *);
 2700                 memcpy(cl, sd->data, len);
 2701                 recycle_rx_buf(adap, fl, fl->cidx);
 2702                 *m = m0;
 2703         } else {
 2704         skip_recycle:
 2705                 bus_dmamap_unload(fl->entry_tag, sd->map);
 2706                 cl = sd->rxsd_cl;
 2707                 *m = m0 = (struct mbuf *)cl;
 2708         }
 2709 
 2710         switch(sopeop) {
 2711         case RSPQ_SOP_EOP:
 2712                 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
 2713                 if (cl == sd->rxsd_cl)
 2714                         init_cluster_mbuf(cl, M_PKTHDR, fl->type, fl->zone);
 2715                 m0->m_len = m0->m_pkthdr.len = len;
 2716                 ret = 1;
 2717                 goto done;
 2718                 break;
 2719         case RSPQ_NSOP_NEOP:
 2720                 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
 2721                 panic("chaining unsupported");
 2722                 ret = 0;
 2723                 break;
 2724         case RSPQ_SOP:
 2725                 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
 2726                 panic("chaining unsupported");
 2727                 m_iovinit(m0);
 2728                 ret = 0;
 2729                 break;
 2730         case RSPQ_EOP:
 2731                 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
 2732                 panic("chaining unsupported");
 2733                 ret = 1;
 2734                 break;
 2735         }
 2736         panic("append not supported");
 2737 #if 0   
 2738         m_iovappend(m0, cl, fl->buf_size, len, sizeof(uint32_t), sd->rxsd_ref);
 2739 #endif  
 2740 done:   
 2741         if (++fl->cidx == fl->size)
 2742                 fl->cidx = 0;
 2743 
 2744         return (ret);
 2745 }
 2746 #endif
 2747 /**
 2748  *      handle_rsp_cntrl_info - handles control information in a response
 2749  *      @qs: the queue set corresponding to the response
 2750  *      @flags: the response control flags
 2751  *
 2752  *      Handles the control information of an SGE response, such as GTS
 2753  *      indications and completion credits for the queue set's Tx queues.
 2754  *      HW coalesces credits, we don't do any extra SW coalescing.
 2755  */
 2756 static __inline void
 2757 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
 2758 {
 2759         unsigned int credits;
 2760 
 2761 #if USE_GTS
 2762         if (flags & F_RSPD_TXQ0_GTS)
 2763                 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
 2764 #endif
 2765         credits = G_RSPD_TXQ0_CR(flags);
 2766         if (credits) 
 2767                 qs->txq[TXQ_ETH].processed += credits;
 2768         
 2769         credits = G_RSPD_TXQ2_CR(flags);
 2770         if (credits) 
 2771                 qs->txq[TXQ_CTRL].processed += credits;
 2772 
 2773 # if USE_GTS
 2774         if (flags & F_RSPD_TXQ1_GTS)
 2775                 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
 2776 # endif
 2777         credits = G_RSPD_TXQ1_CR(flags);
 2778         if (credits)
 2779                 qs->txq[TXQ_OFLD].processed += credits;
 2780 
 2781 }
 2782 
 2783 static void
 2784 check_ring_db(adapter_t *adap, struct sge_qset *qs,
 2785     unsigned int sleeping)
 2786 {
 2787         ;
 2788 }
 2789 
 2790 /**
 2791  *      process_responses - process responses from an SGE response queue
 2792  *      @adap: the adapter
 2793  *      @qs: the queue set to which the response queue belongs
 2794  *      @budget: how many responses can be processed in this round
 2795  *
 2796  *      Process responses from an SGE response queue up to the supplied budget.
 2797  *      Responses include received packets as well as credits and other events
 2798  *      for the queues that belong to the response queue's queue set.
 2799  *      A negative budget is effectively unlimited.
 2800  *
 2801  *      Additionally choose the interrupt holdoff time for the next interrupt
 2802  *      on this queue.  If the system is under memory shortage, use a fairly
 2803  *      long delay to help recovery.
 2804  */
 2805 int
 2806 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
 2807 {
 2808         struct sge_rspq *rspq = &qs->rspq;
 2809         struct rsp_desc *r = &rspq->desc[rspq->cidx];
 2810         int budget_left = budget;
 2811         unsigned int sleeping = 0;
 2812         int lro_enabled = qs->lro.enabled;
 2813         int skip_lro;
 2814         struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
 2815         struct mbuf *offload_mbufs[RX_BUNDLE_SIZE];
 2816         int ngathered = 0;
 2817 #ifdef DEBUG    
 2818         static int last_holdoff = 0;
 2819         if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
 2820                 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
 2821                 last_holdoff = rspq->holdoff_tmr;
 2822         }
 2823 #endif
 2824         rspq->next_holdoff = rspq->holdoff_tmr;
 2825 
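              /*
               * Consume new response descriptors until the budget is exhausted
               * or no further responses are pending, replenishing free-list and
               * response-queue credits along the way.
               */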
 2826         while (__predict_true(budget_left && is_new_response(r, rspq))) {
 2827                 int eth, eop = 0, ethpad = 0;
 2828                 uint32_t flags = ntohl(r->flags);
 2829                 uint32_t rss_csum = *(const uint32_t *)r;
 2830                 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
 2831                 
 2832                 eth = (r->rss_hdr.opcode == CPL_RX_PKT);
 2833                 
 2834                 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
 2835                         struct mbuf *m;
 2836 
 2837                         if (cxgb_debug)
 2838                                 printf("async notification\n");
 2839 
 2840                         if (rspq->rspq_mh.mh_head == NULL) {
 2841                                 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
 2842                                 m = rspq->rspq_mh.mh_head;
 2843                         } else {
 2844                                 m = m_gethdr(M_DONTWAIT, MT_DATA);
 2845                         }
 2846 
 2847                         /* XXX m is leaked here if rspq->rspq_mh.mh_head was already non-NULL */
 2848 
 2849                         if (m == NULL)
 2850                                 goto no_mem;
 2851 
 2852                         memcpy(mtod(m, char *), r, AN_PKT_SIZE);
 2853                         m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
 2854                         *mtod(m, char *) = CPL_ASYNC_NOTIF;
 2855                         rss_csum = htonl(CPL_ASYNC_NOTIF << 24);
 2856                         eop = 1;
 2857                         rspq->async_notif++;
 2858                         goto skip;
 2859                 } else if  (flags & F_RSPD_IMM_DATA_VALID) {
 2860                         struct mbuf *m = NULL;
 2861 
 2862                         DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n",
 2863                             r->rss_hdr.opcode, rspq->cidx);
 2864                         if (rspq->rspq_mh.mh_head == NULL)
 2865                                 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
 2866                         else 
 2867                                 m = m_gethdr(M_DONTWAIT, MT_DATA);
 2868 
 2869                         if (rspq->rspq_mh.mh_head == NULL &&  m == NULL) {      
 2870                 no_mem:
 2871                                 rspq->next_holdoff = NOMEM_INTR_DELAY;
 2872                                 budget_left--;
 2873                                 break;
 2874                         }
 2875                         get_imm_packet(adap, r, rspq->rspq_mh.mh_head);
 2876                         eop = 1;
 2877                         rspq->imm_data++;
 2878                 } else if (r->len_cq) {
 2879                         int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
 2880                         
 2881 #ifdef DISABLE_MBUF_IOVEC
 2882                         eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r);
 2883 #else
 2884                         eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
 2885 #endif
 2886 #ifdef IFNET_MULTIQUEUE
 2887                         rspq->rspq_mh.mh_head->m_pkthdr.rss_hash = rss_hash;
 2888 #endif                  
 2889                         ethpad = 2;
 2890                 } else {
 2891                         DPRINTF("pure response\n");
 2892                         rspq->pure_rsps++;
 2893                 }
 2894         skip:
 2895                 if (flags & RSPD_CTRL_MASK) {
 2896                         sleeping |= flags & RSPD_GTS_MASK;
 2897                         handle_rsp_cntrl_info(qs, flags);
 2898                 }
 2899 
 2900                 r++;
 2901                 if (__predict_false(++rspq->cidx == rspq->size)) {
 2902                         rspq->cidx = 0;
 2903                         rspq->gen ^= 1;
 2904                         r = rspq->desc;
 2905                 }
 2906                 prefetch(r);
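                      /*
                       * Return accumulated credits to the response queue once a
                       * quarter of its entries have been consumed.
                       */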
 2907                 if (++rspq->credits >= (rspq->size / 4)) {
 2908                         refill_rspq(adap, rspq, rspq->credits);
 2909                         rspq->credits = 0;
 2910                 }
 2911                 DPRINTF("eth=%d eop=%d flags=0x%x\n", eth, eop, flags);
 2912 
 2913                 if (!eth && eop) {
 2914                         rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
 2915                         /*
 2916                          * XXX size mismatch
 2917                          */
 2918                         m_set_priority(rspq->rspq_mh.mh_head, rss_hash);
 2919 
 2920                         
 2921                         ngathered = rx_offload(&adap->tdev, rspq,
 2922                             rspq->rspq_mh.mh_head, offload_mbufs, ngathered);
 2923                         rspq->rspq_mh.mh_head = NULL;
 2924                         DPRINTF("received offload packet\n");
 2925                         
 2926                 } else if (eth && eop) {
 2927                         struct mbuf *m = rspq->rspq_mh.mh_head;
 2928                         prefetch(mtod(m, uint8_t *)); 
 2929                         prefetch(mtod(m, uint8_t *) + L1_CACHE_BYTES);
 2930 
 2931                         t3_rx_eth(adap, rspq, m, ethpad);
 2932                         /*
 2933                          * The T304 sends incoming packets on any qset.  If LRO
 2934                          * is also enabled, we could end up sending the packet up
 2935                          * lro_ctrl->ifp's input path, which would be incorrect.
 2936                          *
 2937                          * The mbuf's rcvif was derived from the cpl header and
 2938                          * is accurate.  Skip LRO and just use that.
 2939                          */
 2940                         skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
 2941 
 2942                         if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro &&
 2943                             (tcp_lro_rx(lro_ctrl, m, 0) == 0)) {
 2944                                 /* successfully queued for LRO */
 2945                         } else {
 2946                                 /*
 2947                                  * LRO not enabled, packet unsuitable for LRO,
 2948                                  * or unable to queue.  Pass it up right now in
 2949                                  * either case.
 2950                                  */
 2951                                 struct ifnet *ifp = m->m_pkthdr.rcvif;
 2952                                 (*ifp->if_input)(ifp, m);
 2953                         }
 2954                         DPRINTF("received tunnel packet\n");
 2955                         rspq->rspq_mh.mh_head = NULL;
 2956 
 2957                 }
 2958                 __refill_fl_lt(adap, &qs->fl[0], 32);
 2959                 __refill_fl_lt(adap, &qs->fl[1], 32);
 2960                 --budget_left;
 2961         }
 2962 
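              /* Hand any remaining gathered offload mbufs to the offload device. */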
 2963         deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered);
 2964 
 2965         /* Flush LRO */
 2966         while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
 2967                 struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
 2968                 SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
 2969                 tcp_lro_flush(lro_ctrl, queued);
 2970         }
 2971 
 2972         if (sleeping)
 2973                 check_ring_db(adap, qs, sleeping);
 2974 
 2975         smp_mb();  /* commit Tx queue processed updates */
 2976         if (__predict_false(qs->txq_stopped > 1)) {
 2977                 printf("restarting tx on %p\n", qs);
 2978                 
 2979                 restart_tx(qs);
 2980         }
 2981         
 2982         __refill_fl_lt(adap, &qs->fl[0], 512);
 2983         __refill_fl_lt(adap, &qs->fl[1], 512);
 2984         budget -= budget_left;
 2985         return (budget);
 2986 }
 2987 
 2988 /*
 2989  * A helper function that processes responses and issues GTS.
 2990  */
 2991 static __inline int
 2992 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
 2993 {
 2994         int work;
 2995         static int last_holdoff = 0;
 2996         
 2997         work = process_responses(adap, rspq_to_qset(rq), -1);
 2998 
 2999         if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
 3000                 printf("next_holdoff=%d\n", rq->next_holdoff);
 3001                 last_holdoff = rq->next_holdoff;
 3002         }
 3003         t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
 3004             V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
 3005         
 3006         return (work);
 3007 }
 3008 
 3009 
 3010 /*
 3011  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 3012  * Handles data events from SGE response queues as well as error and other
 3013  * async events as they all use the same interrupt pin.  We use one SGE
 3014  * response queue per port in this mode and protect all response queues with
 3015  * queue 0's lock.
 3016  */
 3017 void
 3018 t3b_intr(void *data)
 3019 {
 3020         uint32_t i, map;
 3021         adapter_t *adap = data;
 3022         struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
 3023         
 3024         t3_write_reg(adap, A_PL_CLI, 0);
 3025         map = t3_read_reg(adap, A_SG_DATA_INTR);
 3026 
 3027         if (!map) 
 3028                 return;
 3029 
 3030         if (__predict_false(map & F_ERRINTR))
 3031                 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
 3032 
 3033         mtx_lock(&q0->lock);
 3034         for_each_port(adap, i)
 3035                 if (map & (1 << i))
 3036                         process_responses_gts(adap, &adap->sge.qs[i].rspq);
 3037         mtx_unlock(&q0->lock);
 3038 }
 3039 
 3040 /*
 3041  * The MSI interrupt handler.  This needs to handle data events from SGE
 3042  * response queues as well as error and other async events as they all use
 3043  * the same MSI vector.  We use one SGE response queue per port in this mode
 3044  * and protect all response queues with queue 0's lock.
 3045  */
 3046 void
 3047 t3_intr_msi(void *data)
 3048 {
 3049         adapter_t *adap = data;
 3050         struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
 3051         int i, new_packets = 0;
 3052 
 3053         mtx_lock(&q0->lock);
 3054 
 3055         for_each_port(adap, i)
 3056                 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
 3057                         new_packets = 1;
 3058         mtx_unlock(&q0->lock);
 3059         if (new_packets == 0)
 3060                 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
 3061 }
 3062 
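      /*
       * The MSI-X interrupt handler.  Each queue set has its own vector and
       * response queue, so each response queue is protected by its own lock.
       * With IFNET_MULTIQUEUE the lock is only tried; if it is contended the
       * handler returns without processing.
       */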
 3063 void
 3064 t3_intr_msix(void *data)
 3065 {
 3066         struct sge_qset *qs = data;
 3067         adapter_t *adap = qs->port->adapter;
 3068         struct sge_rspq *rspq = &qs->rspq;
 3069 #ifndef IFNET_MULTIQUEUE
 3070         mtx_lock(&rspq->lock);
 3071 #else   
 3072         if (mtx_trylock(&rspq->lock)) 
 3073 #endif
 3074         {
 3075                 
 3076                 if (process_responses_gts(adap, rspq) == 0)
 3077                         rspq->unhandled_irqs++;
 3078                 mtx_unlock(&rspq->lock);
 3079         }
 3080 }
 3081 
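      /*
       * Initial sbuf size for the queue-dump sysctl handlers below: room for
       * roughly 400 formatted lines of about 32 characters each.  The handlers
       * retry with a larger multiple whenever the sbuf overflows.
       */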
 3082 #define QDUMP_SBUF_SIZE         (32 * 400)
 3083 static int
 3084 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
 3085 {
 3086         struct sge_rspq *rspq;
 3087         struct sge_qset *qs;
 3088         int i, err, dump_end, idx;
 3089         static int multiplier = 1;
 3090         struct sbuf *sb;
 3091         struct rsp_desc *rspd;
 3092         uint32_t data[4];
 3093         
 3094         rspq = arg1;
 3095         qs = rspq_to_qset(rspq);
 3096         if (rspq->rspq_dump_count == 0) 
 3097                 return (0);
 3098         if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
 3099                 log(LOG_WARNING,
 3100                     "dump count %d is too large\n", rspq->rspq_dump_count);
 3101                 rspq->rspq_dump_count = 0;
 3102                 return (EINVAL);
 3103         }
 3104         if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
 3105                 log(LOG_WARNING,
 3106                     "dump start of %d is greater than queue size\n",
 3107                     rspq->rspq_dump_start);
 3108                 rspq->rspq_dump_start = 0;
 3109                 return (EINVAL);
 3110         }
 3111         err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
 3112         if (err)
 3113                 return (err);
 3114 retry_sbufops:
 3115         sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
 3116 
 3117         sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
 3118             (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
 3119             ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
 3120         sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
 3121             ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
 3122         
 3123         sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
 3124             (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
 3125         
 3126         dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
 3127         for (i = rspq->rspq_dump_start; i < dump_end; i++) {
 3128                 idx = i & (RSPQ_Q_SIZE-1);
 3129                 
 3130                 rspd = &rspq->desc[idx];
 3131                 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
 3132                     idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
 3133                     rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
 3134                 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
 3135                     rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
 3136                     be32toh(rspd->len_cq), rspd->intr_gen);
 3137         }
 3138         if (sbuf_overflowed(sb)) {
 3139                 sbuf_delete(sb);
 3140                 multiplier++;
 3141                 goto retry_sbufops;
 3142         }
 3143         sbuf_finish(sb);
 3144         err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
 3145         sbuf_delete(sb);
 3146         return (err);
 3147 }       
 3148 
 3149 static int
 3150 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
 3151 {
 3152         struct sge_txq *txq;
 3153         struct sge_qset *qs;
 3154         int i, j, err, dump_end;
 3155         static int multiplier = 1;
 3156         struct sbuf *sb;
 3157         struct tx_desc *txd;
 3158         uint32_t *WR, wr_hi, wr_lo, gen;
 3159         uint32_t data[4];
 3160         
 3161         txq = arg1;
 3162         qs = txq_to_qset(txq, TXQ_ETH);
 3163         if (txq->txq_dump_count == 0) {
 3164                 return (0);
 3165         }
 3166         if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
 3167                 log(LOG_WARNING,
 3168                     "dump count %d is too large\n", txq->txq_dump_count);
 3169                 txq->txq_dump_count = 1;
 3170                 return (EINVAL);
 3171         }
 3172         if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
 3173                 log(LOG_WARNING,
 3174                     "dump start of %d is greater than queue size\n",
 3175                     txq->txq_dump_start);
 3176                 txq->txq_dump_start = 0;
 3177                 return (EINVAL);
 3178         }
 3179         err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
 3180         if (err)
 3181                 return (err);
 3182
 3183
 3184 retry_sbufops:
 3185         sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
 3186 
 3187         sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
 3188             (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16), 
 3189             (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
 3190         sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
 3191             ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
 3192             ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
 3193         sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
 3194             txq->txq_dump_start,
 3195             (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
 3196 
 3197         dump_end = txq->txq_dump_start + txq->txq_dump_count;
 3198         for (i = txq->txq_dump_start; i < dump_end; i++) {
 3199                 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
 3200                 WR = (uint32_t *)txd->flit;
 3201                 wr_hi = ntohl(WR[0]);
 3202                 wr_lo = ntohl(WR[1]);           
 3203                 gen = G_WR_GEN(wr_lo);
 3204                 
 3205                 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
 3206                     wr_hi, wr_lo, gen);
 3207                 for (j = 2; j < 30; j += 4) 
 3208                         sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
 3209                             WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
 3210 
 3211         }
 3212         if (sbuf_overflowed(sb)) {
 3213                 sbuf_delete(sb);
 3214                 multiplier++;
 3215                 goto retry_sbufops;
 3216         }
 3217         sbuf_finish(sb);
 3218         err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
 3219         sbuf_delete(sb);
 3220         return (err);
 3221 }
 3222 
 3223 static int
 3224 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
 3225 {
 3226         struct sge_txq *txq;
 3227         struct sge_qset *qs;
 3228         int i, j, err, dump_end;
 3229         static int multiplier = 1;
 3230         struct sbuf *sb;
 3231         struct tx_desc *txd;
 3232         uint32_t *WR, wr_hi, wr_lo, gen;
 3233         
 3234         txq = arg1;
 3235         qs = txq_to_qset(txq, TXQ_CTRL);
 3236         if (txq->txq_dump_count == 0) {
 3237                 return (0);
 3238         }
 3239         if (txq->txq_dump_count > 256) {
 3240                 log(LOG_WARNING,
 3241                     "dump count %d is too large\n", txq->txq_dump_count);
 3242                 txq->txq_dump_count = 1;
 3243                 return (EINVAL);
 3244         }
 3245         if (txq->txq_dump_start > 255) {
 3246                 log(LOG_WARNING,
 3247                     "dump start of %d is greater than queue size\n",
 3248                     txq->txq_dump_start);
 3249                 txq->txq_dump_start = 0;
 3250                 return (EINVAL);
 3251         }
 3252 
 3253 retry_sbufops:
 3254         sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
 3255         sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
 3256             txq->txq_dump_start,
 3257             (txq->txq_dump_start + txq->txq_dump_count) & 255);
 3258 
 3259         dump_end = txq->txq_dump_start + txq->txq_dump_count;
 3260         for (i = txq->txq_dump_start; i < dump_end; i++) {
 3261                 txd = &txq->desc[i & (255)];
 3262                 WR = (uint32_t *)txd->flit;
 3263                 wr_hi = ntohl(WR[0]);
 3264                 wr_lo = ntohl(WR[1]);           
 3265                 gen = G_WR_GEN(wr_lo);
 3266                 
 3267                 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
 3268                     wr_hi, wr_lo, gen);
 3269                 for (j = 2; j < 30; j += 4) 
 3270                         sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
 3271                             WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
 3272 
 3273         }
 3274         if (sbuf_overflowed(sb)) {
 3275                 sbuf_delete(sb);
 3276                 multiplier++;
 3277                 goto retry_sbufops;
 3278         }
 3279         sbuf_finish(sb);
 3280         err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
 3281         sbuf_delete(sb);
 3282         return (err);
 3283 }
 3284 
 3285 static int
 3286 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
 3287 {
 3288         adapter_t *sc = arg1;
 3289         struct qset_params *qsp = &sc->params.sge.qset[0]; 
 3290         int coalesce_usecs;     
 3291         struct sge_qset *qs;
 3292         int i, j, err, nqsets = 0;
 3293         struct mtx *lock;
 3294 
 3295         if ((sc->flags & FULL_INIT_DONE) == 0)
 3296                 return (ENXIO);
 3297                 
 3298         coalesce_usecs = qsp->coalesce_usecs;
 3299         err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
 3300 
 3301         if (err != 0) {
 3302                 return (err);
 3303         }
 3304         if (coalesce_usecs == qsp->coalesce_usecs)
 3305                 return (0);
 3306 
 3307         for (i = 0; i < sc->params.nports; i++) 
 3308                 for (j = 0; j < sc->port[i].nqsets; j++)
 3309                         nqsets++;
 3310 
 3311         coalesce_usecs = max(1, coalesce_usecs);
 3312 
 3313         for (i = 0; i < nqsets; i++) {
 3314                 qs = &sc->sge.qs[i];
 3315                 qsp = &sc->params.sge.qset[i];
 3316                 qsp->coalesce_usecs = coalesce_usecs;
 3317                 
 3318                 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
 3319                             &sc->sge.qs[0].rspq.lock;
 3320 
 3321                 mtx_lock(lock);
 3322                 t3_update_qset_coalesce(qs, qsp);
 3323                 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
 3324                     V_NEWTIMER(qs->rspq.holdoff_tmr));
 3325                 mtx_unlock(lock);
 3326         }
 3327 
 3328         return (0);
 3329 }
 3330 
 3331 
 3332 void
 3333 t3_add_attach_sysctls(adapter_t *sc)
 3334 {
 3335         struct sysctl_ctx_list *ctx;
 3336         struct sysctl_oid_list *children;
 3337 
 3338         ctx = device_get_sysctl_ctx(sc->dev);
 3339         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3340 
 3341         /* random information */
 3342         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 
 3343             "firmware_version",
 3344             CTLFLAG_RD, &sc->fw_version,
 3345             0, "firmware version");
 3346         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3347             "hw_revision",
 3348             CTLFLAG_RD, &sc->params.rev,
 3349             0, "chip hardware revision");
 3350         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3351             "enable_debug",
 3352             CTLFLAG_RW, &cxgb_debug,
 3353             0, "enable verbose debugging output");
 3354         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tunq_coalesce",
 3355             CTLFLAG_RD, &sc->tunq_coalesce,
 3356             "#tunneled packets freed");
 3357         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3358             "txq_overrun",
 3359             CTLFLAG_RD, &txq_fills,
 3360             0, "#times txq overrun");
 3361         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3362             "pcpu_cache_enable",
 3363             CTLFLAG_RW, &cxgb_pcpu_cache_enable,
 3364             0, "enable driver local pcpu caches");
 3365         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3366             "cache_alloc",
 3367             CTLFLAG_RD, &cxgb_cached_allocations,
 3368             0, "#times a cluster was allocated from cache");
 3369         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3370             "cached",
 3371             CTLFLAG_RD, &cxgb_cached,
 3372             0, "#times a cluster was cached");
 3373         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3374             "ext_freed",
 3375             CTLFLAG_RD, &cxgb_ext_freed,
 3376             0, "#times a cluster was freed through ext_free");
 3377         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3378             "ext_inited",
 3379             CTLFLAG_RD, &cxgb_ext_inited,
 3380             0, "#times a cluster was initialized for ext_free");
 3381         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3382             "mbufs_outstanding",
 3383             CTLFLAG_RD, &cxgb_mbufs_outstanding,
 3384             0, "#mbufs in flight in the driver");
 3385         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3386             "pack_outstanding",
 3387             CTLFLAG_RD, &cxgb_pack_outstanding,
 3388             0, "#packets in flight in the driver");
 3389 }
 3390 
 3391 
 3392 static const char *rspq_name = "rspq";
 3393 static const char *txq_names[] =
 3394 {
 3395         "txq_eth",
 3396         "txq_ofld",
 3397         "txq_ctrl"      
 3398 };
 3399 
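      /*
       * arg2 carries the byte offset of the requested counter within
       * struct mac_stats; see the comment above CXGB_SYSCTL_ADD_QUAD in
       * t3_add_configured_sysctls() below.
       */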
 3400 static int
 3401 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
 3402 {
 3403         struct port_info *p = arg1;
 3404         uint64_t *parg;
 3405 
 3406         if (!p)
 3407                 return (EINVAL);
 3408 
 3409         parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
 3410 
 3411         PORT_LOCK(p);
 3412         t3_mac_update_stats(&p->mac);
 3413         PORT_UNLOCK(p);
 3414 
 3415         return (sysctl_handle_quad(oidp, parg, 0, req));
 3416 }
 3417 
 3418 void
 3419 t3_add_configured_sysctls(adapter_t *sc)
 3420 {
 3421         struct sysctl_ctx_list *ctx;
 3422         struct sysctl_oid_list *children;
 3423         int i, j;
 3424         
 3425         ctx = device_get_sysctl_ctx(sc->dev);
 3426         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3427 
 3428         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 
 3429             "intr_coal",
 3430             CTLTYPE_INT|CTLFLAG_RW, sc,
 3431             0, t3_set_coalesce_usecs,
 3432             "I", "interrupt coalescing timer (us)");
 3433 
 3434         for (i = 0; i < sc->params.nports; i++) {
 3435                 struct port_info *pi = &sc->port[i];
 3436                 struct sysctl_oid *poid;
 3437                 struct sysctl_oid_list *poidlist;
 3438                 struct mac_stats *mstats = &pi->mac.stats;
 3439                 
 3440                 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
 3441                 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 
 3442                     pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
 3443                 poidlist = SYSCTL_CHILDREN(poid);
 3444                 SYSCTL_ADD_INT(ctx, poidlist, OID_AUTO, 
 3445                     "nqsets", CTLFLAG_RD, &pi->nqsets,
 3446                     0, "#queue sets");
 3447 
 3448                 for (j = 0; j < pi->nqsets; j++) {
 3449                         struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
 3450                         struct sysctl_oid *qspoid, *rspqpoid, *txqpoid, *ctrlqpoid, *lropoid;
 3451                         struct sysctl_oid_list *qspoidlist, *rspqpoidlist, *txqpoidlist, *ctrlqpoidlist, *lropoidlist;
 3452                         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 3453                         
 3454                         snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
 3455                         
 3456                         qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, 
 3457                             qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
 3458                         qspoidlist = SYSCTL_CHILDREN(qspoid);
 3459                         
 3460                         rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3461                             rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
 3462                         rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
 3463 
 3464                         txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3465                             txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
 3466                         txqpoidlist = SYSCTL_CHILDREN(txqpoid);
 3467 
 3468                         ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3469                             txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
 3470                         ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
 3471 
 3472                         lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3473                             "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
 3474                         lropoidlist = SYSCTL_CHILDREN(lropoid);
 3475 
 3476                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
 3477                             CTLFLAG_RD, &qs->rspq.size,
 3478                             0, "#entries in response queue");
 3479                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
 3480                             CTLFLAG_RD, &qs->rspq.cidx,
 3481                             0, "consumer index");
 3482                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
 3483                             CTLFLAG_RD, &qs->rspq.credits,
 3484                             0, "#credits");
 3485                         SYSCTL_ADD_XLONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
 3486                             CTLFLAG_RD, &qs->rspq.phys_addr,
 3487                             "physical address of the queue");
 3488                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
 3489                             CTLFLAG_RW, &qs->rspq.rspq_dump_start,
 3490                             0, "start rspq dump entry");
 3491                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
 3492                             CTLFLAG_RW, &qs->rspq.rspq_dump_count,
 3493                             0, "#rspq entries to dump");
 3494                         SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
 3495                             CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
 3496                             0, t3_dump_rspq, "A", "dump of the response queue");
 3497 
 3498 
 3499                         SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped",
 3500                             CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops,
 3501                             0, "#tunneled packets dropped");
 3502                         SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
 3503                             CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
 3504                             0, "#tunneled packets waiting to be sent");
 3505                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
 3506                             CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
 3507                             0, "#tunneled packets queue producer index");
 3508                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
 3509                             CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
 3510                             0, "#tunneled packets queue consumer index");
 3511                         SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "processed",
 3512                             CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
 3513                             0, "#tunneled packets processed by the card");
 3514                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
 3515                             CTLFLAG_RD, &txq->cleaned,
 3516                             0, "#tunneled packets cleaned");
 3517                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
 3518                             CTLFLAG_RD, &txq->in_use,
 3519                             0, "#tunneled packet slots in use");
 3520                         SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
 3521                             CTLFLAG_RD, &txq->txq_frees,
 3522                             "#tunneled packets freed");
 3523                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
 3524                             CTLFLAG_RD, &txq->txq_skipped,
 3525                             0, "#tunneled packet descriptors skipped");
 3526                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "coalesced",
 3527                             CTLFLAG_RD, &txq->txq_coalesced,
 3528                             0, "#tunneled packets coalesced");
 3529                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
 3530                             CTLFLAG_RD, &txq->txq_enqueued,
 3531                             0, "#tunneled packets enqueued to hardware");
 3532                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
 3533                             CTLFLAG_RD, &qs->txq_stopped,
 3534                             0, "tx queues stopped");
 3535                         SYSCTL_ADD_XLONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
 3536                             CTLFLAG_RD, &txq->phys_addr,
 3537                             "physical address of the queue");
 3538                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
 3539                             CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
 3540                             0, "txq generation");
 3541                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
 3542                             CTLFLAG_RD, &txq->cidx,
 3543                             0, "hardware queue cidx");                  
 3544                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
 3545                             CTLFLAG_RD, &txq->pidx,
 3546                             0, "hardware queue pidx");
 3547                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
 3548                             CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
 3549                             0, "txq start idx for dump");
 3550                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
 3551                             CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
 3552                             0, "txq #entries to dump");                 
 3553                         SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
 3554                             CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
 3555                             0, t3_dump_txq_eth, "A", "dump of the transmit queue");
 3556 
 3557                         SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
 3558                             CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
 3559                             0, "ctrlq start idx for dump");
 3560                         SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
 3561                             CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
 3562                             0, "ctrl #entries to dump");                        
 3563                         SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
 3564                             CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
 3565                             0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
 3566 
 3567                         SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
 3568                             CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
 3569                         SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
 3570                             CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
 3571                         SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
 3572                             CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
 3573                         SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
 3574                             CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
 3575                 }
 3576 
 3577                 /* Now add a node for mac stats. */
 3578                 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
 3579                     CTLFLAG_RD, NULL, "MAC statistics");
 3580                 poidlist = SYSCTL_CHILDREN(poid);
 3581 
 3582                 /*
 3583                  * We (ab)use the length argument (arg2) to pass on the offset
 3584                  * of the data that we are interested in.  This is only required
 3585                  * for the quad counters that are updated from the hardware (we
 3586                  * make sure that we return the latest value).
 3587                  * sysctl_handle_macstat first updates *all* the counters from
 3588                  * the hardware, and then returns the latest value of the
 3589                  * requested counter.  Best would be to update only the
 3590                  * requested counter from hardware, but t3_mac_update_stats()
 3591                  * hides all the register details and we don't want to dive into
 3592                  * all that here.
 3593                  */
 3594 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
 3595     (CTLTYPE_QUAD | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
 3596     sysctl_handle_macstat, "QU", 0)
 3597                 CXGB_SYSCTL_ADD_QUAD(tx_octets);
 3598                 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
 3599                 CXGB_SYSCTL_ADD_QUAD(tx_frames);
 3600                 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
 3601                 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
 3602                 CXGB_SYSCTL_ADD_QUAD(tx_pause);
 3603                 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
 3604                 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
 3605                 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
 3606                 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
 3607                 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
 3608                 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
 3609                 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
 3610                 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
 3611                 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
 3612                 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
 3613                 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
 3614                 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
 3615                 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
 3616                 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
 3617                 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
 3618                 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
 3619                 CXGB_SYSCTL_ADD_QUAD(rx_octets);
 3620                 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
 3621                 CXGB_SYSCTL_ADD_QUAD(rx_frames);
 3622                 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
 3623                 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
 3624                 CXGB_SYSCTL_ADD_QUAD(rx_pause);
 3625                 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
 3626                 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
 3627                 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
 3628                 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
 3629                 CXGB_SYSCTL_ADD_QUAD(rx_runt);
 3630                 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
 3631                 CXGB_SYSCTL_ADD_QUAD(rx_short);
 3632                 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
 3633                 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
 3634                 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
 3635                 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
 3636                 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
 3637                 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
 3638                 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
 3639                 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
 3640                 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
 3641                 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
 3642 #undef CXGB_SYSCTL_ADD_QUAD
 3643 
 3644 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
 3645     CTLFLAG_RD, &mstats->a, 0)
 3646                 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
 3647                 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
 3648                 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
 3649                 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
 3650                 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
 3651                 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
 3652                 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
 3653                 CXGB_SYSCTL_ADD_ULONG(num_toggled);
 3654                 CXGB_SYSCTL_ADD_ULONG(num_resets);
 3655 #undef CXGB_SYSCTL_ADD_ULONG
 3656         }
 3657 }
 3658         
 3659 /**
 3660  *      t3_get_desc - dump an SGE descriptor for debugging purposes
 3661  *      @qs: the queue set
 3662  *      @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
 3663  *      @idx: the descriptor index in the queue
 3664  *      @data: where to dump the descriptor contents
 3665  *
 3666  *      Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 3667  *      size of the descriptor.
 3668  */
 3669 int
 3670 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
 3671                 unsigned char *data)
 3672 {
 3673         if (qnum >= 6)
 3674                 return (EINVAL);
 3675 
 3676         if (qnum < 3) {
 3677                 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
 3678                         return (EINVAL);
 3679                 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
 3680                 return sizeof(struct tx_desc);
 3681         }
 3682 
 3683         if (qnum == 3) {
 3684                 if (!qs->rspq.desc || idx >= qs->rspq.size)
 3685                         return (EINVAL);
 3686                 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
 3687                 return sizeof(struct rsp_desc);
 3688         }
 3689 
 3690         qnum -= 4;
 3691         if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
 3692                 return (EINVAL);
 3693         memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
 3694         return sizeof(struct rx_desc);
 3695 }
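      /*
       * Hypothetical usage sketch (not part of the original driver): copy out
       * the third Tx descriptor of queue set 0 for inspection.  'sc' stands in
       * for an adapter softc pointer; hexdump() is the standard kernel helper
       * from <sys/systm.h>.
       *
       *      unsigned char buf[sizeof(struct tx_desc)];
       *      int len;
       *
       *      len = t3_get_desc(&sc->sge.qs[0], 0, 2, buf);
       *      if (len > 0)
       *              hexdump(buf, len, NULL, 0);
       */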
