FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgb/cxgb_sge.c


    1 /**************************************************************************
    2 
    3 Copyright (c) 2007-2009, Chelsio Inc.
    4 All rights reserved.
    5 
    6 Redistribution and use in source and binary forms, with or without
    7 modification, are permitted provided that the following conditions are met:
    8 
    9  1. Redistributions of source code must retain the above copyright notice,
   10     this list of conditions and the following disclaimer.
   11 
   12  2. Neither the name of the Chelsio Corporation nor the names of its
   13     contributors may be used to endorse or promote products derived from
   14     this software without specific prior written permission.
   15  
   16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26 POSSIBILITY OF SUCH DAMAGE.
   27 
   28 ***************************************************************************/
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include "opt_inet6.h"
   34 #include "opt_inet.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/systm.h>
   38 #include <sys/kernel.h>
   39 #include <sys/module.h>
   40 #include <sys/bus.h>
   41 #include <sys/conf.h>
   42 #include <machine/bus.h>
   43 #include <machine/resource.h>
   44 #include <sys/bus_dma.h>
   45 #include <sys/rman.h>
   46 #include <sys/queue.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/taskqueue.h>
   49 
   50 #include <sys/proc.h>
   51 #include <sys/sbuf.h>
   52 #include <sys/sched.h>
   53 #include <sys/smp.h>
   54 #include <sys/systm.h>
   55 #include <sys/syslog.h>
   56 #include <sys/socket.h>
   57 #include <sys/sglist.h>
   58 
   59 #include <net/if.h>
   60 #include <net/if_var.h>
   61 #include <net/bpf.h>    
   62 #include <net/ethernet.h>
   63 #include <net/if_vlan_var.h>
   64 
   65 #include <netinet/in_systm.h>
   66 #include <netinet/in.h>
   67 #include <netinet/ip.h>
   68 #include <netinet/ip6.h>
   69 #include <netinet/tcp.h>
   70 
   71 #include <dev/pci/pcireg.h>
   72 #include <dev/pci/pcivar.h>
   73 
   74 #include <vm/vm.h>
   75 #include <vm/pmap.h>
   76 
   77 #include <cxgb_include.h>
   78 #include <sys/mvec.h>
   79 
   80 int     txq_fills = 0;
   81 int     multiq_tx_enable = 1;
   82 
   83 #ifdef TCP_OFFLOAD
   84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
   85 #endif
   86 
   87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
   88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
   89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
   90     "size of per-queue mbuf ring");
   91 
   92 static int cxgb_tx_coalesce_force = 0;
   93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
   94     &cxgb_tx_coalesce_force, 0,
   95     "coalesce small packets into a single work request regardless of ring state");
   96 
   97 #define COALESCE_START_DEFAULT          (TX_ETH_Q_SIZE>>1)
   98 #define COALESCE_START_MAX              (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
   99 #define COALESCE_STOP_DEFAULT           (TX_ETH_Q_SIZE>>2)
  100 #define COALESCE_STOP_MIN               (TX_ETH_Q_SIZE>>5)
  101 #define TX_RECLAIM_DEFAULT              (TX_ETH_Q_SIZE>>5)
  102 #define TX_RECLAIM_MAX                  (TX_ETH_Q_SIZE>>2)
  103 #define TX_RECLAIM_MIN                  (TX_ETH_Q_SIZE>>6)
  104 
  105 
  106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
  107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
  108     &cxgb_tx_coalesce_enable_start, 0,
  109     "coalesce enable threshold");
  110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
  111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
  112     &cxgb_tx_coalesce_enable_stop, 0,
  113     "coalesce disable threshold");
  114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
  115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
  116     &cxgb_tx_reclaim_threshold, 0,
  117     "tx cleaning minimum threshold");
  118 
  119 /*
  120  * XXX don't re-enable this until TOE stops assuming
  121  * we have an m_ext
  122  */
  123 static int recycle_enable = 0;
  124 
  125 extern int cxgb_use_16k_clusters;
  126 extern int nmbjumbop;
  127 extern int nmbjumbo9;
  128 extern int nmbjumbo16;
  129 
  130 #define USE_GTS 0
  131 
  132 #define SGE_RX_SM_BUF_SIZE      1536
  133 #define SGE_RX_DROP_THRES       16
  134 #define SGE_RX_COPY_THRES       128
  135 
  136 /*
  137  * Period of the Tx buffer reclaim timer.  This timer does not need to run
  138  * frequently as Tx buffers are usually reclaimed by new Tx packets.
  139  */
  140 #define TX_RECLAIM_PERIOD       (hz >> 1)
  141 
  142 /* 
  143  * Values for sge_txq.flags
  144  */
  145 enum {
  146         TXQ_RUNNING     = 1 << 0,  /* fetch engine is running */
  147         TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
  148 };
  149 
  150 struct tx_desc {
  151         uint64_t        flit[TX_DESC_FLITS];
  152 } __packed;
  153 
  154 struct rx_desc {
  155         uint32_t        addr_lo;
  156         uint32_t        len_gen;
  157         uint32_t        gen2;
  158         uint32_t        addr_hi;
  159 } __packed;
  160 
  161 struct rsp_desc {               /* response queue descriptor */
  162         struct rss_header       rss_hdr;
  163         uint32_t                flags;
  164         uint32_t                len_cq;
  165         uint8_t                 imm_data[47];
  166         uint8_t                 intr_gen;
  167 } __packed;
  168 
  169 #define RX_SW_DESC_MAP_CREATED  (1 << 0)
  170 #define TX_SW_DESC_MAP_CREATED  (1 << 1)
  171 #define RX_SW_DESC_INUSE        (1 << 3)
  172 #define TX_SW_DESC_MAPPED       (1 << 4)
  173 
  174 #define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
  175 #define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
  176 #define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
  177 #define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
  178 
  179 struct tx_sw_desc {                /* SW state per Tx descriptor */
  180         struct mbuf     *m;
  181         bus_dmamap_t    map;
  182         int             flags;
  183 };
  184 
  185 struct rx_sw_desc {                /* SW state per Rx descriptor */
  186         caddr_t         rxsd_cl;
  187         struct mbuf     *m;
  188         bus_dmamap_t    map;
  189         int             flags;
  190 };
  191 
  192 struct txq_state {
  193         unsigned int    compl;
  194         unsigned int    gen;
  195         unsigned int    pidx;
  196 };
  197 
  198 struct refill_fl_cb_arg {
  199         int               error;
  200         bus_dma_segment_t seg;
  201         int               nseg;
  202 };
  203 
  204 
  205 /*
  206  * Maps a number of flits to the number of Tx descriptors that can hold them.
  207  * The formula is
  208  *
  209  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
  210  *
  211  * HW allows up to 4 descriptors to be combined into a WR.
  212  */
  213 static uint8_t flit_desc_map[] = {
  214         0,
  215 #if SGE_NUM_GENBITS == 1
  216         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  217         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  218         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  219         4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
  220 #elif SGE_NUM_GENBITS == 2
  221         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  222         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  223         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  224         4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  225 #else
  226 # error "SGE_NUM_GENBITS must be 1 or 2"
  227 #endif
  228 };
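      /*
       * Illustrative sketch only, kept out of the build with #if 0: the table
       * above should agree with the closed-form expression quoted in the
       * comment for every flit count it covers, with WR_FLITS being the
       * per-work-request flit capacity used elsewhere in this file.
       */
      #if 0
      static void
      flit_desc_map_selfcheck(void)
      {
              u_int flits;

              for (flits = 2; flits < nitems(flit_desc_map); flits++) {
                      u_int ndesc = 1 + (flits - 2) / (WR_FLITS - 1);

                      KASSERT(flit_desc_map[flits] == ndesc,
                          ("flit_desc_map[%u] is %u, formula gives %u",
                          flits, flit_desc_map[flits], ndesc));
              }
      }
      #endif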
  229 
  230 #define TXQ_LOCK_ASSERT(qs)     mtx_assert(&(qs)->lock, MA_OWNED)
  231 #define TXQ_TRYLOCK(qs)         mtx_trylock(&(qs)->lock)        
  232 #define TXQ_LOCK(qs)            mtx_lock(&(qs)->lock)   
  233 #define TXQ_UNLOCK(qs)          mtx_unlock(&(qs)->lock) 
  234 #define TXQ_RING_EMPTY(qs)      drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
  235 #define TXQ_RING_NEEDS_ENQUEUE(qs)                                      \
  236         drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
  237 #define TXQ_RING_FLUSH(qs)      drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
  238 #define TXQ_RING_DEQUEUE_COND(qs, func, arg)                            \
  239         drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
  240 #define TXQ_RING_DEQUEUE(qs) \
  241         drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
  242 
  243 int cxgb_debug = 0;
  244 
  245 static void sge_timer_cb(void *arg);
  246 static void sge_timer_reclaim(void *arg, int ncount);
  247 static void sge_txq_reclaim_handler(void *arg, int ncount);
  248 static void cxgb_start_locked(struct sge_qset *qs);
  249 
  250 /*
  251  * XXX need to cope with bursty scheduling by looking at a wider
  252  * window than we are now for determining the need for coalescing
  253  *
  254  */
  255 static __inline uint64_t
  256 check_pkt_coalesce(struct sge_qset *qs) 
  257 { 
  258         struct adapter *sc; 
  259         struct sge_txq *txq; 
  260         uint8_t *fill;
  261 
  262         if (__predict_false(cxgb_tx_coalesce_force))
  263                 return (1);
  264         txq = &qs->txq[TXQ_ETH]; 
  265         sc = qs->port->adapter; 
  266         fill = &sc->tunq_fill[qs->idx];
  267 
  268         if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
  269                 cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
  270         if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
  271                 cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
  272         /*
  273          * Once the hardware queue's in_use count reaches the
  274          * cxgb_tx_coalesce_enable_start threshold we mark it as coalescing;
  275          * we drop back out only when it falls to cxgb_tx_coalesce_enable_stop
  276          * and no packets are enqueued, providing some hysteresis.
  277          */
  278         if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
  279             TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
  280                 *fill = 0; 
  281         else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
  282                 *fill = 1; 
  283 
  284         return (sc->tunq_coalesce);
  285 } 
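      /*
       * Worked example of the hysteresis above, for illustration only and
       * assuming TX_ETH_Q_SIZE were 1024: with the default tunables the queue
       * starts coalescing once 512 descriptors (TX_ETH_Q_SIZE >> 1) are in
       * use, and stops only after in_use falls to 256 (TX_ETH_Q_SIZE >> 2)
       * with the software ring empty, so the decision does not flap when the
       * queue depth hovers around a single threshold.
       */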
  286 
  287 #ifdef __LP64__
  288 static void
  289 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
  290 {
  291         uint64_t wr_hilo;
  292 #if _BYTE_ORDER == _LITTLE_ENDIAN
  293         wr_hilo = wr_hi;
  294         wr_hilo |= (((uint64_t)wr_lo)<<32);
  295 #else
  296         wr_hilo = wr_lo;
  297         wr_hilo |= (((uint64_t)wr_hi)<<32);
  298 #endif  
  299         wrp->wrh_hilo = wr_hilo;
  300 }
  301 #else
  302 static void
  303 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
  304 {
  305 
  306         wrp->wrh_hi = wr_hi;
  307         wmb();
  308         wrp->wrh_lo = wr_lo;
  309 }
  310 #endif
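      /*
       * Hedged usage sketch, not compiled (#if 0): a caller builds the two
       * 32-bit halves of a work request header and hands them to set_wr_hdr(),
       * so that on LP64 the SGE observes one atomic 64-bit store and on 32-bit
       * platforms the high half is written before the low half.  The opcode
       * and length below are placeholders rather than a real request.
       */
      #if 0
      static void
      example_set_wr_hdr(struct work_request_hdr *wrp, unsigned int gen)
      {
              uint32_t wr_hi, wr_lo;

              wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
                  F_WR_SOP | F_WR_EOP);
              wr_lo = htonl(V_WR_LEN(2) | V_WR_GEN(gen));
              set_wr_hdr(wrp, wr_hi, wr_lo);
      }
      #endif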
  311 
  312 struct coalesce_info {
  313         int count;
  314         int nbytes;
  315 };
  316 
  317 static int
  318 coalesce_check(struct mbuf *m, void *arg)
  319 {
  320         struct coalesce_info *ci = arg;
  321         int *count = &ci->count;
  322         int *nbytes = &ci->nbytes;
  323 
  324         if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
  325                 (*count < 7) && (m->m_next == NULL))) {
  326                 *count += 1;
  327                 *nbytes += m->m_len;
  328                 return (1);
  329         }
  330         return (0);
  331 }
  332 
  333 static struct mbuf *
  334 cxgb_dequeue(struct sge_qset *qs)
  335 {
  336         struct mbuf *m, *m_head, *m_tail;
  337         struct coalesce_info ci;
  338 
  339         
  340         if (check_pkt_coalesce(qs) == 0) 
  341                 return TXQ_RING_DEQUEUE(qs);
  342 
  343         m_head = m_tail = NULL;
  344         ci.count = ci.nbytes = 0;
  345         do {
  346                 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
  347                 if (m_head == NULL) {
  348                         m_tail = m_head = m;
  349                 } else if (m != NULL) {
  350                         m_tail->m_nextpkt = m;
  351                         m_tail = m;
  352                 }
  353         } while (m != NULL);
  354         if (ci.count > 7)
  355                 panic("trying to coalesce %d packets into one WR", ci.count);
  356         return (m_head);
  357 }
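      /*
       * For reference: coalesce_check() above caps a coalesced chain at 7
       * packets and at most 10500 bytes in total, and only accepts packets
       * that fit in a single mbuf.  t3_encap() below turns such a chain into
       * one batched tunnel work request (struct cpl_tx_pkt_batch) of
       * nsegs * 2 + 1 flits, so the 7-packet cap keeps it at 15 flits.
       */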
  358         
  359 /**
  360  *      reclaim_completed_tx - reclaims completed Tx descriptors
  361  *      @qs: the queue set that owns the Tx queue
  362  *      @reclaim_min, @queue: reclaim threshold and which Tx queue in the set
  363  *
  364  *      Reclaims Tx descriptors that the SGE has indicated it has processed,
  365  *      and frees the associated buffers if possible.  Called with the Tx
  366  *      queue's lock held.
  367  */
  368 static __inline int
  369 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
  370 {
  371         struct sge_txq *q = &qs->txq[queue];
  372         int reclaim = desc_reclaimable(q);
  373 
  374         if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
  375             (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
  376                 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
  377 
  378         if (reclaim < reclaim_min)
  379                 return (0);
  380 
  381         mtx_assert(&qs->lock, MA_OWNED);
  382         if (reclaim > 0) {
  383                 t3_free_tx_desc(qs, reclaim, queue);
  384                 q->cleaned += reclaim;
  385                 q->in_use -= reclaim;
  386         }
  387         if (isset(&qs->txq_stopped, TXQ_ETH))
  388                 clrbit(&qs->txq_stopped, TXQ_ETH);
  389 
  390         return (reclaim);
  391 }
  392 
  393 /**
  394  *      should_restart_tx - are there enough resources to restart a Tx queue?
  395  *      @q: the Tx queue
  396  *
  397  *      Checks if there are enough descriptors to restart a suspended Tx queue.
  398  */
  399 static __inline int
  400 should_restart_tx(const struct sge_txq *q)
  401 {
  402         unsigned int r = q->processed - q->cleaned;
  403 
  404         return q->in_use - r < (q->size >> 1);
  405 }
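      /*
       * Worked example, for illustration only: with a 1024-entry Tx queue,
       * q->in_use minus (q->processed - q->cleaned) is the number of
       * descriptors still genuinely outstanding, so a suspended queue is
       * restarted once fewer than 512 of them remain in flight.
       */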
  406 
  407 /**
  408  *      t3_sge_init - initialize SGE
  409  *      @adap: the adapter
  410  *      @p: the SGE parameters
  411  *
  412  *      Performs SGE initialization needed every time after a chip reset.
  413  *      We do not initialize any of the queue sets here, instead the driver
  414  *      top-level must request those individually.  We also do not enable DMA
  415  *      here, that should be done after the queues have been set up.
  416  */
  417 void
  418 t3_sge_init(adapter_t *adap, struct sge_params *p)
  419 {
  420         u_int ctrl, ups;
  421 
  422         ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
  423 
  424         ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
  425                F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
  426                V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
  427                V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
  428 #if SGE_NUM_GENBITS == 1
  429         ctrl |= F_EGRGENCTRL;
  430 #endif
  431         if (adap->params.rev > 0) {
  432                 if (!(adap->flags & (USING_MSIX | USING_MSI)))
  433                         ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
  434         }
  435         t3_write_reg(adap, A_SG_CONTROL, ctrl);
  436         t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
  437                      V_LORCQDRBTHRSH(512));
  438         t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
  439         t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
  440                      V_TIMEOUT(200 * core_ticks_per_usec(adap)));
  441         t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
  442                      adap->params.rev < T3_REV_C ? 1000 : 500);
  443         t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
  444         t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
  445         t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
  446         t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
  447         t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
  448 }
  449 
  450 
  451 /**
  452  *      sgl_len - calculates the size of an SGL of the given capacity
  453  *      @n: the number of SGL entries
  454  *
  455  *      Calculates the number of flits needed for a scatter/gather list that
  456  *      can hold the given number of entries.
  457  */
  458 static __inline unsigned int
  459 sgl_len(unsigned int n)
  460 {
  461         return ((3 * n) / 2 + (n & 1));
  462 }
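      /*
       * Worked example: as laid out by make_sgl() below, a pair of SGL
       * entries packs into 3 flits (two 8-byte addresses plus two 4-byte
       * lengths) and a trailing odd entry occupies 2 more, so sgl_len(1) = 2,
       * sgl_len(2) = 3 and sgl_len(3) = 5, matching (3 * n) / 2 + (n & 1).
       */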
  463 
  464 /**
  465  *      get_imm_packet - return the next ingress packet buffer from a response
  466  *      @resp: the response descriptor containing the packet data
  467  *
  468  *      Return a packet containing the immediate data of the given response.
  469  */
  470 static int
  471 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
  472 {
  473 
  474         if (resp->rss_hdr.opcode == CPL_RX_DATA) {
  475                 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
  476                 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
  477         } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
  478                 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
  479                 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
  480         } else
  481                 m->m_len = IMMED_PKT_SIZE;
  482         m->m_ext.ext_buf = NULL;
  483         m->m_ext.ext_type = 0;
  484         memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len); 
  485         return (0);     
  486 }
  487 
  488 static __inline u_int
  489 flits_to_desc(u_int n)
  490 {
  491         return (flit_desc_map[n]);
  492 }
  493 
  494 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
  495                     F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
  496                     V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
  497                     F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
  498                     F_HIRCQPARITYERROR)
  499 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
  500 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
  501                       F_RSPQDISABLED)
  502 
  503 /**
  504  *      t3_sge_err_intr_handler - SGE async event interrupt handler
  505  *      @adapter: the adapter
  506  *
  507  *      Interrupt handler for SGE asynchronous (non-data) events.
  508  */
  509 void
  510 t3_sge_err_intr_handler(adapter_t *adapter)
  511 {
  512         unsigned int v, status;
  513 
  514         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
  515         if (status & SGE_PARERR)
  516                 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
  517                          status & SGE_PARERR);
  518         if (status & SGE_FRAMINGERR)
  519                 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
  520                          status & SGE_FRAMINGERR);
  521         if (status & F_RSPQCREDITOVERFOW)
  522                 CH_ALERT(adapter, "SGE response queue credit overflow\n");
  523 
  524         if (status & F_RSPQDISABLED) {
  525                 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
  526 
  527                 CH_ALERT(adapter,
  528                          "packet delivered to disabled response queue (0x%x)\n",
  529                          (v >> S_RSPQ0DISABLED) & 0xff);
  530         }
  531 
  532         t3_write_reg(adapter, A_SG_INT_CAUSE, status);
  533         if (status & SGE_FATALERR)
  534                 t3_fatal_err(adapter);
  535 }
  536 
  537 void
  538 t3_sge_prep(adapter_t *adap, struct sge_params *p)
  539 {
  540         int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
  541 
  542         nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
  543         nqsets *= adap->params.nports;
  544 
  545         fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
  546 
  547         while (!powerof2(fl_q_size))
  548                 fl_q_size--;
  549 
  550         use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
  551             is_offload(adap);
  552 
  553 #if __FreeBSD_version >= 700111
  554         if (use_16k) {
  555                 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
  556                 jumbo_buf_size = MJUM16BYTES;
  557         } else {
  558                 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
  559                 jumbo_buf_size = MJUM9BYTES;
  560         }
  561 #else
  562         jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
  563         jumbo_buf_size = MJUMPAGESIZE;
  564 #endif
  565         while (!powerof2(jumbo_q_size))
  566                 jumbo_q_size--;
  567 
  568         if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
  569                 device_printf(adap->dev,
  570                     "Insufficient clusters and/or jumbo buffers.\n");
  571 
  572         p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
  573 
  574         for (i = 0; i < SGE_QSETS; ++i) {
  575                 struct qset_params *q = p->qset + i;
  576 
  577                 if (adap->params.nports > 2) {
  578                         q->coalesce_usecs = 50;
  579                 } else {
  580 #ifdef INVARIANTS                       
  581                         q->coalesce_usecs = 10;
  582 #else
  583                         q->coalesce_usecs = 5;
  584 #endif                  
  585                 }
  586                 q->polling = 0;
  587                 q->rspq_size = RSPQ_Q_SIZE;
  588                 q->fl_size = fl_q_size;
  589                 q->jumbo_size = jumbo_q_size;
  590                 q->jumbo_buf_size = jumbo_buf_size;
  591                 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
  592                 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
  593                 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
  594                 q->cong_thres = 0;
  595         }
  596 }
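      /*
       * Sizing example, for illustration only, assuming a 2-port adapter on
       * an 8-CPU machine with SGE_QSETS of 8 and nmbclusters of 25600:
       * nqsets = min(8 / 2, 8) * 2 = 8, nmbclusters / (3 * nqsets) = 1066,
       * and rounding down to a power of two yields a 1024-entry free list
       * (subject to the FL_Q_SIZE cap).  The jumbo free list is sized the
       * same way from the 9k/16k jumbo cluster pools.
       */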
  597 
  598 int
  599 t3_sge_alloc(adapter_t *sc)
  600 {
  601 
  602         /* The parent tag. */
  603         if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
  604                                 1, 0,                   /* algnmnt, boundary */
  605                                 BUS_SPACE_MAXADDR,      /* lowaddr */
  606                                 BUS_SPACE_MAXADDR,      /* highaddr */
  607                                 NULL, NULL,             /* filter, filterarg */
  608                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
  609                                 BUS_SPACE_UNRESTRICTED, /* nsegments */
  610                                 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
  611                                 0,                      /* flags */
  612                                 NULL, NULL,             /* lock, lockarg */
  613                                 &sc->parent_dmat)) {
  614                 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
  615                 return (ENOMEM);
  616         }
  617 
  618         /*
  619          * DMA tag for normal sized RX frames
  620          */
  621         if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
  622                 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
  623                 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
  624                 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
  625                 return (ENOMEM);
  626         }
  627 
  628         /* 
  629          * DMA tag for jumbo sized RX frames.
  630          */
  631         if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
  632                 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
  633                 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
  634                 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
  635                 return (ENOMEM);
  636         }
  637 
  638         /* 
  639          * DMA tag for TX frames.
  640          */
  641         if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
  642                 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
  643                 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
  644                 NULL, NULL, &sc->tx_dmat)) {
  645                 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
  646                 return (ENOMEM);
  647         }
  648 
  649         return (0);
  650 }
  651 
  652 int
  653 t3_sge_free(struct adapter * sc)
  654 {
  655 
  656         if (sc->tx_dmat != NULL)
  657                 bus_dma_tag_destroy(sc->tx_dmat);
  658 
  659         if (sc->rx_jumbo_dmat != NULL)
  660                 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
  661 
  662         if (sc->rx_dmat != NULL)
  663                 bus_dma_tag_destroy(sc->rx_dmat);
  664 
  665         if (sc->parent_dmat != NULL)
  666                 bus_dma_tag_destroy(sc->parent_dmat);
  667 
  668         return (0);
  669 }
  670 
  671 void
  672 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  673 {
  674 
  675         qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
  676         qs->rspq.polling = 0 /* p->polling */;
  677 }
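      /*
       * Note on units: t3_sge_init() above programs A_SG_TIMER_TICK with
       * core_ticks_per_usec(adap) / 10, which (assuming the register counts
       * core clocks per SGE timer tick) makes one tick 100ns, so multiplying
       * coalesce_usecs by 10 here converts microseconds into holdoff ticks.
       */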
  678 
  679 #if !defined(__i386__) && !defined(__amd64__)
  680 static void
  681 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  682 {
  683         struct refill_fl_cb_arg *cb_arg = arg;
  684         
  685         cb_arg->error = error;
  686         cb_arg->seg = segs[0];
  687         cb_arg->nseg = nseg;
  688 
  689 }
  690 #endif
  691 /**
  692  *      refill_fl - refill an SGE free-buffer list
  693  *      @sc: the controller softc
  694  *      @q: the free-list to refill
  695  *      @n: the number of new buffers to allocate
  696  *
  697  *      (Re)populate an SGE free-buffer list with up to @n new packet buffers.
  698  *      The caller must ensure that @n does not exceed the queue's capacity.
  699  */
  700 static void
  701 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
  702 {
  703         struct rx_sw_desc *sd = &q->sdesc[q->pidx];
  704         struct rx_desc *d = &q->desc[q->pidx];
  705         struct refill_fl_cb_arg cb_arg;
  706         struct mbuf *m;
  707         caddr_t cl;
  708         int err;
  709         
  710         cb_arg.error = 0;
  711         while (n--) {
  712                 /*
  713                  * We allocate an uninitialized mbuf + cluster, mbuf is
  714                  * initialized after rx.
  715                  */
  716                 if (q->zone == zone_pack) {
  717                         if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
  718                                 break;
  719                         cl = m->m_ext.ext_buf;                  
  720                 } else {
  721                         if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
  722                                 break;
  723                         if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
  724                                 uma_zfree(q->zone, cl);
  725                                 break;
  726                         }
  727                 }
  728                 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
  729                         if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
  730                                 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
  731                                 uma_zfree(q->zone, cl);
  732                                 goto done;
  733                         }
  734                         sd->flags |= RX_SW_DESC_MAP_CREATED;
  735                 }
  736 #if !defined(__i386__) && !defined(__amd64__)
  737                 err = bus_dmamap_load(q->entry_tag, sd->map,
  738                     cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
  739                 
  740                 if (err != 0 || cb_arg.error) {
  741                         if (q->zone != zone_pack)
  742                                 uma_zfree(q->zone, cl);
  743                         m_free(m);
  744                         goto done;
  745                 }
  746 #else
  747                 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
  748 #endif          
  749                 sd->flags |= RX_SW_DESC_INUSE;
  750                 sd->rxsd_cl = cl;
  751                 sd->m = m;
  752                 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
  753                 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
  754                 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
  755                 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
  756 
  757                 d++;
  758                 sd++;
  759 
  760                 if (++q->pidx == q->size) {
  761                         q->pidx = 0;
  762                         q->gen ^= 1;
  763                         sd = q->sdesc;
  764                         d = q->desc;
  765                 }
  766                 q->credits++;
  767                 q->db_pending++;
  768         }
  769 
  770 done:
  771         if (q->db_pending >= 32) {
  772                 q->db_pending = 0;
  773                 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
  774         }
  775 }
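      /*
       * Note: the free-list doorbell is deliberately batched; refill_fl()
       * only writes A_SG_KDOORBELL once at least 32 newly posted buffers are
       * pending, mirroring the 32-descriptor batching done for Tx doorbells
       * in check_ring_tx_db() below.
       */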
  776 
  777 
  778 /**
  779  *      free_rx_bufs - free the Rx buffers on an SGE free list
  780  *      @sc: the controller softc
  781  *      @q: the SGE free list to clean up
  782  *
  783  *      Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
  784  *      this queue should be stopped before calling this function.
  785  */
  786 static void
  787 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
  788 {
  789         u_int cidx = q->cidx;
  790 
  791         while (q->credits--) {
  792                 struct rx_sw_desc *d = &q->sdesc[cidx];
  793 
  794                 if (d->flags & RX_SW_DESC_INUSE) {
  795                         bus_dmamap_unload(q->entry_tag, d->map);
  796                         bus_dmamap_destroy(q->entry_tag, d->map);
  797                         if (q->zone == zone_pack) {
  798                                 m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
  799                                 uma_zfree(zone_pack, d->m);
  800                         } else {
  801                                 m_init(d->m, M_NOWAIT, MT_DATA, 0);
  802                                 uma_zfree(zone_mbuf, d->m);
  803                                 uma_zfree(q->zone, d->rxsd_cl);
  804                         }                       
  805                 }
  806                 
  807                 d->rxsd_cl = NULL;
  808                 d->m = NULL;
  809                 if (++cidx == q->size)
  810                         cidx = 0;
  811         }
  812 }
  813 
  814 static __inline void
  815 __refill_fl(adapter_t *adap, struct sge_fl *fl)
  816 {
  817         refill_fl(adap, fl, min(16U, fl->size - fl->credits));
  818 }
  819 
  820 static __inline void
  821 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
  822 {
  823         uint32_t reclaimable = fl->size - fl->credits;
  824 
  825         if (reclaimable > 0)
  826                 refill_fl(adap, fl, min(max, reclaimable));
  827 }
  828 
  829 /**
  830  *      recycle_rx_buf - recycle a receive buffer
  831  *      @adapter: the adapter
  832  *      @q: the SGE free list
  833  *      @idx: index of buffer to recycle
  834  *
  835  *      Recycles the specified buffer on the given free list by adding it at
  836  *      the next available slot on the list.
  837  */
  838 static void
  839 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
  840 {
  841         struct rx_desc *from = &q->desc[idx];
  842         struct rx_desc *to   = &q->desc[q->pidx];
  843 
  844         q->sdesc[q->pidx] = q->sdesc[idx];
  845         to->addr_lo = from->addr_lo;        // already big endian
  846         to->addr_hi = from->addr_hi;        // likewise
  847         wmb();  /* necessary ? */
  848         to->len_gen = htobe32(V_FLD_GEN1(q->gen));
  849         to->gen2 = htobe32(V_FLD_GEN2(q->gen));
  850         q->credits++;
  851 
  852         if (++q->pidx == q->size) {
  853                 q->pidx = 0;
  854                 q->gen ^= 1;
  855         }
  856         t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
  857 }
  858 
  859 static void
  860 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  861 {
  862         uint32_t *addr;
  863 
  864         addr = arg;
  865         *addr = segs[0].ds_addr;
  866 }
  867 
  868 static int
  869 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
  870     bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
  871     bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
  872 {
  873         size_t len = nelem * elem_size;
  874         void *s = NULL;
  875         void *p = NULL;
  876         int err;
  877 
  878         if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
  879                                       BUS_SPACE_MAXADDR_32BIT,
  880                                       BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
  881                                       len, 0, NULL, NULL, tag)) != 0) {
  882                 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
  883                 return (ENOMEM);
  884         }
  885 
  886         if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
  887                                     map)) != 0) {
  888                 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
  889                 return (ENOMEM);
  890         }
  891 
  892         bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
  893         bzero(p, len);
  894         *(void **)desc = p;
  895 
  896         if (sw_size) {
  897                 len = nelem * sw_size;
  898                 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
  899                 *(void **)sdesc = s;
  900         }
  901         if (parent_entry_tag == NULL)
  902                 return (0);
  903             
  904         if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
  905                                       BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
  906                                       NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
  907                                       TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
  908                                       NULL, NULL, entry_tag)) != 0) {
  909                 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
  910                 return (ENOMEM);
  911         }
  912         return (0);
  913 }
  914 
  915 static void
  916 sge_slow_intr_handler(void *arg, int ncount)
  917 {
  918         adapter_t *sc = arg;
  919 
  920         t3_slow_intr_handler(sc);
  921         t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
  922         (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
  923 }
  924 
  925 /**
  926  *      sge_timer_cb - perform periodic maintenance of an SGE qset
  927  *      @arg: the adapter softc
  928  *
  929  *      Runs periodically from a timer to perform maintenance of the SGE
  930  *      queue sets.  It performs the following tasks:
  931  *
  932  *      a) Cleans up any completed Tx descriptors that may still be pending.
  933  *      Normal descriptor cleanup happens when new packets are added to a Tx
  934  *      queue so this timer is relatively infrequent and does any cleanup only
  935  *      if the Tx queue has not seen any new packets in a while.  We make a
  936  *      best effort attempt to reclaim descriptors, in that we don't wait
  937  *      around if we cannot get a queue's lock (which most likely is because
  938  *      someone else is queueing new packets and so will also handle the clean
  939  *      up).  Since control queues use immediate data exclusively we don't
  940  *      bother cleaning them up here.
  941  *
  942  *      b) Replenishes Rx queues that have run out due to memory shortage.
  943  *      Normally new Rx buffers are added when existing ones are consumed but
  944  *      when out of memory a queue can become empty.  We try to add only a few
  945  *      buffers here, the queue will be replenished fully as these new buffers
  946  *      are used up if memory shortage has subsided.
  947  *      
  948  *      c) Return coalesced response queue credits in case a response queue is
  949  *      starved.
  950  *
  951  *      d) Ring doorbells for T304 tunnel queues since we have seen doorbell 
  952  *      fifo overflows and the FW doesn't implement any recovery scheme yet.
  953  */
  954 static void
  955 sge_timer_cb(void *arg)
  956 {
  957         adapter_t *sc = arg;
  958         if ((sc->flags & USING_MSIX) == 0) {
  959                 
  960                 struct port_info *pi;
  961                 struct sge_qset *qs;
  962                 struct sge_txq  *txq;
  963                 int i, j;
  964                 int reclaim_ofl, refill_rx;
  965 
  966                 if (sc->open_device_map == 0) 
  967                         return;
  968 
  969                 for (i = 0; i < sc->params.nports; i++) {
  970                         pi = &sc->port[i];
  971                         for (j = 0; j < pi->nqsets; j++) {
  972                                 qs = &sc->sge.qs[pi->first_qset + j];
  973                                 txq = &qs->txq[0];
  974                                 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
  975                                 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || 
  976                                     (qs->fl[1].credits < qs->fl[1].size));
  977                                 if (reclaim_ofl || refill_rx) {
  978                                         taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
  979                                         break;
  980                                 }
  981                         }
  982                 }
  983         }
  984         
  985         if (sc->params.nports > 2) {
  986                 int i;
  987 
  988                 for_each_port(sc, i) {
  989                         struct port_info *pi = &sc->port[i];
  990 
  991                         t3_write_reg(sc, A_SG_KDOORBELL, 
  992                                      F_SELEGRCNTX | 
  993                                      (FW_TUNNEL_SGEEC_START + pi->first_qset));
  994                 }
  995         }       
  996         if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
  997             sc->open_device_map != 0)
  998                 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
  999 }
 1000 
 1001 /*
 1002  * This is meant to be a catch-all function to keep sge state private
 1003  * to sge.c
 1004  *
 1005  */
 1006 int
 1007 t3_sge_init_adapter(adapter_t *sc)
 1008 {
 1009         callout_init(&sc->sge_timer_ch, 1);
 1010         callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
 1011         TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
 1012         return (0);
 1013 }
 1014 
 1015 int
 1016 t3_sge_reset_adapter(adapter_t *sc)
 1017 {
 1018         callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
 1019         return (0);
 1020 }
 1021 
 1022 int
 1023 t3_sge_init_port(struct port_info *pi)
 1024 {
 1025         TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
 1026         return (0);
 1027 }
 1028 
 1029 /**
 1030  *      refill_rspq - replenish an SGE response queue
 1031  *      @adapter: the adapter
 1032  *      @q: the response queue to replenish
 1033  *      @credits: how many new responses to make available
 1034  *
 1035  *      Replenishes a response queue by making the supplied number of responses
 1036  *      available to HW.
 1037  */
 1038 static __inline void
 1039 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
 1040 {
 1041 
 1042         /* mbufs are allocated on demand when a rspq entry is processed. */
 1043         t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
 1044                      V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
 1045 }
 1046 
 1047 static void
 1048 sge_txq_reclaim_handler(void *arg, int ncount)
 1049 {
 1050         struct sge_qset *qs = arg;
 1051         int i;
 1052 
 1053         for (i = 0; i < 3; i++)
 1054                 reclaim_completed_tx(qs, 16, i);
 1055 }
 1056 
 1057 static void
 1058 sge_timer_reclaim(void *arg, int ncount)
 1059 {
 1060         struct port_info *pi = arg;
 1061         int i, nqsets = pi->nqsets;
 1062         adapter_t *sc = pi->adapter;
 1063         struct sge_qset *qs;
 1064         struct mtx *lock;
 1065         
 1066         KASSERT((sc->flags & USING_MSIX) == 0,
 1067             ("can't call timer reclaim for msi-x"));
 1068 
 1069         for (i = 0; i < nqsets; i++) {
 1070                 qs = &sc->sge.qs[pi->first_qset + i];
 1071 
 1072                 reclaim_completed_tx(qs, 16, TXQ_OFLD);
 1073                 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
 1074                             &sc->sge.qs[0].rspq.lock;
 1075 
 1076                 if (mtx_trylock(lock)) {
 1077                         /* XXX currently assume that we are *NOT* polling */
 1078                         uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
 1079 
 1080                         if (qs->fl[0].credits < qs->fl[0].size - 16)
 1081                                 __refill_fl(sc, &qs->fl[0]);
 1082                         if (qs->fl[1].credits < qs->fl[1].size - 16)
 1083                                 __refill_fl(sc, &qs->fl[1]);
 1084                         
 1085                         if (status & (1 << qs->rspq.cntxt_id)) {
 1086                                 if (qs->rspq.credits) {
 1087                                         refill_rspq(sc, &qs->rspq, 1);
 1088                                         qs->rspq.credits--;
 1089                                         t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, 
 1090                                             1 << qs->rspq.cntxt_id);
 1091                                 }
 1092                         }
 1093                         mtx_unlock(lock);
 1094                 }
 1095         }
 1096 }
 1097 
 1098 /**
 1099  *      init_qset_cntxt - initialize an SGE queue set context info
 1100  *      @qs: the queue set
 1101  *      @id: the queue set id
 1102  *
 1103  *      Initializes the TIDs and context ids for the queues of a queue set.
 1104  */
 1105 static void
 1106 init_qset_cntxt(struct sge_qset *qs, u_int id)
 1107 {
 1108 
 1109         qs->rspq.cntxt_id = id;
 1110         qs->fl[0].cntxt_id = 2 * id;
 1111         qs->fl[1].cntxt_id = 2 * id + 1;
 1112         qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
 1113         qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
 1114         qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
 1115         qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
 1116         qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
 1117 
 1118         /* XXX: a sane limit is needed instead of INT_MAX */
 1119         mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
 1120         mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
 1121         mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
 1122 }
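      /*
       * Layout example: queue set id 3 gets response queue context 3, free
       * list contexts 6 and 7, Ethernet Tx context FW_TUNNEL_SGEEC_START + 3,
       * offload Tx context FW_OFLD_SGEEC_START + 3 and control Tx context
       * FW_CTRL_SGEEC_START + 3, with the matching tunnel and control TIDs.
       */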
 1123 
 1124 
 1125 static void
 1126 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
 1127 {
 1128         txq->in_use += ndesc;
 1129         /*
 1130          * XXX we don't handle stopping of queue
 1131          * presumably start handles this when we bump against the end
 1132          */
 1133         txqs->gen = txq->gen;
 1134         txq->unacked += ndesc;
 1135         txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
 1136         txq->unacked &= 31;
 1137         txqs->pidx = txq->pidx;
 1138         txq->pidx += ndesc;
 1139 #ifdef INVARIANTS
 1140         if (((txqs->pidx > txq->cidx) &&
 1141                 (txq->pidx < txqs->pidx) &&
 1142                 (txq->pidx >= txq->cidx)) ||
 1143             ((txqs->pidx < txq->cidx) &&
 1144                 (txq->pidx >= txq->cidx)) ||
 1145             ((txqs->pidx < txq->cidx) &&
 1146                 (txq->cidx < txqs->pidx)))
 1147                 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
 1148                     txqs->pidx, txq->pidx, txq->cidx);
 1149 #endif
 1150         if (txq->pidx >= txq->size) {
 1151                 txq->pidx -= txq->size;
 1152                 txq->gen ^= 1;
 1153         }
 1154 
 1155 }
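      /*
       * The unacked/compl arithmetic above requests a hardware completion
       * roughly every 32 descriptors: unacked accumulates the descriptors
       * issued since the last request, bit 5 (value 32) is shifted into the
       * F_WR_COMPL position of the WR header, and the counter is then folded
       * back below 32.
       */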
 1156 
 1157 /**
 1158  *      calc_tx_descs - calculate the number of Tx descriptors for a packet
 1159  *      @m: the packet mbufs
 1160  *      @nsegs: the number of segments 
 1161  *
 1162  *      Returns the number of Tx descriptors needed for the given Ethernet
 1163  *      packet.  Ethernet packets require addition of WR and CPL headers.
 1164  */
 1165 static __inline unsigned int
 1166 calc_tx_descs(const struct mbuf *m, int nsegs)
 1167 {
 1168         unsigned int flits;
 1169 
 1170         if (m->m_pkthdr.len <= PIO_LEN)
 1171                 return 1;
 1172 
 1173         flits = sgl_len(nsegs) + 2;
 1174         if (m->m_pkthdr.csum_flags & CSUM_TSO)
 1175                 flits++;
 1176 
 1177         return flits_to_desc(flits);
 1178 }
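      /*
       * Worked example: a packet short enough to fit in PIO_LEN bytes is sent
       * as immediate data and always costs a single descriptor.  Otherwise a
       * 5-segment, non-TSO packet needs sgl_len(5) + 2 = 10 flits, which the
       * flit_desc_map table above still resolves to one descriptor; TSO adds
       * one more flit for the larger LSO CPL header.
       */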
 1179 
 1180 /**
 1181  *      make_sgl - populate a scatter/gather list for a packet
 1182  *      @sgp: the SGL to populate
 1183  *      @segs: the packet dma segments
 1184  *      @nsegs: the number of segments
 1185  *
 1186  *      Generates a scatter/gather list for the buffers that make up a packet
 1187  *      and returns the SGL size in 8-byte words.  The caller must size the SGL
 1188  *      appropriately.
 1189  */
 1190 static __inline void
 1191 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
 1192 {
 1193         int i, idx;
 1194         
 1195         for (idx = 0, i = 0; i < nsegs; i++) {
 1196                 /*
 1197                  * firmware doesn't like empty segments
 1198                  */
 1199                 if (segs[i].ds_len == 0)
 1200                         continue;
 1201                 if (i && idx == 0) 
 1202                         ++sgp;
 1203                 
 1204                 sgp->len[idx] = htobe32(segs[i].ds_len);
 1205                 sgp->addr[idx] = htobe64(segs[i].ds_addr);
 1206                 idx ^= 1;
 1207         }
 1208         
 1209         if (idx) {
 1210                 sgp->len[idx] = 0;
 1211                 sgp->addr[idx] = 0;
 1212         }
 1213 }
 1214         
 1215 /**
 1216  *      check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 1217  *      @adap: the adapter
 1218  *      @q: the Tx queue
 1219  *
 1220  *      Ring the doorbell if a Tx queue is asleep.  There is a natural race
 1221  *      in which the HW may go to sleep just after we check; in that case
 1222  *      the interrupt handler will detect the outstanding Tx packet and
 1223  *      ring the doorbell for us.
 1224  *
 1225  *      When GTS is disabled we unconditionally ring the doorbell.
 1226  */
 1227 static __inline void
 1228 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
 1229 {
 1230 #if USE_GTS
 1231         clear_bit(TXQ_LAST_PKT_DB, &q->flags);
 1232         if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
 1233                 set_bit(TXQ_LAST_PKT_DB, &q->flags);
 1234 #ifdef T3_TRACE
 1235                 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
 1236                           q->cntxt_id);
 1237 #endif
 1238                 t3_write_reg(adap, A_SG_KDOORBELL,
 1239                              F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1240         }
 1241 #else
 1242         if (mustring || ++q->db_pending >= 32) {
 1243                 wmb();            /* write descriptors before telling HW */
 1244                 t3_write_reg(adap, A_SG_KDOORBELL,
 1245                     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1246                 q->db_pending = 0;
 1247         }
 1248 #endif
 1249 }
 1250 
 1251 static __inline void
 1252 wr_gen2(struct tx_desc *d, unsigned int gen)
 1253 {
 1254 #if SGE_NUM_GENBITS == 2
 1255         d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
 1256 #endif
 1257 }
 1258 
 1259 /**
 1260  *      write_wr_hdr_sgl - write a WR header and, optionally, SGL
 1261  *      @ndesc: number of Tx descriptors spanned by the SGL
 1262  *      @txd: first Tx descriptor to be written
 1263  *      @txqs: txq state (generation and producer index)
 1264  *      @txq: the SGE Tx queue
 1265  *      @sgl: the SGL
 1266  *      @flits: number of flits to the start of the SGL in the first descriptor
 1267  *      @sgl_flits: the SGL size in flits
 1268  *      @wr_hi: top 32 bits of WR header based on WR type (big endian)
 1269  *      @wr_lo: low 32 bits of WR header based on WR type (big endian)
 1270  *
 1271  *      Write a work request header and an associated SGL.  If the SGL is
 1272  *      small enough to fit into one Tx descriptor it has already been written
 1273  *      and we just need to write the WR header.  Otherwise we distribute the
 1274  *      SGL across the number of descriptors it spans.
 1275  */
 1276 static void
 1277 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
 1278     const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
 1279     unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
 1280 {
 1281 
 1282         struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
 1283         struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
 1284         
 1285         if (__predict_true(ndesc == 1)) {
 1286                 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
 1287                     V_WR_SGLSFLT(flits)) | wr_hi,
 1288                     htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
 1289                     wr_lo);
 1290 
 1291                 wr_gen2(txd, txqs->gen);
 1292                 
 1293         } else {
 1294                 unsigned int ogen = txqs->gen;
 1295                 const uint64_t *fp = (const uint64_t *)sgl;
 1296                 struct work_request_hdr *wp = wrp;
 1297                 
 1298                 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
 1299                     V_WR_SGLSFLT(flits)) | wr_hi;
 1300                 
 1301                 while (sgl_flits) {
 1302                         unsigned int avail = WR_FLITS - flits;
 1303 
 1304                         if (avail > sgl_flits)
 1305                                 avail = sgl_flits;
 1306                         memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
 1307                         sgl_flits -= avail;
 1308                         ndesc--;
 1309                         if (!sgl_flits)
 1310                                 break;
 1311                         
 1312                         fp += avail;
 1313                         txd++;
 1314                         txsd++;
 1315                         if (++txqs->pidx == txq->size) {
 1316                                 txqs->pidx = 0;
 1317                                 txqs->gen ^= 1;
 1318                                 txd = txq->desc;
 1319                                 txsd = txq->sdesc;
 1320                         }
 1321 
 1322                         /*
 1323                          * when the head of the mbuf chain
 1324                          * is freed all clusters will be freed
 1325                          * with it
 1326                          */
 1327                         wrp = (struct work_request_hdr *)txd;
 1328                         wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
 1329                             V_WR_SGLSFLT(1)) | wr_hi;
 1330                         wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
 1331                                     sgl_flits + 1)) |
 1332                             V_WR_GEN(txqs->gen)) | wr_lo;
 1333                         wr_gen2(txd, txqs->gen);
 1334                         flits = 1;
 1335                 }
 1336                 wrp->wrh_hi |= htonl(F_WR_EOP);
 1337                 wmb();
 1338                 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
 1339                 wr_gen2((struct tx_desc *)wp, ogen);
 1340         }
 1341 }
 1342 
 1343 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
 1344 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
 1345 
 1346 #define GET_VTAG(cntrl, m) \
 1347 do { \
 1348         if ((m)->m_flags & M_VLANTAG)                                               \
 1349                 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
 1350 } while (0)
 1351 
 1352 static int
 1353 t3_encap(struct sge_qset *qs, struct mbuf **m)
 1354 {
 1355         adapter_t *sc;
 1356         struct mbuf *m0;
 1357         struct sge_txq *txq;
 1358         struct txq_state txqs;
 1359         struct port_info *pi;
 1360         unsigned int ndesc, flits, cntrl, mlen;
 1361         int err, nsegs, tso_info = 0;
 1362 
 1363         struct work_request_hdr *wrp;
 1364         struct tx_sw_desc *txsd;
 1365         struct sg_ent *sgp, *sgl;
 1366         uint32_t wr_hi, wr_lo, sgl_flits; 
 1367         bus_dma_segment_t segs[TX_MAX_SEGS];
 1368 
 1369         struct tx_desc *txd;
 1370                 
 1371         pi = qs->port;
 1372         sc = pi->adapter;
 1373         txq = &qs->txq[TXQ_ETH];
 1374         txd = &txq->desc[txq->pidx];
 1375         txsd = &txq->sdesc[txq->pidx];
 1376         sgl = txq->txq_sgl;
 1377 
 1378         prefetch(txd);
 1379         m0 = *m;
 1380 
 1381         mtx_assert(&qs->lock, MA_OWNED);
 1382         cntrl = V_TXPKT_INTF(pi->txpkt_intf);
 1383         KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
 1384         
 1385         if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
 1386             m0->m_pkthdr.csum_flags & (CSUM_TSO))
 1387                 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
 1388 
 1389         if (m0->m_nextpkt != NULL) {
 1390                 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
 1391                 ndesc = 1;
 1392                 mlen = 0;
 1393         } else {
 1394                 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
 1395                     &m0, segs, &nsegs))) {
 1396                         if (cxgb_debug)
 1397                                 printf("failed ... err=%d\n", err);
 1398                         return (err);
 1399                 }
 1400                 mlen = m0->m_pkthdr.len;
 1401                 ndesc = calc_tx_descs(m0, nsegs);
 1402         }
 1403         txq_prod(txq, ndesc, &txqs);
 1404 
 1405         KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
 1406         txsd->m = m0;
 1407 
 1408         if (m0->m_nextpkt != NULL) {
 1409                 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
 1410                 int i, fidx;
 1411 
 1412                 if (nsegs > 7)
 1413                         panic("trying to coalesce %d packets into one WR", nsegs);
 1414                 txq->txq_coalesced += nsegs;
 1415                 wrp = (struct work_request_hdr *)txd;
 1416                 flits = nsegs*2 + 1;
 1417 
 1418                 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
 1419                         struct cpl_tx_pkt_batch_entry *cbe;
 1420                         uint64_t flit;
 1421                         uint32_t *hflit = (uint32_t *)&flit;
 1422                         int cflags = m0->m_pkthdr.csum_flags;
 1423 
 1424                         cntrl = V_TXPKT_INTF(pi->txpkt_intf);
 1425                         GET_VTAG(cntrl, m0);
 1426                         cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
 1427                         if (__predict_false(!(cflags & CSUM_IP)))
 1428                                 cntrl |= F_TXPKT_IPCSUM_DIS;
 1429                         if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
 1430                             CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
 1431                                 cntrl |= F_TXPKT_L4CSUM_DIS;
 1432 
 1433                         hflit[0] = htonl(cntrl);
 1434                         hflit[1] = htonl(segs[i].ds_len | 0x80000000);
 1435                         flit |= htobe64(1 << 24);
 1436                         cbe = &cpl_batch->pkt_entry[i];
 1437                         cbe->cntrl = hflit[0];
 1438                         cbe->len = hflit[1];
 1439                         cbe->addr = htobe64(segs[i].ds_addr);
 1440                 }
 1441 
 1442                 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
 1443                     V_WR_SGLSFLT(flits)) |
 1444                     htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
 1445                 wr_lo = htonl(V_WR_LEN(flits) |
 1446                     V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
 1447                 set_wr_hdr(wrp, wr_hi, wr_lo);
 1448                 wmb();
 1449                 ETHER_BPF_MTAP(pi->ifp, m0);
 1450                 wr_gen2(txd, txqs.gen);
 1451                 check_ring_tx_db(sc, txq, 0);
 1452                 return (0);             
 1453         } else if (tso_info) {
 1454                 uint16_t eth_type;
 1455                 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
 1456                 struct ether_header *eh;
 1457                 void *l3hdr;
 1458                 struct tcphdr *tcp;
 1459 
 1460                 txd->flit[2] = 0;
 1461                 GET_VTAG(cntrl, m0);
 1462                 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
 1463                 hdr->cntrl = htonl(cntrl);
 1464                 hdr->len = htonl(mlen | 0x80000000);
 1465 
 1466                 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
 1467                         printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
 1468                             m0, mlen, m0->m_pkthdr.tso_segsz,
 1469                             (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
 1470                         panic("tx tso packet too small");
 1471                 }
 1472 
 1473                 /* Make sure that ether, ip, tcp headers are all in m0 */
 1474                 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
 1475                         m0 = m_pullup(m0, TCPPKTHDRSIZE);
 1476                         if (__predict_false(m0 == NULL)) {
 1477                                 /* XXX panic probably an overreaction */
 1478                                 panic("couldn't fit header into mbuf");
 1479                         }
 1480                 }
 1481 
 1482                 eh = mtod(m0, struct ether_header *);
 1483                 eth_type = eh->ether_type;
 1484                 if (eth_type == htons(ETHERTYPE_VLAN)) {
 1485                         struct ether_vlan_header *evh = (void *)eh;
 1486 
 1487                         tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
 1488                         l3hdr = evh + 1;
 1489                         eth_type = evh->evl_proto;
 1490                 } else {
 1491                         tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
 1492                         l3hdr = eh + 1;
 1493                 }
 1494 
 1495                 if (eth_type == htons(ETHERTYPE_IP)) {
 1496                         struct ip *ip = l3hdr;
 1497 
 1498                         tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
 1499                         tcp = (struct tcphdr *)(ip + 1);
 1500                 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
 1501                         struct ip6_hdr *ip6 = l3hdr;
 1502 
 1503                         KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
 1504                             ("%s: CSUM_TSO with ip6_nxt %d",
 1505                             __func__, ip6->ip6_nxt));
 1506 
 1507                         tso_info |= F_LSO_IPV6;
 1508                         tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
 1509                         tcp = (struct tcphdr *)(ip6 + 1);
 1510                 } else
 1511                         panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
 1512 
 1513                 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
 1514                 hdr->lso_info = htonl(tso_info);
 1515 
 1516                 if (__predict_false(mlen <= PIO_LEN)) {
 1517                         /*
 1518                          * Packet is not undersized but still fits in
 1519                          * PIO_LEN; this indicates a TSO bug at the higher levels.
 1520                          */
 1521                         txsd->m = NULL;
 1522                         m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
 1523                         flits = (mlen + 7) / 8 + 3;
 1524                         wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
 1525                                           V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
 1526                                           F_WR_SOP | F_WR_EOP | txqs.compl);
 1527                         wr_lo = htonl(V_WR_LEN(flits) |
 1528                             V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
 1529                         set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
 1530                         wmb();
 1531                         ETHER_BPF_MTAP(pi->ifp, m0);
 1532                         wr_gen2(txd, txqs.gen);
 1533                         check_ring_tx_db(sc, txq, 0);
 1534                         m_freem(m0);
 1535                         return (0);
 1536                 }
 1537                 flits = 3;      
 1538         } else {
 1539                 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
 1540                 
 1541                 GET_VTAG(cntrl, m0);
 1542                 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
 1543                 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
 1544                         cntrl |= F_TXPKT_IPCSUM_DIS;
 1545                 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
 1546                     CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
 1547                         cntrl |= F_TXPKT_L4CSUM_DIS;
 1548                 cpl->cntrl = htonl(cntrl);
 1549                 cpl->len = htonl(mlen | 0x80000000);
 1550 
 1551                 if (mlen <= PIO_LEN) {
 1552                         txsd->m = NULL;
 1553                         m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
 1554                         flits = (mlen + 7) / 8 + 2;
 1555                         
 1556                         wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
 1557                             V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
 1558                                           F_WR_SOP | F_WR_EOP | txqs.compl);
 1559                         wr_lo = htonl(V_WR_LEN(flits) |
 1560                             V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
 1561                         set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
 1562                         wmb();
 1563                         ETHER_BPF_MTAP(pi->ifp, m0);
 1564                         wr_gen2(txd, txqs.gen);
 1565                         check_ring_tx_db(sc, txq, 0);
 1566                         m_freem(m0);
 1567                         return (0);
 1568                 }
 1569                 flits = 2;
 1570         }
 1571         wrp = (struct work_request_hdr *)txd;
 1572         sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
 1573         make_sgl(sgp, segs, nsegs);
 1574 
 1575         sgl_flits = sgl_len(nsegs);
 1576 
 1577         ETHER_BPF_MTAP(pi->ifp, m0);
 1578 
 1579         KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
 1580         wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
 1581         wr_lo = htonl(V_WR_TID(txq->token));
 1582         write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
 1583             sgl_flits, wr_hi, wr_lo);
 1584         check_ring_tx_db(sc, txq, 0);
 1585 
 1586         return (0);
 1587 }
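/*
 * Illustrative sketch (not part of the driver, not compiled): the flit
 * accounting behind t3_encap()'s choice between copying the packet inline
 * into the descriptor and building a scatter/gather list.  A "flit" is an
 * 8-byte unit of a Tx descriptor; the header flit counts (2 for CPL_TX_PKT,
 * 3 for CPL_TX_PKT_LSO) are taken from the code above, while pio_len stands
 * in for the driver's PIO_LEN constant.
 */
#if 0
static unsigned int
example_tx_flits(unsigned int mlen, int tso, unsigned int pio_len)
{
        unsigned int hdr_flits = tso ? 3 : 2;

        if (mlen <= pio_len) {
                /* Packet body is copied inline after the header flits. */
                return (hdr_flits + (mlen + 7) / 8);
        }
        /* Otherwise only the header flits precede the SGL. */
        return (hdr_flits);
}
#endif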
 1588 
 1589 void
 1590 cxgb_tx_watchdog(void *arg)
 1591 {
 1592         struct sge_qset *qs = arg;
 1593         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 1594 
 1595         if (qs->coalescing != 0 &&
 1596             (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
 1597             TXQ_RING_EMPTY(qs))
 1598                 qs->coalescing = 0; 
 1599         else if (qs->coalescing == 0 &&
 1600             (txq->in_use >= cxgb_tx_coalesce_enable_start))
 1601                 qs->coalescing = 1;
 1602         if (TXQ_TRYLOCK(qs)) {
 1603                 qs->qs_flags |= QS_FLUSHING;
 1604                 cxgb_start_locked(qs);
 1605                 qs->qs_flags &= ~QS_FLUSHING;
 1606                 TXQ_UNLOCK(qs);
 1607         }
 1608         if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
 1609                 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
 1610                     qs, txq->txq_watchdog.c_cpu);
 1611 }
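/*
 * Illustrative sketch (not part of the driver, not compiled): the coalescing
 * decision above is a simple hysteresis.  Coalescing turns on once the queue
 * backlog reaches a high-water mark and only turns off again after it drains
 * below a low-water mark and the software ring is empty.  The threshold
 * parameters stand in for cxgb_tx_coalesce_enable_start/_stop.
 */
#if 0
static int
example_update_coalescing(int coalescing, int in_use, int ring_empty,
    int start_thres, int stop_thres)
{
        if (coalescing && in_use <= stop_thres && ring_empty)
                return (0);     /* backlog drained: stop coalescing */
        if (!coalescing && in_use >= start_thres)
                return (1);     /* backlog built up: start coalescing */
        return (coalescing);    /* otherwise keep the current state */
}
#endif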
 1612 
 1613 static void
 1614 cxgb_tx_timeout(void *arg)
 1615 {
 1616         struct sge_qset *qs = arg;
 1617         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 1618 
 1619         if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
 1620                 qs->coalescing = 1;     
 1621         if (TXQ_TRYLOCK(qs)) {
 1622                 qs->qs_flags |= QS_TIMEOUT;
 1623                 cxgb_start_locked(qs);
 1624                 qs->qs_flags &= ~QS_TIMEOUT;
 1625                 TXQ_UNLOCK(qs);
 1626         }
 1627 }
 1628 
 1629 static void
 1630 cxgb_start_locked(struct sge_qset *qs)
 1631 {
 1632         struct mbuf *m_head = NULL;
 1633         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 1634         struct port_info *pi = qs->port;
 1635         struct ifnet *ifp = pi->ifp;
 1636 
 1637         if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
 1638                 reclaim_completed_tx(qs, 0, TXQ_ETH);
 1639 
 1640         if (!pi->link_config.link_ok) {
 1641                 TXQ_RING_FLUSH(qs);
 1642                 return;
 1643         }
 1644         TXQ_LOCK_ASSERT(qs);
 1645         while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
 1646             pi->link_config.link_ok) {
 1647                 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
 1648 
 1649                 if (txq->size - txq->in_use <= TX_MAX_DESC)
 1650                         break;
 1651 
 1652                 if ((m_head = cxgb_dequeue(qs)) == NULL)
 1653                         break;
 1654                 /*
 1655                  *  Encapsulation can modify our pointer, and/or make it
 1656                  *  NULL on failure.  In that event, we can't requeue.
 1657                  */
 1658                 if (t3_encap(qs, &m_head) || m_head == NULL)
 1659                         break;
 1660 
 1661                 m_head = NULL;
 1662         }
 1663 
 1664         if (txq->db_pending)
 1665                 check_ring_tx_db(pi->adapter, txq, 1);
 1666 
 1667         if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
 1668             pi->link_config.link_ok)
 1669                 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
 1670                     qs, txq->txq_timer.c_cpu);
 1671         if (m_head != NULL)
 1672                 m_freem(m_head);
 1673 }
 1674 
 1675 static int
 1676 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
 1677 {
 1678         struct port_info *pi = qs->port;
 1679         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 1680         struct buf_ring *br = txq->txq_mr;
 1681         int error, avail;
 1682 
 1683         avail = txq->size - txq->in_use;
 1684         TXQ_LOCK_ASSERT(qs);
 1685 
 1686         /*
 1687          * We can only do a direct transmit if the following are true:
 1688          * - we aren't coalescing (ring < 3/4 full)
 1689          * - the link is up -- checked in caller
 1690          * - there are no packets enqueued already
 1691          * - there is space in hardware transmit queue 
 1692          */
 1693         if (check_pkt_coalesce(qs) == 0 &&
 1694             !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
 1695                 if (t3_encap(qs, &m)) {
 1696                         if (m != NULL &&
 1697                             (error = drbr_enqueue(ifp, br, m)) != 0) 
 1698                                 return (error);
 1699                 } else {
 1700                         if (txq->db_pending)
 1701                                 check_ring_tx_db(pi->adapter, txq, 1);
 1702 
 1703                         /*
 1704                          * We've bypassed the buf ring so we need to update
 1705                          * the stats directly
 1706                          */
 1707                         txq->txq_direct_packets++;
 1708                         txq->txq_direct_bytes += m->m_pkthdr.len;
 1709                 }
 1710         } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
 1711                 return (error);
 1712 
 1713         reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
 1714         if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
 1715             (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
 1716                 cxgb_start_locked(qs);
 1717         else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
 1718                 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
 1719                     qs, txq->txq_timer.c_cpu);
 1720         return (0);
 1721 }
 1722 
 1723 int
 1724 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
 1725 {
 1726         struct sge_qset *qs;
 1727         struct port_info *pi = ifp->if_softc;
 1728         int error, qidx = pi->first_qset;
 1729 
 1730         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
 1731             ||(!pi->link_config.link_ok)) {
 1732                 m_freem(m);
 1733                 return (0);
 1734         }
 1735 
 1736         /* check if flowid is set */
 1737         if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)       
 1738                 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
 1739 
 1740         qs = &pi->adapter->sge.qs[qidx];
 1741         
 1742         if (TXQ_TRYLOCK(qs)) {
 1743                 /* XXX running */
 1744                 error = cxgb_transmit_locked(ifp, qs, m);
 1745                 TXQ_UNLOCK(qs);
 1746         } else
 1747                 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
 1748         return (error);
 1749 }
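/*
 * Illustrative sketch (not part of the driver, not compiled): the queue-set
 * selection in cxgb_transmit() reduces to modular arithmetic on the mbuf's
 * flowid.  Flows are spread over the port's nqsets queue sets, offset by
 * first_qset.  The numbers in the comment below are hypothetical.
 */
#if 0
static int
example_pick_qset(uint32_t flowid, int first_qset, int nqsets)
{
        /* nqsets must be > 0; flows with the same flowid map to the same qset. */
        return ((int)(flowid % (uint32_t)nqsets) + first_qset);
}
/* e.g. flowid 0x1234, first_qset 4, nqsets 4 -> qset 4 + (0x1234 % 4) = 4 */
#endif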
 1750 
 1751 void
 1752 cxgb_qflush(struct ifnet *ifp)
 1753 {
 1754         /*
 1755          * Flush any mbufs enqueued in the buf_rings
 1756          * and in the transmit queues.
 1757          * This is a no-op for now.
 1758          */
 1759         return;
 1760 }
 1761 
 1762 /**
 1763  *      write_imm - write a packet into a Tx descriptor as immediate data
 1764  *      @d: the Tx descriptor to write
 1765  *      @src: the packet, which starts with a work request header
 1766  *      @len: the length of packet data to write as immediate data
 1767  *      @gen: the generation bit value to write
 1768  *
 1769  *      Writes a packet as immediate data into a Tx descriptor.  The packet
 1770  *      contains a work request at its beginning.  We must write the packet
 1771  *      carefully so the SGE doesn't accidentally read it before it has been
 1772  *      written in its entirety.
 1773  */
 1774 static __inline void
 1775 write_imm(struct tx_desc *d, caddr_t src,
 1776           unsigned int len, unsigned int gen)
 1777 {
 1778         struct work_request_hdr *from = (struct work_request_hdr *)src;
 1779         struct work_request_hdr *to = (struct work_request_hdr *)d;
 1780         uint32_t wr_hi, wr_lo;
 1781 
 1782         KASSERT(len <= WR_LEN && len >= sizeof(*from),
 1783             ("%s: invalid len %d", __func__, len));
 1784         
 1785         memcpy(&to[1], &from[1], len - sizeof(*from));
 1786         wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
 1787             V_WR_BCNTLFLT(len & 7));
 1788         wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
 1789         set_wr_hdr(to, wr_hi, wr_lo);
 1790         wmb();
 1791         wr_gen2(d, gen);
 1792 }
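/*
 * Illustrative sketch (not part of the driver, not compiled): the length
 * encoding used by write_imm() above.  The WR carries the length rounded up
 * to whole flits (8-byte units) plus the number of valid bytes in the final
 * flit, mirroring V_WR_LEN((len + 7) / 8) and V_WR_BCNTLFLT(len & 7).  The
 * struct below is a hypothetical stand-in, not a hardware layout.
 */
#if 0
struct example_wr_len {
        unsigned int flits;             /* whole 8-byte units, rounded up */
        unsigned int last_flit_bytes;   /* 0 means the last flit is full */
};

static struct example_wr_len
example_encode_len(unsigned int len)
{
        struct example_wr_len e;

        e.flits = (len + 7) / 8;
        e.last_flit_bytes = len & 7;    /* e.g. len 44 -> 6 flits, 4 bytes */
        return (e);
}
#endif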
 1793 
 1794 /**
 1795  *      check_desc_avail - check descriptor availability on a send queue
 1796  *      @adap: the adapter
 1797  *      @q: the TX queue
 1798  *      @m: the packet needing the descriptors
 1799  *      @ndesc: the number of Tx descriptors needed
 1800  *      @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 1801  *
 1802  *      Checks if the requested number of Tx descriptors is available on an
 1803  *      SGE send queue.  If the queue is already suspended or not enough
 1804  *      descriptors are available the packet is queued for later transmission.
 1805  *      Must be called with the Tx queue locked.
 1806  *
 1807  *      Returns 0 if enough descriptors are available, 1 if there aren't
 1808  *      enough descriptors and the packet has been queued, and 2 if the caller
 1809  *      needs to retry because there weren't enough descriptors at the
 1810  *      beginning of the call but some freed up in the meantime.
 1811  */
 1812 static __inline int
 1813 check_desc_avail(adapter_t *adap, struct sge_txq *q,
 1814                  struct mbuf *m, unsigned int ndesc,
 1815                  unsigned int qid)
 1816 {
 1817         /*
 1818          * XXX We currently only use this for checking the control queue;
 1819          * the control queue is only used for binding qsets, which happens
 1820          * at init time, so we are guaranteed enough descriptors.
 1821          */
 1822         if (__predict_false(mbufq_len(&q->sendq))) {
 1823 addq_exit:      (void)mbufq_enqueue(&q->sendq, m);
 1824                 return 1;
 1825         }
 1826         if (__predict_false(q->size - q->in_use < ndesc)) {
 1827 
 1828                 struct sge_qset *qs = txq_to_qset(q, qid);
 1829 
 1830                 setbit(&qs->txq_stopped, qid);
 1831                 if (should_restart_tx(q) &&
 1832                     test_and_clear_bit(qid, &qs->txq_stopped))
 1833                         return 2;
 1834 
 1835                 q->stops++;
 1836                 goto addq_exit;
 1837         }
 1838         return 0;
 1839 }
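/*
 * Illustrative sketch (not part of the driver, not compiled): how the 0/1/2
 * return codes of check_desc_avail() are meant to be consumed; compare the
 * "again:" loops in ctrl_xmit() and ofld_xmit() below.  The reclaim and
 * desc_avail callbacks here are hypothetical stand-ins for
 * reclaim_completed_tx*() and check_desc_avail().
 */
#if 0
static int
example_xmit(int (*reclaim)(void), int (*desc_avail)(void))
{
        int ret;

        for (;;) {
                (void)reclaim();        /* free completed descriptors first */
                ret = desc_avail();
                if (ret == 0)
                        return (0);     /* room available: write the WR */
                if (ret == 1)
                        return (ENOSPC); /* queued for later; caller backs off */
                /* ret == 2: descriptors freed up meanwhile, retry */
        }
}
#endif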
 1840 
 1841 
 1842 /**
 1843  *      reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 1844  *      @q: the SGE control Tx queue
 1845  *
 1846  *      This is a variant of reclaim_completed_tx() that is used for Tx queues
 1847  *      that send only immediate data (presently just the control queues) and
 1848  *      thus do not have any mbufs.
 1849  */
 1850 static __inline void
 1851 reclaim_completed_tx_imm(struct sge_txq *q)
 1852 {
 1853         unsigned int reclaim = q->processed - q->cleaned;
 1854 
 1855         q->in_use -= reclaim;
 1856         q->cleaned += reclaim;
 1857 }
 1858 
 1859 /**
 1860  *      ctrl_xmit - send a packet through an SGE control Tx queue
 1861  *      @adap: the adapter
 1862  *      @q: the control queue
 1863  *      @m: the packet
 1864  *
 1865  *      Send a packet through an SGE control Tx queue.  Packets sent through
 1866  *      a control queue must fit entirely as immediate data in a single Tx
 1867  *      descriptor and have no page fragments.
 1868  */
 1869 static int
 1870 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
 1871 {
 1872         int ret;
 1873         struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
 1874         struct sge_txq *q = &qs->txq[TXQ_CTRL];
 1875         
 1876         KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
 1877 
 1878         wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
 1879         wrp->wrh_lo = htonl(V_WR_TID(q->token));
 1880 
 1881         TXQ_LOCK(qs);
 1882 again:  reclaim_completed_tx_imm(q);
 1883 
 1884         ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
 1885         if (__predict_false(ret)) {
 1886                 if (ret == 1) {
 1887                         TXQ_UNLOCK(qs);
 1888                         return (ENOSPC);
 1889                 }
 1890                 goto again;
 1891         }
 1892         write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
 1893         
 1894         q->in_use++;
 1895         if (++q->pidx >= q->size) {
 1896                 q->pidx = 0;
 1897                 q->gen ^= 1;
 1898         }
 1899         TXQ_UNLOCK(qs);
 1900         wmb();
 1901         t3_write_reg(adap, A_SG_KDOORBELL,
 1902             F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1903 
 1904         m_free(m);
 1905         return (0);
 1906 }
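/*
 * Illustrative sketch (not part of the driver, not compiled): the
 * producer-index/generation-bit idiom used throughout this file.  Each time
 * the producer index wraps, the generation bit written into the descriptors
 * flips, so entries freshly written this lap can be told apart from stale
 * ones left over from the previous lap.  The struct is a hypothetical
 * stand-in for the relevant sge_txq fields.
 */
#if 0
struct example_ring {
        unsigned int pidx;      /* producer index */
        unsigned int size;      /* number of descriptors */
        unsigned int gen;       /* current generation bit (0 or 1) */
};

static void
example_ring_produce(struct example_ring *r)
{
        if (++r->pidx >= r->size) {
                r->pidx = 0;
                r->gen ^= 1;
        }
}
#endif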
 1907 
 1908 
 1909 /**
 1910  *      restart_ctrlq - restart a suspended control queue
 1911  *      @qs: the queue set containing the control queue
 1912  *
 1913  *      Resumes transmission on a suspended Tx control queue.
 1914  */
 1915 static void
 1916 restart_ctrlq(void *data, int npending)
 1917 {
 1918         struct mbuf *m;
 1919         struct sge_qset *qs = (struct sge_qset *)data;
 1920         struct sge_txq *q = &qs->txq[TXQ_CTRL];
 1921         adapter_t *adap = qs->port->adapter;
 1922 
 1923         TXQ_LOCK(qs);
 1924 again:  reclaim_completed_tx_imm(q);
 1925 
 1926         while (q->in_use < q->size &&
 1927                (m = mbufq_dequeue(&q->sendq)) != NULL) {
 1928 
 1929                 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
 1930                 m_free(m);
 1931 
 1932                 if (++q->pidx >= q->size) {
 1933                         q->pidx = 0;
 1934                         q->gen ^= 1;
 1935                 }
 1936                 q->in_use++;
 1937         }
 1938         if (mbufq_len(&q->sendq)) {
 1939                 setbit(&qs->txq_stopped, TXQ_CTRL);
 1940 
 1941                 if (should_restart_tx(q) &&
 1942                     test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
 1943                         goto again;
 1944                 q->stops++;
 1945         }
 1946         TXQ_UNLOCK(qs);
 1947         t3_write_reg(adap, A_SG_KDOORBELL,
 1948                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 1949 }
 1950 
 1951 
 1952 /*
 1953  * Send a management message through control queue 0
 1954  */
 1955 int
 1956 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
 1957 {
 1958         return ctrl_xmit(adap, &adap->sge.qs[0], m);
 1959 }
 1960 
 1961 /**
 1962  *      free_qset - free the resources of an SGE queue set
 1963  *      @sc: the controller owning the queue set
 1964  *      @q: the queue set
 1965  *
 1966  *      Release the HW and SW resources associated with an SGE queue set, such
 1967  *      as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 1968  *      queue set must be quiesced prior to calling this.
 1969  */
 1970 static void
 1971 t3_free_qset(adapter_t *sc, struct sge_qset *q)
 1972 {
 1973         int i;
 1974         
 1975         reclaim_completed_tx(q, 0, TXQ_ETH);
 1976         if (q->txq[TXQ_ETH].txq_mr != NULL) 
 1977                 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
 1978         if (q->txq[TXQ_ETH].txq_ifq != NULL) {
 1979                 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
 1980                 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
 1981         }
 1982 
 1983         for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 1984                 if (q->fl[i].desc) {
 1985                         mtx_lock_spin(&sc->sge.reg_lock);
 1986                         t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
 1987                         mtx_unlock_spin(&sc->sge.reg_lock);
 1988                         bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
 1989                         bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
 1990                                         q->fl[i].desc_map);
 1991                         bus_dma_tag_destroy(q->fl[i].desc_tag);
 1992                         bus_dma_tag_destroy(q->fl[i].entry_tag);
 1993                 }
 1994                 if (q->fl[i].sdesc) {
 1995                         free_rx_bufs(sc, &q->fl[i]);
 1996                         free(q->fl[i].sdesc, M_DEVBUF);
 1997                 }
 1998         }
 1999 
 2000         mtx_unlock(&q->lock);
 2001         MTX_DESTROY(&q->lock);
 2002         for (i = 0; i < SGE_TXQ_PER_SET; i++) {
 2003                 if (q->txq[i].desc) {
 2004                         mtx_lock_spin(&sc->sge.reg_lock);
 2005                         t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
 2006                         mtx_unlock_spin(&sc->sge.reg_lock);
 2007                         bus_dmamap_unload(q->txq[i].desc_tag,
 2008                                         q->txq[i].desc_map);
 2009                         bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
 2010                                         q->txq[i].desc_map);
 2011                         bus_dma_tag_destroy(q->txq[i].desc_tag);
 2012                         bus_dma_tag_destroy(q->txq[i].entry_tag);
 2013                 }
 2014                 if (q->txq[i].sdesc) {
 2015                         free(q->txq[i].sdesc, M_DEVBUF);
 2016                 }
 2017         }
 2018 
 2019         if (q->rspq.desc) {
 2020                 mtx_lock_spin(&sc->sge.reg_lock);
 2021                 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
 2022                 mtx_unlock_spin(&sc->sge.reg_lock);
 2023                 
 2024                 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
 2025                 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
 2026                                 q->rspq.desc_map);
 2027                 bus_dma_tag_destroy(q->rspq.desc_tag);
 2028                 MTX_DESTROY(&q->rspq.lock);
 2029         }
 2030 
 2031 #if defined(INET6) || defined(INET)
 2032         tcp_lro_free(&q->lro.ctrl);
 2033 #endif
 2034 
 2035         bzero(q, sizeof(*q));
 2036 }
 2037 
 2038 /**
 2039  *      t3_free_sge_resources - free SGE resources
 2040  *      @sc: the adapter softc
 2041  *
 2042  *      Frees resources used by the SGE queue sets.
 2043  */
 2044 void
 2045 t3_free_sge_resources(adapter_t *sc, int nqsets)
 2046 {
 2047         int i;
 2048 
 2049         for (i = 0; i < nqsets; ++i) {
 2050                 TXQ_LOCK(&sc->sge.qs[i]);
 2051                 t3_free_qset(sc, &sc->sge.qs[i]);
 2052         }
 2053 }
 2054 
 2055 /**
 2056  *      t3_sge_start - enable SGE
 2057  *      @sc: the controller softc
 2058  *
 2059  *      Enables the SGE for DMAs.  This is the last step in starting packet
 2060  *      transfers.
 2061  */
 2062 void
 2063 t3_sge_start(adapter_t *sc)
 2064 {
 2065         t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
 2066 }
 2067 
 2068 /**
 2069  *      t3_sge_stop - disable SGE operation
 2070  *      @sc: the adapter
 2071  *
 2072  *      Disables the DMA engine.  This can be called in emergencies (e.g.,
 2073  *      from error interrupts) or from normal process context.  In the latter
 2074  *      case it also disables any pending queue restart tasklets.  Note that
 2075  *      if it is called in interrupt context it cannot disable the restart
 2076  *      tasklets, as it cannot wait; however, the tasklets will have no effect
 2077  *      since the doorbells are disabled and the driver will call this again
 2078  *      later from process context, at which time the tasklets will be stopped
 2079  *      if they are still running.
 2080  */
 2081 void
 2082 t3_sge_stop(adapter_t *sc)
 2083 {
 2084         int i, nqsets;
 2085         
 2086         t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
 2087 
 2088         if (sc->tq == NULL)
 2089                 return;
 2090         
 2091         for (nqsets = i = 0; i < (sc)->params.nports; i++) 
 2092                 nqsets += sc->port[i].nqsets;
 2093 #ifdef notyet
 2094         /*
 2095          * 
 2096          * XXX
 2097          */
 2098         for (i = 0; i < nqsets; ++i) {
 2099                 struct sge_qset *qs = &sc->sge.qs[i];
 2100                 
 2101                 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
 2102                 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
 2103         }
 2104 #endif
 2105 }
 2106 
 2107 /**
 2108  *      t3_free_tx_desc - reclaims Tx descriptors and their buffers
 2109  *      @qs: the queue set that owns the Tx queue
 2110  *      @reclaimable: the number of descriptors to reclaim
 2111  *      @queue: which Tx queue of the set (TXQ_ETH, TXQ_OFLD, or TXQ_CTRL)
 2112  *              to reclaim descriptors from
 2113  *
 2114  *      Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 2115  *      Tx buffers.  Called with the Tx queue lock held.  Mapped buffers are
 2116  *      unloaded, their mbuf chains are freed, and the queue's consumer
 2117  *      index is advanced past the reclaimed descriptors.
 2118  *
 2119  */
 2120 void
 2121 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
 2122 {
 2123         struct tx_sw_desc *txsd;
 2124         unsigned int cidx, mask;
 2125         struct sge_txq *q = &qs->txq[queue];
 2126 
 2127 #ifdef T3_TRACE
 2128         T3_TRACE2(sc->tb[q->cntxt_id & 7],
 2129                   "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
 2130 #endif
 2131         cidx = q->cidx;
 2132         mask = q->size - 1;
 2133         txsd = &q->sdesc[cidx];
 2134 
 2135         mtx_assert(&qs->lock, MA_OWNED);
 2136         while (reclaimable--) {
 2137                 prefetch(q->sdesc[(cidx + 1) & mask].m);
 2138                 prefetch(q->sdesc[(cidx + 2) & mask].m);
 2139 
 2140                 if (txsd->m != NULL) {
 2141                         if (txsd->flags & TX_SW_DESC_MAPPED) {
 2142                                 bus_dmamap_unload(q->entry_tag, txsd->map);
 2143                                 txsd->flags &= ~TX_SW_DESC_MAPPED;
 2144                         }
 2145                         m_freem_list(txsd->m);
 2146                         txsd->m = NULL;
 2147                 } else
 2148                         q->txq_skipped++;
 2149                 
 2150                 ++txsd;
 2151                 if (++cidx == q->size) {
 2152                         cidx = 0;
 2153                         txsd = q->sdesc;
 2154                 }
 2155         }
 2156         q->cidx = cidx;
 2157 
 2158 }
 2159 
 2160 /**
 2161  *      is_new_response - check if a response is newly written
 2162  *      @r: the response descriptor
 2163  *      @q: the response queue
 2164  *
 2165  *      Returns true if a response descriptor contains a yet unprocessed
 2166  *      response.
 2167  */
 2168 static __inline int
 2169 is_new_response(const struct rsp_desc *r,
 2170     const struct sge_rspq *q)
 2171 {
 2172         return (r->intr_gen & F_RSPD_GEN2) == q->gen;
 2173 }
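/*
 * Illustrative sketch (not part of the driver, not compiled): the consumer
 * side of the generation bit checked by is_new_response().  A response slot
 * is "new" only while its stored gen bit matches the value the consumer
 * expects, and that expectation flips each time the consumer index wraps,
 * the same wrap-and-flip pattern the Tx rings use above.  Hypothetical
 * fields standing in for sge_rspq state.
 */
#if 0
struct example_rspq {
        unsigned int cidx;      /* consumer index */
        unsigned int size;      /* number of response descriptors */
        unsigned int gen;       /* generation bit expected in new entries */
};

static void
example_rspq_consume(struct example_rspq *q)
{
        if (++q->cidx == q->size) {
                q->cidx = 0;
                q->gen ^= 1;    /* next lap expects the flipped bit */
        }
}
#endif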
 2174 
 2175 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
 2176 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
 2177                         V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
 2178                         V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
 2179                         V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
 2180 
 2181 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
 2182 #define NOMEM_INTR_DELAY 2500
 2183 
 2184 #ifdef TCP_OFFLOAD
 2185 /**
 2186  *      write_ofld_wr - write an offload work request
 2187  *      @adap: the adapter
 2188  *      @m: the packet to send
 2189  *      @q: the Tx queue
 2190  *      @pidx: index of the first Tx descriptor to write
 2191  *      @gen: the generation value to use
 2192  *      @ndesc: number of descriptors the packet will occupy
 2193  *
 2194  *      Write an offload work request to send the supplied packet.  The packet
 2195  *      data already carry the work request with most fields populated.
 2196  */
 2197 static void
 2198 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
 2199     unsigned int pidx, unsigned int gen, unsigned int ndesc)
 2200 {
 2201         unsigned int sgl_flits, flits;
 2202         int i, idx, nsegs, wrlen;
 2203         struct work_request_hdr *from;
 2204         struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
 2205         struct tx_desc *d = &q->desc[pidx];
 2206         struct txq_state txqs;
 2207         struct sglist_seg *segs;
 2208         struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 2209         struct sglist *sgl;
 2210 
 2211         from = (void *)(oh + 1);        /* Start of WR within mbuf */
 2212         wrlen = m->m_len - sizeof(*oh);
 2213 
 2214         if (!(oh->flags & F_HDR_SGL)) {
 2215                 write_imm(d, (caddr_t)from, wrlen, gen);
 2216 
 2217                 /*
 2218                  * mbuf with "real" immediate tx data will be enqueue_wr'd by
 2219                  * t3_push_frames and freed in wr_ack.  Others, like those sent
 2220                  * down by close_conn, t3_send_reset, etc. should be freed here.
 2221                  */
 2222                 if (!(oh->flags & F_HDR_DF))
 2223                         m_free(m);
 2224                 return;
 2225         }
 2226 
 2227         memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
 2228 
 2229         sgl = oh->sgl;
 2230         flits = wrlen / 8;
 2231         sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
 2232 
 2233         nsegs = sgl->sg_nseg;
 2234         segs = sgl->sg_segs;
 2235         for (idx = 0, i = 0; i < nsegs; i++) {
 2236                 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
 2237                 if (i && idx == 0) 
 2238                         ++sgp;
 2239                 sgp->len[idx] = htobe32(segs[i].ss_len);
 2240                 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
 2241                 idx ^= 1;
 2242         }
 2243         if (idx) {
 2244                 sgp->len[idx] = 0;
 2245                 sgp->addr[idx] = 0;
 2246         }
 2247 
 2248         sgl_flits = sgl_len(nsegs);
 2249         txqs.gen = gen;
 2250         txqs.pidx = pidx;
 2251         txqs.compl = 0;
 2252 
 2253         write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
 2254             from->wrh_hi, from->wrh_lo);
 2255 }
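/*
 * Illustrative sketch (not part of the driver, not compiled): the SGL packing
 * done in write_ofld_wr() and make_sgl().  Each hardware SG entry holds two
 * (length, address) pairs, so segments are written pairwise and an odd tail
 * is terminated with a zero pair.  The types below are hypothetical stand-ins
 * for sg_ent and the DMA segment arrays; byte-swapping is omitted for clarity.
 */
#if 0
struct example_sg_ent {
        uint32_t len[2];
        uint64_t addr[2];
};

static void
example_pack_sgl(struct example_sg_ent *sgp, const uint64_t *addr,
    const uint32_t *len, int nsegs)
{
        int i, idx;

        for (idx = 0, i = 0; i < nsegs; i++) {
                if (i && idx == 0)
                        ++sgp;          /* previous entry holds two pairs */
                sgp->len[idx] = len[i];
                sgp->addr[idx] = addr[i];
                idx ^= 1;
        }
        if (idx) {                      /* odd number of segments */
                sgp->len[idx] = 0;
                sgp->addr[idx] = 0;
        }
}
#endif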
 2256 
 2257 /**
 2258  *      ofld_xmit - send a packet through an offload queue
 2259  *      @adap: the adapter
 2260  *      @q: the Tx offload queue
 2261  *      @m: the packet
 2262  *
 2263  *      Send an offload packet through an SGE offload queue.
 2264  */
 2265 static int
 2266 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
 2267 {
 2268         int ret;
 2269         unsigned int ndesc;
 2270         unsigned int pidx, gen;
 2271         struct sge_txq *q = &qs->txq[TXQ_OFLD];
 2272         struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 2273 
 2274         ndesc = G_HDR_NDESC(oh->flags);
 2275 
 2276         TXQ_LOCK(qs);
 2277 again:  reclaim_completed_tx(qs, 16, TXQ_OFLD);
 2278         ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
 2279         if (__predict_false(ret)) {
 2280                 if (ret == 1) {
 2281                         TXQ_UNLOCK(qs);
 2282                         return (EINTR);
 2283                 }
 2284                 goto again;
 2285         }
 2286 
 2287         gen = q->gen;
 2288         q->in_use += ndesc;
 2289         pidx = q->pidx;
 2290         q->pidx += ndesc;
 2291         if (q->pidx >= q->size) {
 2292                 q->pidx -= q->size;
 2293                 q->gen ^= 1;
 2294         }
 2295 
 2296         write_ofld_wr(adap, m, q, pidx, gen, ndesc);
 2297         check_ring_tx_db(adap, q, 1);
 2298         TXQ_UNLOCK(qs);
 2299 
 2300         return (0);
 2301 }
 2302 
 2303 /**
 2304  *      restart_offloadq - restart a suspended offload queue
 2305  *      @qs: the queue set containing the offload queue
 2306  *
 2307  *      Resumes transmission on a suspended Tx offload queue.
 2308  */
 2309 static void
 2310 restart_offloadq(void *data, int npending)
 2311 {
 2312         struct mbuf *m;
 2313         struct sge_qset *qs = data;
 2314         struct sge_txq *q = &qs->txq[TXQ_OFLD];
 2315         adapter_t *adap = qs->port->adapter;
 2316         int cleaned;
 2317                 
 2318         TXQ_LOCK(qs);
 2319 again:  cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
 2320 
 2321         while ((m = mbufq_first(&q->sendq)) != NULL) {
 2322                 unsigned int gen, pidx;
 2323                 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 2324                 unsigned int ndesc = G_HDR_NDESC(oh->flags);
 2325 
 2326                 if (__predict_false(q->size - q->in_use < ndesc)) {
 2327                         setbit(&qs->txq_stopped, TXQ_OFLD);
 2328                         if (should_restart_tx(q) &&
 2329                             test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
 2330                                 goto again;
 2331                         q->stops++;
 2332                         break;
 2333                 }
 2334 
 2335                 gen = q->gen;
 2336                 q->in_use += ndesc;
 2337                 pidx = q->pidx;
 2338                 q->pidx += ndesc;
 2339                 if (q->pidx >= q->size) {
 2340                         q->pidx -= q->size;
 2341                         q->gen ^= 1;
 2342                 }
 2343                 
 2344                 (void)mbufq_dequeue(&q->sendq);
 2345                 TXQ_UNLOCK(qs);
 2346                 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
 2347                 TXQ_LOCK(qs);
 2348         }
 2349 #if USE_GTS
 2350         set_bit(TXQ_RUNNING, &q->flags);
 2351         set_bit(TXQ_LAST_PKT_DB, &q->flags);
 2352 #endif
 2353         TXQ_UNLOCK(qs);
 2354         wmb();
 2355         t3_write_reg(adap, A_SG_KDOORBELL,
 2356                      F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 2357 }
 2358 
 2359 /**
 2360  *      t3_offload_tx - send an offload packet
 2361  *      @m: the packet
 2362  *
 2363  *      Sends an offload packet.  The ofld_hdr at the front of the packet
 2364  *      selects the Tx queue: F_HDR_CTRL chooses between the control queue
 2365  *      and the offload queue, and G_HDR_QSET selects the queue set.
 2366  */
 2367 int
 2368 t3_offload_tx(struct adapter *sc, struct mbuf *m)
 2369 {
 2370         struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
 2371         struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
 2372 
 2373         if (oh->flags & F_HDR_CTRL) {
 2374                 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
 2375                 return (ctrl_xmit(sc, qs, m));
 2376         } else
 2377                 return (ofld_xmit(sc, qs, m));
 2378 }
 2379 #endif
 2380 
 2381 static void
 2382 restart_tx(struct sge_qset *qs)
 2383 {
 2384         struct adapter *sc = qs->port->adapter;
 2385 
 2386         if (isset(&qs->txq_stopped, TXQ_OFLD) &&
 2387             should_restart_tx(&qs->txq[TXQ_OFLD]) &&
 2388             test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
 2389                 qs->txq[TXQ_OFLD].restarts++;
 2390                 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
 2391         }
 2392 
 2393         if (isset(&qs->txq_stopped, TXQ_CTRL) &&
 2394             should_restart_tx(&qs->txq[TXQ_CTRL]) &&
 2395             test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
 2396                 qs->txq[TXQ_CTRL].restarts++;
 2397                 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
 2398         }
 2399 }
 2400 
 2401 /**
 2402  *      t3_sge_alloc_qset - initialize an SGE queue set
 2403  *      @sc: the controller softc
 2404  *      @id: the queue set id
 2405  *      @nports: how many Ethernet ports will be using this queue set
 2406  *      @irq_vec_idx: the IRQ vector index for response queue interrupts
 2407  *      @p: configuration parameters for this queue set
 2408  *      @ntxq: number of Tx queues for the queue set
 2409  *      @pi: port info for queue set
 2410  *
 2411  *      Allocate resources and initialize an SGE queue set.  A queue set
 2412  *      comprises a response queue, two Rx free-buffer queues, and up to 3
 2413  *      Tx queues.  The Tx queues are assigned roles in the order Ethernet
 2414  *      queue, offload queue, and control queue.
 2415  */
 2416 int
 2417 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
 2418                   const struct qset_params *p, int ntxq, struct port_info *pi)
 2419 {
 2420         struct sge_qset *q = &sc->sge.qs[id];
 2421         int i, ret = 0;
 2422 
 2423         MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
 2424         q->port = pi;
 2425         q->adap = sc;
 2426 
 2427         if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
 2428             M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
 2429                 device_printf(sc->dev, "failed to allocate mbuf ring\n");
 2430                 goto err;
 2431         }
 2432         if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
 2433             M_NOWAIT | M_ZERO)) == NULL) {
 2434                 device_printf(sc->dev, "failed to allocate ifq\n");
 2435                 goto err;
 2436         }
 2437         ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);     
 2438         callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
 2439         callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
 2440         q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
 2441         q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
 2442 
 2443         init_qset_cntxt(q, id);
 2444         q->idx = id;
 2445         if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
 2446                     sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
 2447                     &q->fl[0].desc, &q->fl[0].sdesc,
 2448                     &q->fl[0].desc_tag, &q->fl[0].desc_map,
 2449                     sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
 2450                 printf("error %d from alloc ring fl0\n", ret);
 2451                 goto err;
 2452         }
 2453 
 2454         if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
 2455                     sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
 2456                     &q->fl[1].desc, &q->fl[1].sdesc,
 2457                     &q->fl[1].desc_tag, &q->fl[1].desc_map,
 2458                     sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
 2459                 printf("error %d from alloc ring fl1\n", ret);
 2460                 goto err;
 2461         }
 2462 
 2463         if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
 2464                     &q->rspq.phys_addr, &q->rspq.desc, NULL,
 2465                     &q->rspq.desc_tag, &q->rspq.desc_map,
 2466                     NULL, NULL)) != 0) {
 2467                 printf("error %d from alloc ring rspq\n", ret);
 2468                 goto err;
 2469         }
 2470 
 2471         snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
 2472             device_get_unit(sc->dev), irq_vec_idx);
 2473         MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
 2474 
 2475         for (i = 0; i < ntxq; ++i) {
 2476                 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
 2477 
 2478                 if ((ret = alloc_ring(sc, p->txq_size[i],
 2479                             sizeof(struct tx_desc), sz,
 2480                             &q->txq[i].phys_addr, &q->txq[i].desc,
 2481                             &q->txq[i].sdesc, &q->txq[i].desc_tag,
 2482                             &q->txq[i].desc_map,
 2483                             sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
 2484                         printf("error %d from alloc ring tx %i\n", ret, i);
 2485                         goto err;
 2486                 }
 2487                 mbufq_init(&q->txq[i].sendq, INT_MAX);
 2488                 q->txq[i].gen = 1;
 2489                 q->txq[i].size = p->txq_size[i];
 2490         }
 2491 
 2492 #ifdef TCP_OFFLOAD
 2493         TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
 2494 #endif
 2495         TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
 2496         TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
 2497         TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
 2498 
 2499         q->fl[0].gen = q->fl[1].gen = 1;
 2500         q->fl[0].size = p->fl_size;
 2501         q->fl[1].size = p->jumbo_size;
 2502 
 2503         q->rspq.gen = 1;
 2504         q->rspq.cidx = 0;
 2505         q->rspq.size = p->rspq_size;
 2506 
 2507         q->txq[TXQ_ETH].stop_thres = nports *
 2508             flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
 2509 
 2510         q->fl[0].buf_size = MCLBYTES;
 2511         q->fl[0].zone = zone_pack;
 2512         q->fl[0].type = EXT_PACKET;
 2513 
 2514         if (p->jumbo_buf_size ==  MJUM16BYTES) {
 2515                 q->fl[1].zone = zone_jumbo16;
 2516                 q->fl[1].type = EXT_JUMBO16;
 2517         } else if (p->jumbo_buf_size ==  MJUM9BYTES) {
 2518                 q->fl[1].zone = zone_jumbo9;
 2519                 q->fl[1].type = EXT_JUMBO9;             
 2520         } else if (p->jumbo_buf_size ==  MJUMPAGESIZE) {
 2521                 q->fl[1].zone = zone_jumbop;
 2522                 q->fl[1].type = EXT_JUMBOP;
 2523         } else {
 2524                 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
 2525                 ret = EDOOFUS;
 2526                 goto err;
 2527         }
 2528         q->fl[1].buf_size = p->jumbo_buf_size;
 2529 
 2530         /* Allocate and setup the lro_ctrl structure */
 2531         q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
 2532 #if defined(INET6) || defined(INET)
 2533         ret = tcp_lro_init(&q->lro.ctrl);
 2534         if (ret) {
 2535                 printf("error %d from tcp_lro_init\n", ret);
 2536                 goto err;
 2537         }
 2538 #endif
 2539         q->lro.ctrl.ifp = pi->ifp;
 2540 
 2541         mtx_lock_spin(&sc->sge.reg_lock);
 2542         ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
 2543                                    q->rspq.phys_addr, q->rspq.size,
 2544                                    q->fl[0].buf_size, 1, 0);
 2545         if (ret) {
 2546                 printf("error %d from t3_sge_init_rspcntxt\n", ret);
 2547                 goto err_unlock;
 2548         }
 2549 
 2550         for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
 2551                 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
 2552                                           q->fl[i].phys_addr, q->fl[i].size,
 2553                                           q->fl[i].buf_size, p->cong_thres, 1,
 2554                                           0);
 2555                 if (ret) {
 2556                         printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
 2557                         goto err_unlock;
 2558                 }
 2559         }
 2560 
 2561         ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
 2562                                  SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
 2563                                  q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
 2564                                  1, 0);
 2565         if (ret) {
 2566                 printf("error %d from t3_sge_init_ecntxt\n", ret);
 2567                 goto err_unlock;
 2568         }
 2569 
 2570         if (ntxq > 1) {
 2571                 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
 2572                                          USE_GTS, SGE_CNTXT_OFLD, id,
 2573                                          q->txq[TXQ_OFLD].phys_addr,
 2574                                          q->txq[TXQ_OFLD].size, 0, 1, 0);
 2575                 if (ret) {
 2576                         printf("error %d from t3_sge_init_ecntxt\n", ret);
 2577                         goto err_unlock;
 2578                 }
 2579         }
 2580 
 2581         if (ntxq > 2) {
 2582                 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
 2583                                          SGE_CNTXT_CTRL, id,
 2584                                          q->txq[TXQ_CTRL].phys_addr,
 2585                                          q->txq[TXQ_CTRL].size,
 2586                                          q->txq[TXQ_CTRL].token, 1, 0);
 2587                 if (ret) {
 2588                         printf("error %d from t3_sge_init_ecntxt\n", ret);
 2589                         goto err_unlock;
 2590                 }
 2591         }
 2592 
 2593         mtx_unlock_spin(&sc->sge.reg_lock);
 2594         t3_update_qset_coalesce(q, p);
 2595 
 2596         refill_fl(sc, &q->fl[0], q->fl[0].size);
 2597         refill_fl(sc, &q->fl[1], q->fl[1].size);
 2598         refill_rspq(sc, &q->rspq, q->rspq.size - 1);
 2599 
 2600         t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
 2601                      V_NEWTIMER(q->rspq.holdoff_tmr));
 2602 
 2603         return (0);
 2604 
 2605 err_unlock:
 2606         mtx_unlock_spin(&sc->sge.reg_lock);
 2607 err:    
 2608         TXQ_LOCK(q);
 2609         t3_free_qset(sc, q);
 2610 
 2611         return (ret);
 2612 }
 2613 
 2614 /*
 2615  * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
 2616  * ethernet data.  Hardware assistance with various checksums and any vlan tag
 2617  * will also be taken into account here.
 2618  */
 2619 void
 2620 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
 2621 {
 2622         struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
 2623         struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
 2624         struct ifnet *ifp = pi->ifp;
 2625         
 2626         if (cpl->vlan_valid) {
 2627                 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
 2628                 m->m_flags |= M_VLANTAG;
 2629         } 
 2630 
 2631         m->m_pkthdr.rcvif = ifp;
 2632         /*
 2633          * adjust after conversion to mbuf chain
 2634          */
 2635         m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
 2636         m->m_len -= (sizeof(*cpl) + ethpad);
 2637         m->m_data += (sizeof(*cpl) + ethpad);
 2638 
 2639         if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
 2640                 struct ether_header *eh = mtod(m, void *);
 2641                 uint16_t eh_type;
 2642 
 2643                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2644                         struct ether_vlan_header *evh = mtod(m, void *);
 2645 
 2646                         eh_type = evh->evl_proto;
 2647                 } else
 2648                         eh_type = eh->ether_type;
 2649 
 2650                 if (ifp->if_capenable & IFCAP_RXCSUM &&
 2651                     eh_type == htons(ETHERTYPE_IP)) {
 2652                         m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
 2653                             CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 2654                         m->m_pkthdr.csum_data = 0xffff;
 2655                 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
 2656                     eh_type == htons(ETHERTYPE_IPV6)) {
 2657                         m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
 2658                             CSUM_PSEUDO_HDR);
 2659                         m->m_pkthdr.csum_data = 0xffff;
 2660                 }
 2661         }
 2662 }
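/*
 * Illustrative sketch (not part of the driver, not compiled): how the
 * hardware checksum verdict above maps onto mbuf checksum flags.  The CSUM_*
 * values come from <sys/mbuf.h>; hw_csum_ok stands in for the
 * !cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff test, and the
 * cap_* arguments stand in for the interface capability bits.
 */
#if 0
static int
example_rx_csum_flags(int hw_csum_ok, int is_ipv4, int is_ipv6,
    int cap_rxcsum, int cap_rxcsum_ipv6)
{
        if (!hw_csum_ok)
                return (0);
        if (cap_rxcsum && is_ipv4)
                return (CSUM_IP_CHECKED | CSUM_IP_VALID |
                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
        if (cap_rxcsum_ipv6 && is_ipv6)
                return (CSUM_DATA_VALID_IPV6 | CSUM_PSEUDO_HDR);
        return (0);
}
#endif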
 2663 
 2664 /**
 2665  *      get_packet - return the next ingress packet buffer from a free list
 2666  *      @adap: the adapter that received the packet
 2667  *      @drop_thres: # of remaining buffers before we start dropping packets
 2668  *      @qs: the qset that the SGE free list holding the packet belongs to
 2669  *      @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
 2670  *      @r: response descriptor 
 2671  *
 2672  *      Get the next packet from a free list and complete setup of the
 2673  *      sk_buff.  If the packet is small we make a copy and recycle the
 2674  *      mbuf.  If the packet is small we make a copy and recycle the
 2675  *      positive drop threshold is supplied packets are dropped and their
 2676  *      buffers recycled if (a) the number of remaining buffers is under the
 2677  *      threshold and the packet is too big to copy, or (b) the packet should
 2678  *      be copied but there is no memory for the copy.
 2679  */
 2680 static int
 2681 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
 2682     struct t3_mbuf_hdr *mh, struct rsp_desc *r)
 2683 {
 2684 
 2685         unsigned int len_cq =  ntohl(r->len_cq);
 2686         struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
 2687         int mask, cidx = fl->cidx;
 2688         struct rx_sw_desc *sd = &fl->sdesc[cidx];
 2689         uint32_t len = G_RSPD_LEN(len_cq);
 2690         uint32_t flags = M_EXT;
 2691         uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
 2692         caddr_t cl;
 2693         struct mbuf *m;
 2694         int ret = 0;
 2695 
 2696         mask = fl->size - 1;
 2697         prefetch(fl->sdesc[(cidx + 1) & mask].m);
 2698         prefetch(fl->sdesc[(cidx + 2) & mask].m);
 2699         prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
 2700         prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl); 
 2701 
 2702         fl->credits--;
 2703         bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
 2704         
 2705         if (recycle_enable && len <= SGE_RX_COPY_THRES &&
 2706             sopeop == RSPQ_SOP_EOP) {
 2707                 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
 2708                         goto skip_recycle;
 2709                 cl = mtod(m, void *);
 2710                 memcpy(cl, sd->rxsd_cl, len);
 2711                 recycle_rx_buf(adap, fl, fl->cidx);
 2712                 m->m_pkthdr.len = m->m_len = len;
 2713                 m->m_flags = 0;
 2714                 mh->mh_head = mh->mh_tail = m;
 2715                 ret = 1;
 2716                 goto done;
 2717         } else {
 2718         skip_recycle:
 2719                 bus_dmamap_unload(fl->entry_tag, sd->map);
 2720                 cl = sd->rxsd_cl;
 2721                 m = sd->m;
 2722 
 2723                 if ((sopeop == RSPQ_SOP_EOP) ||
 2724                     (sopeop == RSPQ_SOP))
 2725                         flags |= M_PKTHDR;
 2726                 m_init(m, M_NOWAIT, MT_DATA, flags);
 2727                 if (fl->zone == zone_pack) {
 2728                         /*
 2729                          * restore clobbered data pointer
 2730                          */
 2731                         m->m_data = m->m_ext.ext_buf;
 2732                 } else {
 2733                         m_cljset(m, cl, fl->type);
 2734                 }
 2735                 m->m_len = len;
 2736         }               
 2737         switch(sopeop) {
 2738         case RSPQ_SOP_EOP:
 2739                 ret = 1;
 2740                 /* FALLTHROUGH */
 2741         case RSPQ_SOP:
 2742                 mh->mh_head = mh->mh_tail = m;
 2743                 m->m_pkthdr.len = len;
 2744                 break;
 2745         case RSPQ_EOP:
 2746                 ret = 1;
 2747                 /* FALLTHROUGH */
 2748         case RSPQ_NSOP_NEOP:
 2749                 if (mh->mh_tail == NULL) {
 2750                         log(LOG_ERR, "discarding intermediate descriptor entry\n");
 2751                         m_freem(m);
 2752                         break;
 2753                 }
 2754                 mh->mh_tail->m_next = m;
 2755                 mh->mh_tail = m;
 2756                 mh->mh_head->m_pkthdr.len += len;
 2757                 break;
 2758         }
 2759         if (cxgb_debug)
 2760                 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
 2761 done:
 2762         if (++fl->cidx == fl->size)
 2763                 fl->cidx = 0;
 2764 
 2765         return (ret);
 2766 }
 2767 
 2768 /**
 2769  *      handle_rsp_cntrl_info - handles control information in a response
 2770  *      @qs: the queue set corresponding to the response
 2771  *      @flags: the response control flags
 2772  *
 2773  *      Handles the control information of an SGE response, such as GTS
 2774  *      indications and completion credits for the queue set's Tx queues.
 2775  *      The HW coalesces credits; we don't do any extra SW coalescing.
 2776  */
 2777 static __inline void
 2778 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
 2779 {
 2780         unsigned int credits;
 2781 
 2782 #if USE_GTS
 2783         if (flags & F_RSPD_TXQ0_GTS)
 2784                 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
 2785 #endif
 2786         credits = G_RSPD_TXQ0_CR(flags);
 2787         if (credits) 
 2788                 qs->txq[TXQ_ETH].processed += credits;
 2789 
 2790         credits = G_RSPD_TXQ2_CR(flags);
 2791         if (credits)
 2792                 qs->txq[TXQ_CTRL].processed += credits;
 2793 
 2794 # if USE_GTS
 2795         if (flags & F_RSPD_TXQ1_GTS)
 2796                 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
 2797 # endif
 2798         credits = G_RSPD_TXQ1_CR(flags);
 2799         if (credits)
 2800                 qs->txq[TXQ_OFLD].processed += credits;
 2801 
 2802 }
 2803 
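      /*
       * check_ring_db - placeholder for ringing Tx doorbells
       *
       * The caller passes the mask of Tx queues that asserted GTS in
       * 'sleeping'; this driver currently leaves the function as a no-op.
       */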
 2804 static void
 2805 check_ring_db(adapter_t *adap, struct sge_qset *qs,
 2806     unsigned int sleeping)
 2807 {
 2808         ;
 2809 }
 2810 
 2811 /**
 2812  *      process_responses - process responses from an SGE response queue
 2813  *      @adap: the adapter
 2814  *      @qs: the queue set to which the response queue belongs
 2815  *      @budget: how many responses can be processed in this round
 2816  *
 2817  *      Process responses from an SGE response queue up to the supplied budget.
 2818  *      Responses include received packets as well as credits and other events
 2819  *      for the queues that belong to the response queue's queue set.
 2820  *      A negative budget is effectively unlimited.
 2821  *
 2822  *      Additionally choose the interrupt holdoff time for the next interrupt
 2823  *      on this queue.  If the system is under memory pressure, use a fairly
 2824  *      long delay to help recovery.
 2825  */
 2826 static int
 2827 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
 2828 {
 2829         struct sge_rspq *rspq = &qs->rspq;
 2830         struct rsp_desc *r = &rspq->desc[rspq->cidx];
 2831         int budget_left = budget;
 2832         unsigned int sleeping = 0;
 2833 #if defined(INET6) || defined(INET)
 2834         int lro_enabled = qs->lro.enabled;
 2835         int skip_lro;
 2836         struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
 2837 #endif
 2838         struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
 2839 #ifdef DEBUG    
 2840         static int last_holdoff = 0;
 2841         if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
 2842                 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
 2843                 last_holdoff = rspq->holdoff_tmr;
 2844         }
 2845 #endif
 2846         rspq->next_holdoff = rspq->holdoff_tmr;
 2847 
 2848         while (__predict_true(budget_left && is_new_response(r, rspq))) {
 2849                 int eth, eop = 0, ethpad = 0;
 2850                 uint32_t flags = ntohl(r->flags);
 2851                 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
 2852                 uint8_t opcode = r->rss_hdr.opcode;
 2853                 
 2854                 eth = (opcode == CPL_RX_PKT);
 2855                 
 2856                 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
 2857                         struct mbuf *m;
 2858 
 2859                         if (cxgb_debug)
 2860                                 printf("async notification\n");
 2861 
 2862                         if (mh->mh_head == NULL) {
 2863                                 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
 2864                                 m = mh->mh_head;
 2865                         } else {
 2866                                 m = m_gethdr(M_NOWAIT, MT_DATA);
 2867                         }
 2868                         if (m == NULL)
 2869                                 goto no_mem;
 2870 
 2871                         memcpy(mtod(m, char *), r, AN_PKT_SIZE);
 2872                         m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
 2873                         *mtod(m, uint8_t *) = CPL_ASYNC_NOTIF;
 2874                         opcode = CPL_ASYNC_NOTIF;
 2875                         eop = 1;
 2876                         rspq->async_notif++;
 2877                         goto skip;
 2878                 } else if  (flags & F_RSPD_IMM_DATA_VALID) {
 2879                         struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
 2880 
 2881                         if (m == NULL) {        
 2882                 no_mem:
 2883                                 rspq->next_holdoff = NOMEM_INTR_DELAY;
 2884                                 budget_left--;
 2885                                 break;
 2886                         }
 2887                         if (mh->mh_head == NULL)
 2888                                 mh->mh_head = m;
 2889                         else 
 2890                                 mh->mh_tail->m_next = m;
 2891                         mh->mh_tail = m;
 2892 
 2893                         get_imm_packet(adap, r, m);
 2894                         mh->mh_head->m_pkthdr.len += m->m_len;
 2895                         eop = 1;
 2896                         rspq->imm_data++;
 2897                 } else if (r->len_cq) {
 2898                         int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
 2899                         
 2900                         eop = get_packet(adap, drop_thresh, qs, mh, r);
 2901                         if (eop) {
 2902                                 if (r->rss_hdr.hash_type && !adap->timestamp) {
 2903                                         M_HASHTYPE_SET(mh->mh_head,
 2904                                             M_HASHTYPE_OPAQUE_HASH);
 2905                                         mh->mh_head->m_pkthdr.flowid = rss_hash;
 2906                                 }
 2907                         }
 2908                         
 2909                         ethpad = 2;
 2910                 } else {
 2911                         rspq->pure_rsps++;
 2912                 }
 2913         skip:
 2914                 if (flags & RSPD_CTRL_MASK) {
 2915                         sleeping |= flags & RSPD_GTS_MASK;
 2916                         handle_rsp_cntrl_info(qs, flags);
 2917                 }
 2918 
 2919                 if (!eth && eop) {
 2920                         rspq->offload_pkts++;
 2921 #ifdef TCP_OFFLOAD
 2922                         adap->cpl_handler[opcode](qs, r, mh->mh_head);
 2923 #else
 2924                         m_freem(mh->mh_head);
 2925 #endif
 2926                         mh->mh_head = NULL;
 2927                 } else if (eth && eop) {
 2928                         struct mbuf *m = mh->mh_head;
 2929 
 2930                         t3_rx_eth(adap, m, ethpad);
 2931 
 2932                         /*
 2933                          * The T304 sends incoming packets on any qset.  If LRO
 2934                          * is also enabled, we could end up sending the packet up
 2935                          * lro_ctrl->ifp's input.  That is incorrect.
 2936                          *
 2937                          * The mbuf's rcvif was derived from the cpl header and
 2938                          * is accurate.  Skip LRO and just use that.
 2939                          */
 2940 #if defined(INET6) || defined(INET)
 2941                         skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
 2942 
 2943                         if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
 2944                             && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
 2945                             ) {
 2946                                 /* successfully queued for LRO */
 2947                         } else
 2948 #endif
 2949                         {
 2950                                 /*
 2951                                  * LRO not enabled, packet unsuitable for LRO,
 2952                                  * or unable to queue.  Pass it up right now in
 2953                                  * either case.
 2954                                  */
 2955                                 struct ifnet *ifp = m->m_pkthdr.rcvif;
 2956                                 (*ifp->if_input)(ifp, m);
 2957                         }
 2958                         mh->mh_head = NULL;
 2959 
 2960                 }
 2961 
 2962                 r++;
 2963                 if (__predict_false(++rspq->cidx == rspq->size)) {
 2964                         rspq->cidx = 0;
 2965                         rspq->gen ^= 1;
 2966                         r = rspq->desc;
 2967                 }
 2968 
 2969                 if (++rspq->credits >= 64) {
 2970                         refill_rspq(adap, rspq, rspq->credits);
 2971                         rspq->credits = 0;
 2972                 }
 2973                 __refill_fl_lt(adap, &qs->fl[0], 32);
 2974                 __refill_fl_lt(adap, &qs->fl[1], 32);
 2975                 --budget_left;
 2976         }
 2977 
 2978 #if defined(INET6) || defined(INET)
 2979         /* Flush LRO */
 2980         tcp_lro_flush_all(lro_ctrl);
 2981 #endif
 2982 
 2983         if (sleeping)
 2984                 check_ring_db(adap, qs, sleeping);
 2985 
 2986         mb();  /* commit Tx queue processed updates */
 2987         if (__predict_false(qs->txq_stopped > 1))
 2988                 restart_tx(qs);
 2989 
 2990         __refill_fl_lt(adap, &qs->fl[0], 512);
 2991         __refill_fl_lt(adap, &qs->fl[1], 512);
 2992         budget -= budget_left;
 2993         return (budget);
 2994 }
 2995 
 2996 /*
 2997  * A helper function that processes responses and issues GTS.
 2998  */
 2999 static __inline int
 3000 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
 3001 {
 3002         int work;
 3003         static int last_holdoff = 0;
 3004         
 3005         work = process_responses(adap, rspq_to_qset(rq), -1);
 3006 
 3007         if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
 3008                 printf("next_holdoff=%d\n", rq->next_holdoff);
 3009                 last_holdoff = rq->next_holdoff;
 3010         }
 3011         t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
 3012             V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
 3013         
 3014         return (work);
 3015 }
 3016 
 3017 
 3018 /*
 3019  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 3020  * Handles data events from SGE response queues as well as error and other
 3021  * async events as they all use the same interrupt pin.  We use one SGE
 3022  * response queue per port in this mode and protect all response queues with
 3023  * queue 0's lock.
 3024  */
 3025 void
 3026 t3b_intr(void *data)
 3027 {
 3028         uint32_t i, map;
 3029         adapter_t *adap = data;
 3030         struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
 3031         
 3032         t3_write_reg(adap, A_PL_CLI, 0);
 3033         map = t3_read_reg(adap, A_SG_DATA_INTR);
 3034 
 3035         if (!map) 
 3036                 return;
 3037 
 3038         if (__predict_false(map & F_ERRINTR)) {
 3039                 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
 3040                 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
 3041                 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
 3042         }
 3043 
 3044         mtx_lock(&q0->lock);
 3045         for_each_port(adap, i)
 3046             if (map & (1 << i))
 3047                         process_responses_gts(adap, &adap->sge.qs[i].rspq);
 3048         mtx_unlock(&q0->lock);
 3049 }
 3050 
 3051 /*
 3052  * The MSI interrupt handler.  This needs to handle data events from SGE
 3053  * response queues as well as error and other async events as they all use
 3054  * the same MSI vector.  We use one SGE response queue per port in this mode
 3055  * and protect all response queues with queue 0's lock.
 3056  */
 3057 void
 3058 t3_intr_msi(void *data)
 3059 {
 3060         adapter_t *adap = data;
 3061         struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
 3062         int i, new_packets = 0;
 3063 
 3064         mtx_lock(&q0->lock);
 3065 
 3066         for_each_port(adap, i)
 3067             if (process_responses_gts(adap, &adap->sge.qs[i].rspq)) 
 3068                     new_packets = 1;
 3069         mtx_unlock(&q0->lock);
 3070         if (new_packets == 0) {
 3071                 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
 3072                 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
 3073                 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
 3074         }
 3075 }
 3076 
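      /*
       * The MSI-X interrupt handler.  Each queue set has its own vector and
       * response queue, so no shared lock is taken here; interrupts that
       * yield no work are counted in unhandled_irqs.
       */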
 3077 void
 3078 t3_intr_msix(void *data)
 3079 {
 3080         struct sge_qset *qs = data;
 3081         adapter_t *adap = qs->port->adapter;
 3082         struct sge_rspq *rspq = &qs->rspq;
 3083 
 3084         if (process_responses_gts(adap, rspq) == 0)
 3085                 rspq->unhandled_irqs++;
 3086 }
 3087 
 3088 #define QDUMP_SBUF_SIZE         (32 * 400)
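      /*
       * Sysctl handler that produces a human-readable dump of a response
       * queue: the queue context read from hardware followed by the
       * descriptors in the window selected by rspq_dump_start and
       * rspq_dump_count.
       */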
 3089 static int
 3090 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
 3091 {
 3092         struct sge_rspq *rspq;
 3093         struct sge_qset *qs;
 3094         int i, err, dump_end, idx;
 3095         struct sbuf *sb;
 3096         struct rsp_desc *rspd;
 3097         uint32_t data[4];
 3098         
 3099         rspq = arg1;
 3100         qs = rspq_to_qset(rspq);
 3101         if (rspq->rspq_dump_count == 0) 
 3102                 return (0);
 3103         if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
 3104                 log(LOG_WARNING,
 3105                     "dump count %d is too large\n", rspq->rspq_dump_count);
 3106                 rspq->rspq_dump_count = 0;
 3107                 return (EINVAL);
 3108         }
 3109         if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
 3110                 log(LOG_WARNING,
 3111                     "dump start of %d is greater than queue size\n",
 3112                     rspq->rspq_dump_start);
 3113                 rspq->rspq_dump_start = 0;
 3114                 return (EINVAL);
 3115         }
 3116         err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
 3117         if (err)
 3118                 return (err);
 3119         err = sysctl_wire_old_buffer(req, 0);
 3120         if (err)
 3121                 return (err);
 3122         sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
 3123 
 3124         sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
 3125             (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
 3126             ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
 3127         sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
 3128             ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
 3129         
 3130         sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
 3131             (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
 3132         
 3133         dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
 3134         for (i = rspq->rspq_dump_start; i < dump_end; i++) {
 3135                 idx = i & (RSPQ_Q_SIZE-1);
 3136                 
 3137                 rspd = &rspq->desc[idx];
 3138                 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
 3139                     idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
 3140                     rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
 3141                 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
 3142                     rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
 3143                     be32toh(rspd->len_cq), rspd->intr_gen);
 3144         }
 3145 
 3146         err = sbuf_finish(sb);
 3147         sbuf_delete(sb);
 3148         return (err);
 3149 }       
 3150 
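      /*
       * Sysctl handler that dumps an Ethernet Tx queue: the egress context
       * read from hardware followed by the work requests in the window
       * selected by txq_dump_start and txq_dump_count.
       */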
 3151 static int
 3152 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
 3153 {
 3154         struct sge_txq *txq;
 3155         struct sge_qset *qs;
 3156         int i, j, err, dump_end;
 3157         struct sbuf *sb;
 3158         struct tx_desc *txd;
 3159         uint32_t *WR, wr_hi, wr_lo, gen;
 3160         uint32_t data[4];
 3161         
 3162         txq = arg1;
 3163         qs = txq_to_qset(txq, TXQ_ETH);
 3164         if (txq->txq_dump_count == 0) {
 3165                 return (0);
 3166         }
 3167         if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
 3168                 log(LOG_WARNING,
 3169                     "dump count %d is too large\n", txq->txq_dump_count);
 3170                 txq->txq_dump_count = 1;
 3171                 return (EINVAL);
 3172         }
 3173         if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
 3174                 log(LOG_WARNING,
 3175                     "dump start of %d is greater than queue size\n",
 3176                     txq->txq_dump_start);
 3177                 txq->txq_dump_start = 0;
 3178                 return (EINVAL);
 3179         }
 3180         err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
 3181         if (err)
 3182                 return (err);
 3183         err = sysctl_wire_old_buffer(req, 0);
 3184         if (err)
 3185                 return (err);
 3186         sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
 3187 
 3188         sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
 3189             (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16), 
 3190             (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
 3191         sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
 3192             ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
 3193             ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
 3194         sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
 3195             txq->txq_dump_start,
 3196             (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
 3197 
 3198         dump_end = txq->txq_dump_start + txq->txq_dump_count;
 3199         for (i = txq->txq_dump_start; i < dump_end; i++) {
 3200                 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
 3201                 WR = (uint32_t *)txd->flit;
 3202                 wr_hi = ntohl(WR[0]);
 3203                 wr_lo = ntohl(WR[1]);           
 3204                 gen = G_WR_GEN(wr_lo);
 3205                 
 3206                 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
 3207                     wr_hi, wr_lo, gen);
 3208                 for (j = 2; j < 30; j += 4) 
 3209                         sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
 3210                             WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
 3211 
 3212         }
 3213         err = sbuf_finish(sb);
 3214         sbuf_delete(sb);
 3215         return (err);
 3216 }
 3217 
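      /*
       * Sysctl handler that dumps the selected window of control queue
       * descriptors; the control queue is treated as having 256 entries.
       */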
 3218 static int
 3219 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
 3220 {
 3221         struct sge_txq *txq;
 3222         struct sge_qset *qs;
 3223         int i, j, err, dump_end;
 3224         struct sbuf *sb;
 3225         struct tx_desc *txd;
 3226         uint32_t *WR, wr_hi, wr_lo, gen;
 3227         
 3228         txq = arg1;
 3229         qs = txq_to_qset(txq, TXQ_CTRL);
 3230         if (txq->txq_dump_count == 0) {
 3231                 return (0);
 3232         }
 3233         if (txq->txq_dump_count > 256) {
 3234                 log(LOG_WARNING,
 3235                     "dump count %d is too large\n", txq->txq_dump_count);
 3236                 txq->txq_dump_count = 1;
 3237                 return (EINVAL);
 3238         }
 3239         if (txq->txq_dump_start > 255) {
 3240                 log(LOG_WARNING,
 3241                     "dump start of %d is greater than queue size\n",
 3242                     txq->txq_dump_start);
 3243                 txq->txq_dump_start = 0;
 3244                 return (EINVAL);
 3245         }
 3246 
 3247         err = sysctl_wire_old_buffer(req, 0);
 3248         if (err != 0)
 3249                 return (err);
 3250         sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
 3251         sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
 3252             txq->txq_dump_start,
 3253             (txq->txq_dump_start + txq->txq_dump_count) & 255);
 3254 
 3255         dump_end = txq->txq_dump_start + txq->txq_dump_count;
 3256         for (i = txq->txq_dump_start; i < dump_end; i++) {
 3257                 txd = &txq->desc[i & (255)];
 3258                 WR = (uint32_t *)txd->flit;
 3259                 wr_hi = ntohl(WR[0]);
 3260                 wr_lo = ntohl(WR[1]);           
 3261                 gen = G_WR_GEN(wr_lo);
 3262                 
 3263                 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
 3264                     wr_hi, wr_lo, gen);
 3265                 for (j = 2; j < 30; j += 4) 
 3266                         sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
 3267                             WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
 3268 
 3269         }
 3270         err = sbuf_finish(sb);
 3271         sbuf_delete(sb);
 3272         return (err);
 3273 }
 3274 
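      /*
       * Sysctl handler for the interrupt coalescing timer.  The new value
       * (in microseconds, clamped to at least 1) is applied to every queue
       * set and pushed to the hardware through A_SG_GTS.
       */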
 3275 static int
 3276 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
 3277 {
 3278         adapter_t *sc = arg1;
 3279         struct qset_params *qsp = &sc->params.sge.qset[0]; 
 3280         int coalesce_usecs;     
 3281         struct sge_qset *qs;
 3282         int i, j, err, nqsets = 0;
 3283         struct mtx *lock;
 3284 
 3285         if ((sc->flags & FULL_INIT_DONE) == 0)
 3286                 return (ENXIO);
 3287                 
 3288         coalesce_usecs = qsp->coalesce_usecs;
 3289         err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
 3290 
 3291         if (err != 0) {
 3292                 return (err);
 3293         }
 3294         if (coalesce_usecs == qsp->coalesce_usecs)
 3295                 return (0);
 3296 
 3297         for (i = 0; i < sc->params.nports; i++) 
 3298                 for (j = 0; j < sc->port[i].nqsets; j++)
 3299                         nqsets++;
 3300 
 3301         coalesce_usecs = max(1, coalesce_usecs);
 3302 
 3303         for (i = 0; i < nqsets; i++) {
 3304                 qs = &sc->sge.qs[i];
 3305                 qsp = &sc->params.sge.qset[i];
 3306                 qsp->coalesce_usecs = coalesce_usecs;
 3307                 
 3308                 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
 3309                             &sc->sge.qs[0].rspq.lock;
 3310 
 3311                 mtx_lock(lock);
 3312                 t3_update_qset_coalesce(qs, qsp);
 3313                 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
 3314                     V_NEWTIMER(qs->rspq.holdoff_tmr));
 3315                 mtx_unlock(lock);
 3316         }
 3317 
 3318         return (0);
 3319 }
 3320 
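      /*
       * Sysctl handler that toggles F_ENABLERXPKTTMSTPRSS so the hardware
       * delivers a packet timestamp in place of the RSS hash in ingress
       * responses.
       */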
 3321 static int
 3322 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
 3323 {
 3324         adapter_t *sc = arg1;
 3325         int rc, timestamp;
 3326 
 3327         if ((sc->flags & FULL_INIT_DONE) == 0)
 3328                 return (ENXIO);
 3329 
 3330         timestamp = sc->timestamp;
 3331         rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
 3332 
 3333         if (rc != 0)
 3334                 return (rc);
 3335 
 3336         if (timestamp != sc->timestamp) {
 3337                 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
 3338                     timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
 3339                 sc->timestamp = timestamp;
 3340         }
 3341 
 3342         return (0);
 3343 }
 3344 
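      /*
       * Register the adapter-wide sysctls that are valid as soon as the
       * device attaches: firmware version, hardware revision, port types,
       * debug knobs, and a few global counters.
       */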
 3345 void
 3346 t3_add_attach_sysctls(adapter_t *sc)
 3347 {
 3348         struct sysctl_ctx_list *ctx;
 3349         struct sysctl_oid_list *children;
 3350 
 3351         ctx = device_get_sysctl_ctx(sc->dev);
 3352         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3353 
 3354         /* General adapter information. */
 3355         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 
 3356             "firmware_version",
 3357             CTLFLAG_RD, sc->fw_version,
 3358             0, "firmware version");
 3359         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
 3360             "hw_revision",
 3361             CTLFLAG_RD, &sc->params.rev,
 3362             0, "chip model");
 3363         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 
 3364             "port_types",
 3365             CTLFLAG_RD, sc->port_types,
 3366             0, "type of ports");
 3367         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3368             "enable_debug",
 3369             CTLFLAG_RW, &cxgb_debug,
 3370             0, "enable verbose debugging output");
 3371         SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
 3372             CTLFLAG_RD, &sc->tunq_coalesce,
 3373             "#tunneled packets freed");
 3374         SYSCTL_ADD_INT(ctx, children, OID_AUTO, 
 3375             "txq_overrun",
 3376             CTLFLAG_RD, &txq_fills,
 3377             0, "#times txq overrun");
 3378         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
 3379             "core_clock",
 3380             CTLFLAG_RD, &sc->params.vpd.cclk,
 3381             0, "core clock frequency (in KHz)");
 3382 }
 3383 
 3384 
 3385 static const char *rspq_name = "rspq";
 3386 static const char *txq_names[] =
 3387 {
 3388         "txq_eth",
 3389         "txq_ofld",
 3390         "txq_ctrl"      
 3391 };
 3392 
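      /*
       * Sysctl handler for a single 64-bit MAC statistic.  arg2 is the byte
       * offset of the counter within struct mac_stats; the statistics are
       * refreshed from the hardware before the value is returned.
       */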
 3393 static int
 3394 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
 3395 {
 3396         struct port_info *p = arg1;
 3397         uint64_t *parg;
 3398 
 3399         if (!p)
 3400                 return (EINVAL);
 3401 
 3402         cxgb_refresh_stats(p);
 3403         parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
 3404 
 3405         return (sysctl_handle_64(oidp, parg, 0, req));
 3406 }
 3407 
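      /*
       * Register the sysctls that depend on the final configuration: the
       * interrupt coalescing and packet timestamp knobs, per-port and
       * per-queue-set statistics and queue dump handlers, LRO counters, and
       * the MAC statistics exported through sysctl_handle_macstat().
       */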
 3408 void
 3409 t3_add_configured_sysctls(adapter_t *sc)
 3410 {
 3411         struct sysctl_ctx_list *ctx;
 3412         struct sysctl_oid_list *children;
 3413         int i, j;
 3414         
 3415         ctx = device_get_sysctl_ctx(sc->dev);
 3416         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
 3417 
 3418         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 
 3419             "intr_coal",
 3420             CTLTYPE_INT|CTLFLAG_RW, sc,
 3421             0, t3_set_coalesce_usecs,
 3422             "I", "interrupt coalescing timer (us)");
 3423 
 3424         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 
 3425             "pkt_timestamp",
 3426             CTLTYPE_INT | CTLFLAG_RW, sc,
 3427             0, t3_pkt_timestamp,
 3428             "I", "provide packet timestamp instead of connection hash");
 3429 
 3430         for (i = 0; i < sc->params.nports; i++) {
 3431                 struct port_info *pi = &sc->port[i];
 3432                 struct sysctl_oid *poid;
 3433                 struct sysctl_oid_list *poidlist;
 3434                 struct mac_stats *mstats = &pi->mac.stats;
 3435                 
 3436                 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
 3437                 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 
 3438                     pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
 3439                 poidlist = SYSCTL_CHILDREN(poid);
 3440                 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
 3441                     "nqsets", CTLFLAG_RD, &pi->nqsets,
 3442                     0, "#queue sets");
 3443 
 3444                 for (j = 0; j < pi->nqsets; j++) {
 3445                         struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
 3446                         struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
 3447                                           *ctrlqpoid, *lropoid;
 3448                         struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
 3449                                                *txqpoidlist, *ctrlqpoidlist,
 3450                                                *lropoidlist;
 3451                         struct sge_txq *txq = &qs->txq[TXQ_ETH];
 3452                         
 3453                         snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
 3454                         
 3455                         qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, 
 3456                             qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
 3457                         qspoidlist = SYSCTL_CHILDREN(qspoid);
 3458 
 3459                         SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
 3460                                         CTLFLAG_RD, &qs->fl[0].empty, 0,
 3461                                         "freelist #0 empty");
 3462                         SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
 3463                                         CTLFLAG_RD, &qs->fl[1].empty, 0,
 3464                                         "freelist #1 empty");
 3465 
 3466                         rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3467                             rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
 3468                         rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
 3469 
 3470                         txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3471                             txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
 3472                         txqpoidlist = SYSCTL_CHILDREN(txqpoid);
 3473 
 3474                         ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3475                             txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
 3476                         ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
 3477 
 3478                         lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO, 
 3479                             "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
 3480                         lropoidlist = SYSCTL_CHILDREN(lropoid);
 3481 
 3482                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
 3483                             CTLFLAG_RD, &qs->rspq.size,
 3484                             0, "#entries in response queue");
 3485                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
 3486                             CTLFLAG_RD, &qs->rspq.cidx,
 3487                             0, "consumer index");
 3488                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
 3489                             CTLFLAG_RD, &qs->rspq.credits,
 3490                             0, "#credits");
 3491                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
 3492                             CTLFLAG_RD, &qs->rspq.starved,
 3493                             0, "#times starved");
 3494                         SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
 3495                             CTLFLAG_RD, &qs->rspq.phys_addr,
 3496                             "physical address of the queue");
 3497                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
 3498                             CTLFLAG_RW, &qs->rspq.rspq_dump_start,
 3499                             0, "start rspq dump entry");
 3500                         SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
 3501                             CTLFLAG_RW, &qs->rspq.rspq_dump_count,
 3502                             0, "#rspq entries to dump");
 3503                         SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
 3504                             CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
 3505                             0, t3_dump_rspq, "A", "dump of the response queue");
 3506 
 3507                         SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
 3508                             CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
 3509                             "#tunneled packets dropped");
 3510                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
 3511                             CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
 3512                             0, "#tunneled packets waiting to be sent");
 3513 #if 0                   
 3514                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
 3515                             CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
 3516                             0, "#tunneled packets queue producer index");
 3517                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
 3518                             CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
 3519                             0, "#tunneled packets queue consumer index");
 3520 #endif                  
 3521                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
 3522                             CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
 3523                             0, "#tunneled packets processed by the card");
 3524                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
 3525                             CTLFLAG_RD, &txq->cleaned,
 3526                             0, "#tunneled packets cleaned");
 3527                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
 3528                             CTLFLAG_RD, &txq->in_use,
 3529                             0, "#tunneled packet slots in use");
 3530                         SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
 3531                             CTLFLAG_RD, &txq->txq_frees,
 3532                             "#tunneled packets freed");
 3533                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
 3534                             CTLFLAG_RD, &txq->txq_skipped,
 3535                             0, "#tunneled packet descriptors skipped");
 3536                         SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
 3537                             CTLFLAG_RD, &txq->txq_coalesced,
 3538                             "#tunneled packets coalesced");
 3539                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
 3540                             CTLFLAG_RD, &txq->txq_enqueued,
 3541                             0, "#tunneled packets enqueued to hardware");
 3542                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
 3543                             CTLFLAG_RD, &qs->txq_stopped,
 3544                             0, "tx queues stopped");
 3545                         SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
 3546                             CTLFLAG_RD, &txq->phys_addr,
 3547                             "physical address of the queue");
 3548                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
 3549                             CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
 3550                             0, "txq generation");
 3551                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
 3552                             CTLFLAG_RD, &txq->cidx,
 3553                             0, "hardware queue cidx");                  
 3554                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
 3555                             CTLFLAG_RD, &txq->pidx,
 3556                             0, "hardware queue pidx");
 3557                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
 3558                             CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
 3559                             0, "txq start idx for dump");
 3560                         SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
 3561                             CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
 3562                             0, "txq #entries to dump");                 
 3563                         SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
 3564                             CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
 3565                             0, t3_dump_txq_eth, "A", "dump of the transmit queue");
 3566 
 3567                         SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
 3568                             CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
 3569                             0, "ctrlq start idx for dump");
 3570                         SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
 3571                             CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
 3572                             0, "ctrl #entries to dump");                        
 3573                         SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
 3574                             CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
 3575                             0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
 3576 
 3577                         SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
 3578                             CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
 3579                         SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
 3580                             CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
 3581                         SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
 3582                             CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
 3583                         SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
 3584                             CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
 3585                 }
 3586 
 3587                 /* Now add a node for mac stats. */
 3588                 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
 3589                     CTLFLAG_RD, NULL, "MAC statistics");
 3590                 poidlist = SYSCTL_CHILDREN(poid);
 3591 
 3592                 /*
 3593                  * We (ab)use the length argument (arg2) to pass on the offset
 3594                  * of the data that we are interested in.  This is only required
 3595                  * for the quad counters that are updated from the hardware (we
 3596                  * make sure that we return the latest value).
 3597                  * sysctl_handle_macstat first updates *all* the counters from
 3598                  * the hardware, and then returns the latest value of the
 3599                  * requested counter.  Best would be to update only the
 3600                  * requested counter from hardware, but t3_mac_update_stats()
 3601                  * hides all the register details and we don't want to dive into
 3602                  * all that here.
 3603                  */
 3604 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
 3605     (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
 3606     sysctl_handle_macstat, "QU", 0)
 3607                 CXGB_SYSCTL_ADD_QUAD(tx_octets);
 3608                 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
 3609                 CXGB_SYSCTL_ADD_QUAD(tx_frames);
 3610                 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
 3611                 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
 3612                 CXGB_SYSCTL_ADD_QUAD(tx_pause);
 3613                 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
 3614                 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
 3615                 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
 3616                 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
 3617                 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
 3618                 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
 3619                 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
 3620                 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
 3621                 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
 3622                 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
 3623                 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
 3624                 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
 3625                 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
 3626                 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
 3627                 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
 3628                 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
 3629                 CXGB_SYSCTL_ADD_QUAD(rx_octets);
 3630                 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
 3631                 CXGB_SYSCTL_ADD_QUAD(rx_frames);
 3632                 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
 3633                 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
 3634                 CXGB_SYSCTL_ADD_QUAD(rx_pause);
 3635                 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
 3636                 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
 3637                 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
 3638                 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
 3639                 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
 3640                 CXGB_SYSCTL_ADD_QUAD(rx_runt);
 3641                 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
 3642                 CXGB_SYSCTL_ADD_QUAD(rx_short);
 3643                 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
 3644                 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
 3645                 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
 3646                 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
 3647                 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
 3648                 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
 3649                 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
 3650                 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
 3651                 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
 3652                 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
 3653 #undef CXGB_SYSCTL_ADD_QUAD
 3654 
 3655 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
 3656     CTLFLAG_RD, &mstats->a, 0)
 3657                 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
 3658                 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
 3659                 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
 3660                 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
 3661                 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
 3662                 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
 3663                 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
 3664                 CXGB_SYSCTL_ADD_ULONG(num_toggled);
 3665                 CXGB_SYSCTL_ADD_ULONG(num_resets);
 3666                 CXGB_SYSCTL_ADD_ULONG(link_faults);
 3667 #undef CXGB_SYSCTL_ADD_ULONG
 3668         }
 3669 }
 3670         
 3671 /**
 3672  *      t3_get_desc - dump an SGE descriptor for debugging purposes
 3673  *      @qs: the queue set
 3674  *      @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
 3675  *      @idx: the descriptor index in the queue
 3676  *      @data: where to dump the descriptor contents
 3677  *
 3678  *      Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 3679  *      size of the descriptor.
 3680  */
 3681 int
 3682 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
 3683                 unsigned char *data)
 3684 {
 3685         if (qnum >= 6)
 3686                 return (EINVAL);
 3687 
 3688         if (qnum < 3) {
 3689                 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
 3690                         return -EINVAL;
 3691                 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
 3692                 return sizeof(struct tx_desc);
 3693         }
 3694 
 3695         if (qnum == 3) {
 3696                 if (!qs->rspq.desc || idx >= qs->rspq.size)
 3697                         return (EINVAL);
 3698                 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
 3699                 return sizeof(struct rsp_desc);
 3700         }
 3701 
 3702         qnum -= 4;
 3703         if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
 3704                 return (EINVAL);
 3705         memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
 3706         return sizeof(struct rx_desc);
 3707 }
