1 /**************************************************************************
2
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 ***************************************************************************/
29 #define DEBUG_BUFRING
30
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/7.3/sys/dev/cxgb/cxgb_sge.c 202775 2010-01-22 02:35:40Z np $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/bus_dma.h>
44 #include <sys/rman.h>
45 #include <sys/queue.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48
49 #include <sys/proc.h>
50 #include <sys/sbuf.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 #include <sys/systm.h>
54 #include <sys/syslog.h>
55
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63
64 #include <vm/vm.h>
65 #include <vm/pmap.h>
66
67 #ifdef CONFIG_DEFINED
68 #include <cxgb_include.h>
69 #include <sys/mvec.h>
70 #else
71 #include <dev/cxgb/cxgb_include.h>
72 #include <dev/cxgb/sys/mvec.h>
73 #endif
74
75 int txq_fills = 0;
76 /*
77 * XXX don't re-enable this until TOE stops assuming
78 * we have an m_ext
79 */
80 static int recycle_enable = 0;
81 extern int cxgb_txq_buf_ring_size;
82 int cxgb_cached_allocations;
83 int cxgb_cached;
84 int cxgb_ext_freed = 0;
85 int cxgb_ext_inited = 0;
86 int fl_q_size = 0;
87 int jumbo_q_size = 0;
88
89 extern int cxgb_use_16k_clusters;
90 extern int cxgb_pcpu_cache_enable;
91 extern int nmbjumbop;
92 extern int nmbjumbo9;
93 extern int nmbjumbo16;
94
95
96
97
98 #define USE_GTS 0
99
100 #define SGE_RX_SM_BUF_SIZE 1536
101 #define SGE_RX_DROP_THRES 16
102 #define SGE_RX_COPY_THRES 128
103
104 /*
105 * Period of the Tx buffer reclaim timer. This timer does not need to run
106 * frequently as Tx buffers are usually reclaimed by new Tx packets.
107 */
108 #define TX_RECLAIM_PERIOD (hz >> 1)
109
110 /*
111 * Values for sge_txq.flags
112 */
113 enum {
114 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
115 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
116 };
117
118 struct tx_desc {
119 uint64_t flit[TX_DESC_FLITS];
120 } __packed;
121
122 struct rx_desc {
123 uint32_t addr_lo;
124 uint32_t len_gen;
125 uint32_t gen2;
126 uint32_t addr_hi;
} __packed;
128
129 struct rsp_desc { /* response queue descriptor */
130 struct rss_header rss_hdr;
131 uint32_t flags;
132 uint32_t len_cq;
133 uint8_t imm_data[47];
134 uint8_t intr_gen;
135 } __packed;
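
/*
 * Note (descriptive, assuming the 8-byte struct rss_header from the shared
 * CPL definitions): a struct rsp_desc works out to 8 + 4 + 4 + 47 + 1 = 64
 * bytes, so each response occupies one 64-byte entry in the response ring.
 */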
136
137 #define RX_SW_DESC_MAP_CREATED (1 << 0)
138 #define TX_SW_DESC_MAP_CREATED (1 << 1)
139 #define RX_SW_DESC_INUSE (1 << 3)
140 #define TX_SW_DESC_MAPPED (1 << 4)
141
142 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
143 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
144 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
145 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
146
147 struct tx_sw_desc { /* SW state per Tx descriptor */
148 struct mbuf_iovec mi;
149 bus_dmamap_t map;
150 int flags;
151 };
152
153 struct rx_sw_desc { /* SW state per Rx descriptor */
154 caddr_t rxsd_cl;
155 caddr_t data;
156 bus_dmamap_t map;
157 int flags;
158 };
159
160 struct txq_state {
161 unsigned int compl;
162 unsigned int gen;
163 unsigned int pidx;
164 };
165
166 struct refill_fl_cb_arg {
167 int error;
168 bus_dma_segment_t seg;
169 int nseg;
170 };
171
172 /*
173 * Maps a number of flits to the number of Tx descriptors that can hold them.
174 * The formula is
175 *
176 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
177 *
178 * HW allows up to 4 descriptors to be combined into a WR.
179 */
180 static uint8_t flit_desc_map[] = {
181 0,
182 #if SGE_NUM_GENBITS == 1
183 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
184 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
185 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
186 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
187 #elif SGE_NUM_GENBITS == 2
188 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
189 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
190 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
191 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
192 #else
193 # error "SGE_NUM_GENBITS must be 1 or 2"
194 #endif
195 };
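
/*
 * Worked example (illustrative only): with SGE_NUM_GENBITS == 2 the table
 * above implies WR_FLITS == 15, so e.g. a request of 20 flits maps to
 * flit_desc_map[20] == 2, matching desc = 1 + (20 - 2) / (15 - 1) == 2.
 */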
196
197
198 int cxgb_debug = 0;
199
200 static void sge_timer_cb(void *arg);
201 static void sge_timer_reclaim(void *arg, int ncount);
202 static void sge_txq_reclaim_handler(void *arg, int ncount);
203
/**
 * reclaim_completed_tx_ - reclaims completed Tx descriptors
 * @q: the Tx queue to reclaim completed descriptors from
 * @reclaim_min: don't bother unless at least this many descriptors can be reclaimed
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible. Called with the Tx
 * queue's lock held.
 */
213 static __inline int
214 reclaim_completed_tx_(struct sge_txq *q, int reclaim_min)
215 {
216 int reclaim = desc_reclaimable(q);
217
218 if (reclaim < reclaim_min)
219 return (0);
220
221 mtx_assert(&q->lock, MA_OWNED);
222 if (reclaim > 0) {
223 t3_free_tx_desc(q, reclaim);
224 q->cleaned += reclaim;
225 q->in_use -= reclaim;
226 }
227 return (reclaim);
228 }
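
/*
 * Callers typically pass a small positive @reclaim_min (e.g. 16 in
 * sge_txq_reclaim_() below) so that we do not bother taking the reclaim
 * path for a mere handful of descriptors at a time.
 */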
229
230 /**
231 * should_restart_tx - are there enough resources to restart a Tx queue?
232 * @q: the Tx queue
233 *
234 * Checks if there are enough descriptors to restart a suspended Tx queue.
235 */
236 static __inline int
237 should_restart_tx(const struct sge_txq *q)
238 {
239 unsigned int r = q->processed - q->cleaned;
240
241 return q->in_use - r < (q->size >> 1);
242 }
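
/*
 * In the check above, @r is the number of descriptors the hardware has
 * processed but that we have not yet cleaned, so q->in_use - r is what
 * would remain outstanding after the next reclaim; we restart only once
 * that drops below half the queue size.
 */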
243
244 /**
245 * t3_sge_init - initialize SGE
246 * @adap: the adapter
247 * @p: the SGE parameters
248 *
249 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead, the top-level
 * driver must request those individually. We also do not enable DMA here;
 * that should be done after the queues have been set up.
253 */
254 void
255 t3_sge_init(adapter_t *adap, struct sge_params *p)
256 {
257 u_int ctrl, ups;
258
259 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
260
261 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
262 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
263 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
264 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
265 #if SGE_NUM_GENBITS == 1
266 ctrl |= F_EGRGENCTRL;
267 #endif
268 if (adap->params.rev > 0) {
269 if (!(adap->flags & (USING_MSIX | USING_MSI)))
270 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
271 }
272 t3_write_reg(adap, A_SG_CONTROL, ctrl);
273 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
274 V_LORCQDRBTHRSH(512));
275 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
276 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
277 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
278 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
279 adap->params.rev < T3_REV_C ? 1000 : 500);
280 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
281 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
282 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
283 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
284 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
285 }
286
287
288 /**
289 * sgl_len - calculates the size of an SGL of the given capacity
290 * @n: the number of SGL entries
291 *
292 * Calculates the number of flits needed for a scatter/gather list that
293 * can hold the given number of entries.
294 */
295 static __inline unsigned int
296 sgl_len(unsigned int n)
297 {
298 return ((3 * n) / 2 + (n & 1));
299 }
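
/*
 * Illustrative example: an SGL entry is an 8-byte address plus a 4-byte
 * length, packed two entries per struct sg_ent (see make_sgl() below), so
 * each entry costs 1.5 flits; sgl_len(3) == (3 * 3) / 2 + (3 & 1) == 5,
 * i.e. 4.5 flits rounded up to a whole flit.
 */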
300
301 /**
 * get_imm_packet - copy an immediate-data packet out of a response
 * @sc: the adapter
 * @resp: the response descriptor containing the packet data
 * @m: the mbuf that receives the immediate data
 *
 * Copies the immediate data of the given response into the supplied mbuf.
306 */
307 static int
308 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
309 {
310
311 m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE;
312 m->m_ext.ext_buf = NULL;
313 m->m_ext.ext_type = 0;
314 memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE);
315 return (0);
316 }
317
318 static __inline u_int
319 flits_to_desc(u_int n)
320 {
321 return (flit_desc_map[n]);
322 }
323
324 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
325 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
326 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
327 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
328 F_HIRCQPARITYERROR)
329 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
330 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
331 F_RSPQDISABLED)
332
333 /**
334 * t3_sge_err_intr_handler - SGE async event interrupt handler
335 * @adapter: the adapter
336 *
337 * Interrupt handler for SGE asynchronous (non-data) events.
338 */
339 void
340 t3_sge_err_intr_handler(adapter_t *adapter)
341 {
342 unsigned int v, status;
343
344 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
345 if (status & SGE_PARERR)
346 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
347 status & SGE_PARERR);
348 if (status & SGE_FRAMINGERR)
349 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
350 status & SGE_FRAMINGERR);
351 if (status & F_RSPQCREDITOVERFOW)
352 CH_ALERT(adapter, "SGE response queue credit overflow\n");
353
354 if (status & F_RSPQDISABLED) {
355 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
356
357 CH_ALERT(adapter,
358 "packet delivered to disabled response queue (0x%x)\n",
359 (v >> S_RSPQ0DISABLED) & 0xff);
360 }
361
362 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
363 if (status & SGE_FATALERR)
364 t3_fatal_err(adapter);
365 }
366
367 void
368 t3_sge_prep(adapter_t *adap, struct sge_params *p)
369 {
370 int i, nqsets;
371
372 nqsets = min(SGE_QSETS, mp_ncpus*4);
373
374 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
375
376 while (!powerof2(fl_q_size))
377 fl_q_size--;
378 #if __FreeBSD_version >= 700111
379 if (cxgb_use_16k_clusters)
380 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
381 else
382 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
383 #else
384 jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
385 #endif
386 while (!powerof2(jumbo_q_size))
387 jumbo_q_size--;
388
389 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
390 device_printf(adap->dev,
391 "Insufficient clusters and/or jumbo buffers.\n");
392
393 /* XXX Does ETHER_ALIGN need to be accounted for here? */
394 p->max_pkt_size = adap->sge.qs[0].fl[1].buf_size - sizeof(struct cpl_rx_data);
395
396 for (i = 0; i < SGE_QSETS; ++i) {
397 struct qset_params *q = p->qset + i;
398
399 if (adap->params.nports > 2) {
400 q->coalesce_usecs = 50;
401 } else {
402 #ifdef INVARIANTS
403 q->coalesce_usecs = 10;
404 #else
405 q->coalesce_usecs = 5;
406 #endif
407 }
408 q->polling = 0;
409 q->rspq_size = RSPQ_Q_SIZE;
410 q->fl_size = fl_q_size;
411 q->jumbo_size = jumbo_q_size;
412 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
413 q->txq_size[TXQ_OFLD] = 1024;
414 q->txq_size[TXQ_CTRL] = 256;
415 q->cong_thres = 0;
416 }
417 }
418
419 int
420 t3_sge_alloc(adapter_t *sc)
421 {
422
423 /* The parent tag. */
424 if (bus_dma_tag_create( NULL, /* parent */
425 1, 0, /* algnmnt, boundary */
426 BUS_SPACE_MAXADDR, /* lowaddr */
427 BUS_SPACE_MAXADDR, /* highaddr */
428 NULL, NULL, /* filter, filterarg */
429 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
430 BUS_SPACE_UNRESTRICTED, /* nsegments */
431 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
432 0, /* flags */
433 NULL, NULL, /* lock, lockarg */
434 &sc->parent_dmat)) {
435 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
436 return (ENOMEM);
437 }
438
439 /*
440 * DMA tag for normal sized RX frames
441 */
442 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
443 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
444 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
445 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
446 return (ENOMEM);
447 }
448
449 /*
450 * DMA tag for jumbo sized RX frames.
451 */
452 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
453 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
454 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
455 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
456 return (ENOMEM);
457 }
458
459 /*
460 * DMA tag for TX frames.
461 */
462 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
463 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
464 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
465 NULL, NULL, &sc->tx_dmat)) {
466 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
467 return (ENOMEM);
468 }
469
470 return (0);
471 }
472
473 int
474 t3_sge_free(struct adapter * sc)
475 {
476
477 if (sc->tx_dmat != NULL)
478 bus_dma_tag_destroy(sc->tx_dmat);
479
480 if (sc->rx_jumbo_dmat != NULL)
481 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
482
483 if (sc->rx_dmat != NULL)
484 bus_dma_tag_destroy(sc->rx_dmat);
485
486 if (sc->parent_dmat != NULL)
487 bus_dma_tag_destroy(sc->parent_dmat);
488
489 return (0);
490 }
491
492 void
493 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
494 {
495
496 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
497 qs->rspq.polling = 0 /* p->polling */;
498 }
499
500 #if !defined(__i386__) && !defined(__amd64__)
501 static void
502 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
503 {
504 struct refill_fl_cb_arg *cb_arg = arg;
505
506 cb_arg->error = error;
507 cb_arg->seg = segs[0];
508 cb_arg->nseg = nseg;
509
510 }
511 #endif
512 /**
513 * refill_fl - refill an SGE free-buffer list
514 * @sc: the controller softc
515 * @q: the free-list to refill
516 * @n: the number of new buffers to allocate
517 *
518 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
 * The caller must ensure that @n does not exceed the queue's capacity.
520 */
521 static void
522 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
523 {
524 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
525 struct rx_desc *d = &q->desc[q->pidx];
526 struct refill_fl_cb_arg cb_arg;
527 caddr_t cl;
528 int err, count = 0;
529 int header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
530
531 cb_arg.error = 0;
532 while (n--) {
533 /*
 * We allocate only the cluster here; the mbuf is allocated after the packet is received.
535 */
536 if ((cl = cxgb_cache_get(q->zone)) == NULL) {
537 log(LOG_WARNING, "Failed to allocate cluster\n");
538 goto done;
539 }
540
541 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
542 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
543 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
544 uma_zfree(q->zone, cl);
545 goto done;
546 }
547 sd->flags |= RX_SW_DESC_MAP_CREATED;
548 }
549 #if !defined(__i386__) && !defined(__amd64__)
550 err = bus_dmamap_load(q->entry_tag, sd->map,
551 cl + header_size, q->buf_size,
552 refill_fl_cb, &cb_arg, 0);
553
554 if (err != 0 || cb_arg.error) {
555 log(LOG_WARNING, "failure in refill_fl %d\n", cb_arg.error);
556 /*
557 * XXX free cluster
558 */
559 return;
560 }
561 #else
562 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)(cl + header_size));
563 #endif
564 sd->flags |= RX_SW_DESC_INUSE;
565 sd->rxsd_cl = cl;
566 sd->data = cl + header_size;
567 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >> 32) & 0xffffffff);
569 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
570 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
571
572 d++;
573 sd++;
574
575 if (++q->pidx == q->size) {
576 q->pidx = 0;
577 q->gen ^= 1;
578 sd = q->sdesc;
579 d = q->desc;
580 }
581 q->credits++;
582 count++;
583 }
584
585 done:
586 if (count)
587 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
588 }
589
590
591 /**
592 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @sc: the controller softc
594 * @q: the SGE free list to clean up
595 *
596 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
597 * this queue should be stopped before calling this function.
598 */
599 static void
600 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
601 {
602 u_int cidx = q->cidx;
603
604 while (q->credits--) {
605 struct rx_sw_desc *d = &q->sdesc[cidx];
606
607 if (d->flags & RX_SW_DESC_INUSE) {
608 bus_dmamap_unload(q->entry_tag, d->map);
609 bus_dmamap_destroy(q->entry_tag, d->map);
610 uma_zfree(q->zone, d->rxsd_cl);
611 }
612 d->rxsd_cl = NULL;
613 if (++cidx == q->size)
614 cidx = 0;
615 }
616 }
617
618 static __inline void
619 __refill_fl(adapter_t *adap, struct sge_fl *fl)
620 {
621 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
622 }
623
624 static __inline void
625 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
626 {
627 if ((fl->size - fl->credits) < max)
628 refill_fl(adap, fl, min(max, fl->size - fl->credits));
629 }
630
631 void
632 refill_fl_service(adapter_t *adap, struct sge_fl *fl)
633 {
634 __refill_fl_lt(adap, fl, 512);
635 }
636
637 /**
638 * recycle_rx_buf - recycle a receive buffer
639 * @adapter: the adapter
640 * @q: the SGE free list
641 * @idx: index of buffer to recycle
642 *
643 * Recycles the specified buffer on the given free list by adding it at
644 * the next available slot on the list.
645 */
646 static void
647 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
648 {
649 struct rx_desc *from = &q->desc[idx];
650 struct rx_desc *to = &q->desc[q->pidx];
651
652 q->sdesc[q->pidx] = q->sdesc[idx];
653 to->addr_lo = from->addr_lo; // already big endian
654 to->addr_hi = from->addr_hi; // likewise
655 wmb();
656 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
657 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
658 q->credits++;
659
660 if (++q->pidx == q->size) {
661 q->pidx = 0;
662 q->gen ^= 1;
663 }
664 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
665 }
666
667 static void
668 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
669 {
670 uint32_t *addr;
671
672 addr = arg;
673 *addr = segs[0].ds_addr;
674 }
675
676 static int
677 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
678 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
679 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
680 {
681 size_t len = nelem * elem_size;
682 void *s = NULL;
683 void *p = NULL;
684 int err;
685
686 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
687 BUS_SPACE_MAXADDR_32BIT,
688 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
689 len, 0, NULL, NULL, tag)) != 0) {
690 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
691 return (ENOMEM);
692 }
693
694 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
695 map)) != 0) {
696 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
697 return (ENOMEM);
698 }
699
700 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
701 bzero(p, len);
702 *(void **)desc = p;
703
704 if (sw_size) {
705 len = nelem * sw_size;
706 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
707 *(void **)sdesc = s;
708 }
709 if (parent_entry_tag == NULL)
710 return (0);
711
712 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
713 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
714 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
715 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
716 NULL, NULL, entry_tag)) != 0) {
717 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
718 return (ENOMEM);
719 }
720 return (0);
721 }
722
723 static void
724 sge_slow_intr_handler(void *arg, int ncount)
725 {
726 adapter_t *sc = arg;
727
728 t3_slow_intr_handler(sc);
729 }
730
731 /**
 * sge_timer_cb - perform periodic maintenance of the adapter's SGE queue sets
 * @arg: the adapter
 *
 * Runs periodically from a timer to perform maintenance of the SGE queue
 * sets. It performs the following tasks:
737 *
738 * a) Cleans up any completed Tx descriptors that may still be pending.
739 * Normal descriptor cleanup happens when new packets are added to a Tx
740 * queue so this timer is relatively infrequent and does any cleanup only
741 * if the Tx queue has not seen any new packets in a while. We make a
742 * best effort attempt to reclaim descriptors, in that we don't wait
743 * around if we cannot get a queue's lock (which most likely is because
744 * someone else is queueing new packets and so will also handle the clean
745 * up). Since control queues use immediate data exclusively we don't
746 * bother cleaning them up here.
747 *
748 * b) Replenishes Rx queues that have run out due to memory shortage.
749 * Normally new Rx buffers are added when existing ones are consumed but
750 * when out of memory a queue can become empty. We try to add only a few
751 * buffers here, the queue will be replenished fully as these new buffers
752 * are used up if memory shortage has subsided.
753 *
754 * c) Return coalesced response queue credits in case a response queue is
755 * starved.
756 *
757 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
758 * fifo overflows and the FW doesn't implement any recovery scheme yet.
759 */
760 static void
761 sge_timer_cb(void *arg)
762 {
763 adapter_t *sc = arg;
764 #ifndef IFNET_MULTIQUEUE
765 struct port_info *pi;
766 struct sge_qset *qs;
767 struct sge_txq *txq;
768 int i, j;
769 int reclaim_ofl, refill_rx;
770
771 for (i = 0; i < sc->params.nports; i++) {
772 pi = &sc->port[i];
773 for (j = 0; j < pi->nqsets; j++) {
774 qs = &sc->sge.qs[pi->first_qset + j];
775 txq = &qs->txq[0];
776 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
777 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
778 (qs->fl[1].credits < qs->fl[1].size));
779 if (reclaim_ofl || refill_rx) {
780 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
781 break;
782 }
783 }
784 }
785 #endif
786 if (sc->params.nports > 2) {
787 int i;
788
789 for_each_port(sc, i) {
790 struct port_info *pi = &sc->port[i];
791
792 t3_write_reg(sc, A_SG_KDOORBELL,
793 F_SELEGRCNTX |
794 (FW_TUNNEL_SGEEC_START + pi->first_qset));
795 }
796 }
797 if (sc->open_device_map != 0)
798 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
799 }
800
801 /*
 * This is meant to be a catch-all function to keep SGE state private
 * to sge.c.
805 */
806 int
807 t3_sge_init_adapter(adapter_t *sc)
808 {
809 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
810 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
811 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
812 mi_init();
813 cxgb_cache_init();
814 return (0);
815 }
816
817 int
818 t3_sge_reset_adapter(adapter_t *sc)
819 {
820 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
821 return (0);
822 }
823
824 int
825 t3_sge_init_port(struct port_info *pi)
826 {
827 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
828 return (0);
829 }
830
831 void
832 t3_sge_deinit_sw(adapter_t *sc)
833 {
834
835 mi_deinit();
836 }
837
838 /**
839 * refill_rspq - replenish an SGE response queue
840 * @adapter: the adapter
841 * @q: the response queue to replenish
842 * @credits: how many new responses to make available
843 *
844 * Replenishes a response queue by making the supplied number of responses
845 * available to HW.
846 */
847 static __inline void
848 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
849 {
850
851 /* mbufs are allocated on demand when a rspq entry is processed. */
852 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
853 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
854 }
855
856 static __inline void
857 sge_txq_reclaim_(struct sge_txq *txq, int force)
858 {
859
860 if (desc_reclaimable(txq) < 16)
861 return;
862 if (mtx_trylock(&txq->lock) == 0)
863 return;
864 reclaim_completed_tx_(txq, 16);
865 mtx_unlock(&txq->lock);
866
867 }
868
869 static void
870 sge_txq_reclaim_handler(void *arg, int ncount)
871 {
872 struct sge_txq *q = arg;
873
874 sge_txq_reclaim_(q, TRUE);
875 }
876
877
878
879 static void
880 sge_timer_reclaim(void *arg, int ncount)
881 {
882 struct port_info *pi = arg;
883 int i, nqsets = pi->nqsets;
884 adapter_t *sc = pi->adapter;
885 struct sge_qset *qs;
886 struct sge_txq *txq;
887 struct mtx *lock;
888
889 #ifdef IFNET_MULTIQUEUE
890 panic("%s should not be called with multiqueue support\n", __FUNCTION__);
891 #endif
892 for (i = 0; i < nqsets; i++) {
893 qs = &sc->sge.qs[pi->first_qset + i];
894
895 txq = &qs->txq[TXQ_OFLD];
896 sge_txq_reclaim_(txq, FALSE);
897
898 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
899 &sc->sge.qs[0].rspq.lock;
900
901 if (mtx_trylock(lock)) {
902 /* XXX currently assume that we are *NOT* polling */
903 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
904
905 if (qs->fl[0].credits < qs->fl[0].size - 16)
906 __refill_fl(sc, &qs->fl[0]);
907 if (qs->fl[1].credits < qs->fl[1].size - 16)
908 __refill_fl(sc, &qs->fl[1]);
909
910 if (status & (1 << qs->rspq.cntxt_id)) {
911 if (qs->rspq.credits) {
912 refill_rspq(sc, &qs->rspq, 1);
913 qs->rspq.credits--;
914 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
915 1 << qs->rspq.cntxt_id);
916 }
917 }
918 mtx_unlock(lock);
919 }
920 }
921 }
922
923 /**
924 * init_qset_cntxt - initialize an SGE queue set context info
925 * @qs: the queue set
926 * @id: the queue set id
927 *
928 * Initializes the TIDs and context ids for the queues of a queue set.
929 */
930 static void
931 init_qset_cntxt(struct sge_qset *qs, u_int id)
932 {
933
934 qs->rspq.cntxt_id = id;
935 qs->fl[0].cntxt_id = 2 * id;
936 qs->fl[1].cntxt_id = 2 * id + 1;
937 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
938 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
939 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
940 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
941 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
942
943 mbufq_init(&qs->txq[TXQ_ETH].sendq);
944 mbufq_init(&qs->txq[TXQ_OFLD].sendq);
945 mbufq_init(&qs->txq[TXQ_CTRL].sendq);
946 }
947
948
949 static void
950 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
951 {
952 txq->in_use += ndesc;
953 /*
 * XXX we don't handle stopping of the queue; presumably the start routine
 * handles this when we bump against the end of the ring
956 */
957 txqs->gen = txq->gen;
958 txq->unacked += ndesc;
959 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
960 txq->unacked &= 31;
961 txqs->pidx = txq->pidx;
962 txq->pidx += ndesc;
963 #ifdef INVARIANTS
964 if (((txqs->pidx > txq->cidx) &&
965 (txq->pidx < txqs->pidx) &&
966 (txq->pidx >= txq->cidx)) ||
967 ((txqs->pidx < txq->cidx) &&
(txq->pidx >= txq->cidx)) ||
969 ((txqs->pidx < txq->cidx) &&
970 (txq->cidx < txqs->pidx)))
971 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
972 txqs->pidx, txq->pidx, txq->cidx);
973 #endif
974 if (txq->pidx >= txq->size) {
975 txq->pidx -= txq->size;
976 txq->gen ^= 1;
977 }
978
979 }
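
/*
 * A note on the completion request above (descriptive only): txq->unacked
 * accumulates descriptors and is folded back below 32 after each call, so
 * the (unacked & 32) test fires when the running count crosses 32; the
 * (S_WR_COMPL - 5) shift moves that bit into the F_WR_COMPL position, so a
 * completion is requested roughly once every 32 descriptors.
 */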
980
981 /**
982 * calc_tx_descs - calculate the number of Tx descriptors for a packet
983 * @m: the packet mbufs
984 * @nsegs: the number of segments
985 *
986 * Returns the number of Tx descriptors needed for the given Ethernet
987 * packet. Ethernet packets require addition of WR and CPL headers.
988 */
989 static __inline unsigned int
990 calc_tx_descs(const struct mbuf *m, int nsegs)
991 {
992 unsigned int flits;
993
994 if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt))
995 return 1;
996
997 flits = sgl_len(nsegs) + 2;
998 #ifdef TSO_SUPPORTED
999 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1000 flits++;
1001 #endif
1002 return flits_to_desc(flits);
1003 }
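
/*
 * Illustrative example: a non-TSO packet that maps to 3 DMA segments needs
 * sgl_len(3) + 2 == 7 flits (WR + CPL headers plus the SGL), and
 * flits_to_desc(7) == 1, so it fits in a single Tx descriptor.
 */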
1004
1005 static unsigned int
1006 busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq,
1007 struct tx_sw_desc *txsd, bus_dma_segment_t *segs, int *nsegs)
1008 {
1009 struct mbuf *m0;
1010 int err, pktlen, pass = 0;
1011
1012 retry:
1013 err = 0;
1014 m0 = *m;
1015 pktlen = m0->m_pkthdr.len;
1016 #if defined(__i386__) || defined(__amd64__)
1017 if (busdma_map_sg_collapse(m, segs, nsegs) == 0) {
1018 goto done;
1019 } else
1020 #endif
1021 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, txsd->map, m0, segs, nsegs, 0);
1022
1023 if (err == 0) {
1024 goto done;
1025 }
1026 if (err == EFBIG && pass == 0) {
1027 pass = 1;
1028 /* Too many segments, try to defrag */
1029 m0 = m_defrag(m0, M_DONTWAIT);
1030 if (m0 == NULL) {
1031 m_freem(*m);
1032 *m = NULL;
1033 return (ENOBUFS);
1034 }
1035 *m = m0;
1036 goto retry;
1037 } else if (err == ENOMEM) {
1038 return (err);
} else if (err) {
1040 if (cxgb_debug)
1041 printf("map failure err=%d pktlen=%d\n", err, pktlen);
1042 m_freem(m0);
1043 *m = NULL;
1044 return (err);
1045 }
1046 done:
1047 #if !defined(__i386__) && !defined(__amd64__)
1048 bus_dmamap_sync(txq->entry_tag, txsd->map, BUS_DMASYNC_PREWRITE);
1049 #endif
1050 txsd->flags |= TX_SW_DESC_MAPPED;
1051
1052 return (0);
1053 }
1054
1055 /**
1056 * make_sgl - populate a scatter/gather list for a packet
1057 * @sgp: the SGL to populate
1058 * @segs: the packet dma segments
1059 * @nsegs: the number of segments
1060 *
1061 * Generates a scatter/gather list for the buffers that make up a packet
1062 * and returns the SGL size in 8-byte words. The caller must size the SGL
1063 * appropriately.
1064 */
1065 static __inline void
1066 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1067 {
1068 int i, idx;
1069
1070 for (idx = 0, i = 0; i < nsegs; i++) {
1071 /*
1072 * firmware doesn't like empty segments
1073 */
1074 if (segs[i].ds_len == 0)
1075 continue;
1076 if (i && idx == 0)
1077 ++sgp;
1078
1079 sgp->len[idx] = htobe32(segs[i].ds_len);
1080 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1081 idx ^= 1;
1082 }
1083
1084 if (idx) {
1085 sgp->len[idx] = 0;
1086 sgp->addr[idx] = 0;
1087 }
1088 }
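
/*
 * Layout sketch (assuming struct sg_ent packs two 4-byte lengths followed
 * by two 8-byte addresses): for nsegs == 3 the SGL ends up as
 *	sg[0] = { len0, len1, addr0, addr1 }, sg[1] = { len2, 0, addr2, 0 },
 * with the trailing length/address pair zeroed by the final idx check above.
 */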
1089
1090 /**
1091 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1092 * @adap: the adapter
1093 * @q: the Tx queue
1094 *
 * Ring the doorbell if a Tx queue is asleep. There is a natural race
 * where the HW may go to sleep just after we check; in that case the
 * interrupt handler will detect the outstanding TX packet and ring the
 * doorbell for us.
1099 *
1100 * When GTS is disabled we unconditionally ring the doorbell.
1101 */
1102 static __inline void
1103 check_ring_tx_db(adapter_t *adap, struct sge_txq *q)
1104 {
1105 #if USE_GTS
1106 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1107 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1108 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1109 #ifdef T3_TRACE
1110 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1111 q->cntxt_id);
1112 #endif
1113 t3_write_reg(adap, A_SG_KDOORBELL,
1114 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1115 }
1116 #else
1117 wmb(); /* write descriptors before telling HW */
1118 t3_write_reg(adap, A_SG_KDOORBELL,
1119 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1120 #endif
1121 }
1122
1123 static __inline void
1124 wr_gen2(struct tx_desc *d, unsigned int gen)
1125 {
1126 #if SGE_NUM_GENBITS == 2
1127 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1128 #endif
1129 }
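
/*
 * Note (descriptive): with SGE_NUM_GENBITS == 2 the last flit of each Tx
 * descriptor carries the generation value, which is why callers invoke
 * wr_gen2() only after the rest of the descriptor has been written.
 */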
1130
1131 /**
1132 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1133 * @ndesc: number of Tx descriptors spanned by the SGL
1134 * @txd: first Tx descriptor to be written
1135 * @txqs: txq state (generation and producer index)
1136 * @txq: the SGE Tx queue
1137 * @sgl: the SGL
1138 * @flits: number of flits to the start of the SGL in the first descriptor
1139 * @sgl_flits: the SGL size in flits
1140 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1141 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1142 *
1143 * Write a work request header and an associated SGL. If the SGL is
1144 * small enough to fit into one Tx descriptor it has already been written
1145 * and we just need to write the WR header. Otherwise we distribute the
1146 * SGL across the number of descriptors it spans.
1147 */
1148 static void
1149 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1150 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1151 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1152 {
1153
1154 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1155 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1156
1157 if (__predict_true(ndesc == 1)) {
1158 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1159 V_WR_SGLSFLT(flits)) | wr_hi;
1160 wmb();
1161 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1162 V_WR_GEN(txqs->gen)) | wr_lo;
1163 /* XXX gen? */
1164 wr_gen2(txd, txqs->gen);
1165
1166 } else {
1167 unsigned int ogen = txqs->gen;
1168 const uint64_t *fp = (const uint64_t *)sgl;
1169 struct work_request_hdr *wp = wrp;
1170
1171 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1172 V_WR_SGLSFLT(flits)) | wr_hi;
1173
1174 while (sgl_flits) {
1175 unsigned int avail = WR_FLITS - flits;
1176
1177 if (avail > sgl_flits)
1178 avail = sgl_flits;
1179 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1180 sgl_flits -= avail;
1181 ndesc--;
1182 if (!sgl_flits)
1183 break;
1184
1185 fp += avail;
1186 txd++;
1187 txsd++;
1188 if (++txqs->pidx == txq->size) {
1189 txqs->pidx = 0;
1190 txqs->gen ^= 1;
1191 txd = txq->desc;
1192 txsd = txq->sdesc;
1193 }
1194
1195 /*
1196 * when the head of the mbuf chain
1197 * is freed all clusters will be freed
1198 * with it
1199 */
1200 KASSERT(txsd->mi.mi_base == NULL,
1201 ("overwriting valid entry mi_base==%p", txsd->mi.mi_base));
1202 wrp = (struct work_request_hdr *)txd;
1203 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1204 V_WR_SGLSFLT(1)) | wr_hi;
1205 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1206 sgl_flits + 1)) |
1207 V_WR_GEN(txqs->gen)) | wr_lo;
1208 wr_gen2(txd, txqs->gen);
1209 flits = 1;
1210 }
1211 wrp->wr_hi |= htonl(F_WR_EOP);
1212 wmb();
1213 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1214 wr_gen2((struct tx_desc *)wp, ogen);
1215 }
1216 }
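
/*
 * Descriptive sketch of the multi-descriptor case above: the first
 * descriptor carries F_WR_SOP, each continuation descriptor restarts its
 * SGL at flit 1, only the last descriptor gets F_WR_EOP, and the first
 * descriptor's wr_lo (with the original generation) is written last so the
 * SGE never sees a partially written work request.
 */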
1217
1218 /* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */
1219 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20)
1220
1221 #ifdef VLAN_SUPPORTED
1222 #define GET_VTAG(cntrl, m) \
1223 do { \
1224 if ((m)->m_flags & M_VLANTAG) \
1225 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1226 } while (0)
1227
1228 #define GET_VTAG_MI(cntrl, mi) \
1229 do { \
1230 if ((mi)->mi_flags & M_VLANTAG) \
1231 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((mi)->mi_ether_vtag); \
1232 } while (0)
1233 #else
1234 #define GET_VTAG(cntrl, m)
1235 #define GET_VTAG_MI(cntrl, m)
1236 #endif
1237
1238 int
1239 t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
1240 {
1241 adapter_t *sc;
1242 struct mbuf *m0;
1243 struct sge_txq *txq;
1244 struct txq_state txqs;
1245 struct port_info *pi;
1246 unsigned int ndesc, flits, cntrl, mlen;
1247 int err, nsegs, tso_info = 0;
1248
1249 struct work_request_hdr *wrp;
1250 struct tx_sw_desc *txsd;
1251 struct sg_ent *sgp, *sgl;
1252 uint32_t wr_hi, wr_lo, sgl_flits;
1253 bus_dma_segment_t segs[TX_MAX_SEGS];
1254
1255 struct tx_desc *txd;
1256 struct mbuf_vec *mv;
1257 struct mbuf_iovec *mi;
1258
1259 DPRINTF("t3_encap cpu=%d ", curcpu);
1260
1261 mi = NULL;
1262 pi = qs->port;
1263 sc = pi->adapter;
1264 txq = &qs->txq[TXQ_ETH];
1265 txd = &txq->desc[txq->pidx];
1266 txsd = &txq->sdesc[txq->pidx];
1267 sgl = txq->txq_sgl;
1268 m0 = *m;
1269
1270 DPRINTF("t3_encap port_id=%d qsidx=%d ", pi->port_id, pi->first_qset);
1271 DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", m[0]->m_pkthdr.len, pi->txpkt_intf, pi->tx_chan);
1272 if (cxgb_debug)
1273 printf("mi_base=%p cidx=%d pidx=%d\n\n", txsd->mi.mi_base, txq->cidx, txq->pidx);
1274
1275 mtx_assert(&txq->lock, MA_OWNED);
1276 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1277 /*
1278 * XXX need to add VLAN support for 6.x
1279 */
1280 #ifdef VLAN_SUPPORTED
1281 if (m0->m_pkthdr.csum_flags & (CSUM_TSO))
1282 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1283 #endif
1284 KASSERT(txsd->mi.mi_base == NULL,
1285 ("overwriting valid entry mi_base==%p", txsd->mi.mi_base));
1286 if (count > 1) {
1287 panic("count > 1 not support in CVS\n");
1288 if ((err = busdma_map_sg_vec(m, &m0, segs, count)))
1289 return (err);
1290 nsegs = count;
1291 } else if ((err = busdma_map_sg_collapse(&m0, segs, &nsegs))) {
1292 if (cxgb_debug)
1293 printf("failed ... err=%d\n", err);
1294 return (err);
1295 }
1296 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d count=%d", nsegs, count));
1297
if (m0->m_pkthdr.len > PIO_LEN) {
1299 mi_collapse_mbuf(&txsd->mi, m0);
1300 mi = &txsd->mi;
1301 }
1302 if (count > 1) {
1303 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1304 int i, fidx;
1305 struct mbuf_iovec *batchmi;
1306
1307 mv = mtomv(m0);
1308 batchmi = mv->mv_vec;
1309
1310 wrp = (struct work_request_hdr *)txd;
1311
1312 flits = count*2 + 1;
1313 txq_prod(txq, 1, &txqs);
1314
1315 for (fidx = 1, i = 0; i < count; i++, batchmi++, fidx += 2) {
1316 struct cpl_tx_pkt_batch_entry *cbe = &cpl_batch->pkt_entry[i];
1317
1318 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1319 GET_VTAG_MI(cntrl, batchmi);
1320 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1321 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1322 cntrl |= F_TXPKT_IPCSUM_DIS;
1323 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
1324 cntrl |= F_TXPKT_L4CSUM_DIS;
1325 cbe->cntrl = htonl(cntrl);
1326 cbe->len = htonl(batchmi->mi_len | 0x80000000);
1327 cbe->addr = htobe64(segs[i].ds_addr);
1328 txd->flit[fidx] |= htobe64(1 << 24);
1329 }
1330
1331 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1332 V_WR_SGLSFLT(flits)) | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1333 wmb();
1334 wrp->wr_lo = htonl(V_WR_LEN(flits) |
1335 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1336 /* XXX gen? */
1337 wr_gen2(txd, txqs.gen);
1338 check_ring_tx_db(sc, txq);
1339
1340 return (0);
1341 } else if (tso_info) {
1342 int min_size = TCPPKTHDRSIZE, eth_type, tagged;
1343 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1344 struct ip *ip;
1345 struct tcphdr *tcp;
1346 char *pkthdr;
1347
1348 txd->flit[2] = 0;
1349 GET_VTAG(cntrl, m0);
1350 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1351 hdr->cntrl = htonl(cntrl);
1352 mlen = m0->m_pkthdr.len;
1353 hdr->len = htonl(mlen | 0x80000000);
1354
1355 DPRINTF("tso buf len=%d\n", mlen);
1356
1357 tagged = m0->m_flags & M_VLANTAG;
1358 if (!tagged)
1359 min_size -= ETHER_VLAN_ENCAP_LEN;
1360
1361 if (__predict_false(mlen < min_size)) {
1362 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
1363 m0, mlen, m0->m_pkthdr.tso_segsz,
1364 m0->m_pkthdr.csum_flags, m0->m_flags);
1365 panic("tx tso packet too small");
1366 }
1367
1368 /* Make sure that ether, ip, tcp headers are all in m0 */
1369 if (__predict_false(m0->m_len < min_size)) {
1370 m0 = m_pullup(m0, min_size);
1371 if (__predict_false(m0 == NULL)) {
1372 /* XXX panic probably an overreaction */
1373 panic("couldn't fit header into mbuf");
1374 }
1375 }
1376 pkthdr = m0->m_data;
1377
1378 if (tagged) {
1379 eth_type = CPL_ETH_II_VLAN;
1380 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN +
1381 ETHER_VLAN_ENCAP_LEN);
1382 } else {
1383 eth_type = CPL_ETH_II;
1384 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN);
1385 }
1386 tcp = (struct tcphdr *)((uint8_t *)ip +
1387 sizeof(*ip));
1388
1389 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1390 V_LSO_IPHDR_WORDS(ip->ip_hl) |
1391 V_LSO_TCPHDR_WORDS(tcp->th_off);
1392 hdr->lso_info = htonl(tso_info);
1393
1394 if (__predict_false(mlen <= PIO_LEN)) {
/* The packet is not undersized but still fits in PIO_LEN;
 * this indicates a TSO bug at the higher levels.
 */
1398 DPRINTF("**5592 Fix** mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
1399 m0, mlen, m0->m_pkthdr.tso_segsz, m0->m_pkthdr.csum_flags, m0->m_flags);
1400 txq_prod(txq, 1, &txqs);
1401 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1402 m_freem(m0);
1403 m0 = NULL;
1404 flits = (mlen + 7) / 8 + 3;
1405 hdr->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1406 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1407 F_WR_SOP | F_WR_EOP | txqs.compl);
1408 wmb();
1409 hdr->wr.wr_lo = htonl(V_WR_LEN(flits) |
1410 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1411
1412 wr_gen2(txd, txqs.gen);
1413 check_ring_tx_db(sc, txq);
1414 return (0);
1415 }
1416 flits = 3;
1417 } else {
1418 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1419
1420 GET_VTAG(cntrl, m0);
1421 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1422 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1423 cntrl |= F_TXPKT_IPCSUM_DIS;
1424 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
1425 cntrl |= F_TXPKT_L4CSUM_DIS;
1426 cpl->cntrl = htonl(cntrl);
1427 mlen = m0->m_pkthdr.len;
1428 cpl->len = htonl(mlen | 0x80000000);
1429
1430 if (mlen <= PIO_LEN) {
1431 txq_prod(txq, 1, &txqs);
1432 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1433 m_freem(m0);
1434 m0 = NULL;
1435 flits = (mlen + 7) / 8 + 2;
1436 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1437 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1438 F_WR_SOP | F_WR_EOP | txqs.compl);
1439 wmb();
1440 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) |
1441 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1442
1443 wr_gen2(txd, txqs.gen);
1444 check_ring_tx_db(sc, txq);
1445 DPRINTF("pio buf\n");
1446 return (0);
1447 }
1448 DPRINTF("regular buf\n");
1449 flits = 2;
1450 }
1451 wrp = (struct work_request_hdr *)txd;
1452
1453 #ifdef nomore
1454 /*
1455 * XXX need to move into one of the helper routines above
1456 *
1457 */
1458 if ((err = busdma_map_mbufs(m, txq, txsd, segs, &nsegs)) != 0)
1459 return (err);
1460 m0 = *m;
1461 #endif
1462 ndesc = calc_tx_descs(m0, nsegs);
1463
1464 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1465 make_sgl(sgp, segs, nsegs);
1466
1467 sgl_flits = sgl_len(nsegs);
1468
1469 DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc);
1470 txq_prod(txq, ndesc, &txqs);
1471 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1472 wr_lo = htonl(V_WR_TID(txq->token));
1473 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits, wr_hi, wr_lo);
1474 check_ring_tx_db(pi->adapter, txq);
1475
1476 if ((m0->m_type == MT_DATA) &&
1477 ((m0->m_flags & (M_EXT|M_NOFREE)) == M_EXT) &&
1478 (m0->m_ext.ext_type != EXT_PACKET)) {
m0->m_flags &= ~M_EXT;
1480 cxgb_mbufs_outstanding--;
1481 m_free(m0);
1482 }
1483
1484 return (0);
1485 }
1486
1487
1488 /**
1489 * write_imm - write a packet into a Tx descriptor as immediate data
1490 * @d: the Tx descriptor to write
1491 * @m: the packet
1492 * @len: the length of packet data to write as immediate data
1493 * @gen: the generation bit value to write
1494 *
1495 * Writes a packet as immediate data into a Tx descriptor. The packet
1496 * contains a work request at its beginning. We must write the packet
1497 * carefully so the SGE doesn't read accidentally before it's written in
1498 * its entirety.
1499 */
1500 static __inline void
1501 write_imm(struct tx_desc *d, struct mbuf *m,
1502 unsigned int len, unsigned int gen)
1503 {
1504 struct work_request_hdr *from = mtod(m, struct work_request_hdr *);
1505 struct work_request_hdr *to = (struct work_request_hdr *)d;
1506
1507 if (len > WR_LEN)
1508 panic("len too big %d\n", len);
1509 if (len < sizeof(*from))
1510 panic("len too small %d", len);
1511
1512 memcpy(&to[1], &from[1], len - sizeof(*from));
1513 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1514 V_WR_BCNTLFLT(len & 7));
1515 wmb();
1516 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1517 V_WR_LEN((len + 7) / 8));
1518 wr_gen2(d, gen);
1519
1520 /*
 * This check is a hack; we should really fix the logic so
 * that this can't happen.
1523 */
1524 if (m->m_type != MT_DONTFREE)
1525 m_freem(m);
1526
1527 }
1528
1529 /**
1530 * check_desc_avail - check descriptor availability on a send queue
1531 * @adap: the adapter
1532 * @q: the TX queue
1533 * @m: the packet needing the descriptors
1534 * @ndesc: the number of Tx descriptors needed
1535 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1536 *
1537 * Checks if the requested number of Tx descriptors is available on an
1538 * SGE send queue. If the queue is already suspended or not enough
1539 * descriptors are available the packet is queued for later transmission.
1540 * Must be called with the Tx queue locked.
1541 *
1542 * Returns 0 if enough descriptors are available, 1 if there aren't
1543 * enough descriptors and the packet has been queued, and 2 if the caller
1544 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
1546 */
1547 static __inline int
1548 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1549 struct mbuf *m, unsigned int ndesc,
1550 unsigned int qid)
1551 {
1552 /*
 * XXX We currently use this only for checking the control queue;
 * the control queue is only used for binding qsets, which happens
 * at init time, so we are guaranteed enough descriptors.
1556 */
1557 if (__predict_false(!mbufq_empty(&q->sendq))) {
1558 addq_exit: mbufq_tail(&q->sendq, m);
1559 return 1;
1560 }
1561 if (__predict_false(q->size - q->in_use < ndesc)) {
1562
1563 struct sge_qset *qs = txq_to_qset(q, qid);
1564
1565 setbit(&qs->txq_stopped, qid);
1566 smp_mb();
1567
1568 if (should_restart_tx(q) &&
1569 test_and_clear_bit(qid, &qs->txq_stopped))
1570 return 2;
1571
1572 q->stops++;
1573 goto addq_exit;
1574 }
1575 return 0;
1576 }
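
/*
 * Typical calling pattern (cf. ctrl_xmit() below), shown here only as a
 * sketch:
 *
 *	again:	reclaim_completed_tx_imm(q);
 *		ret = check_desc_avail(adap, q, m, ndesc, TXQ_CTRL);
 *		if (ret == 1)
 *			return (ENOSPC);	(packet was queued)
 *		if (ret == 2)
 *			goto again;		(descriptors freed up, retry)
 */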
1577
1578
1579 /**
1580 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1581 * @q: the SGE control Tx queue
1582 *
1583 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1584 * that send only immediate data (presently just the control queues) and
 * thus do not have any mbufs.
1586 */
1587 static __inline void
1588 reclaim_completed_tx_imm(struct sge_txq *q)
1589 {
1590 unsigned int reclaim = q->processed - q->cleaned;
1591
1592 mtx_assert(&q->lock, MA_OWNED);
1593
1594 q->in_use -= reclaim;
1595 q->cleaned += reclaim;
1596 }
1597
1598 static __inline int
1599 immediate(const struct mbuf *m)
1600 {
return (m->m_len <= WR_LEN && m->m_pkthdr.len <= WR_LEN);
1602 }
1603
1604 /**
1605 * ctrl_xmit - send a packet through an SGE control Tx queue
1606 * @adap: the adapter
1607 * @q: the control queue
1608 * @m: the packet
1609 *
1610 * Send a packet through an SGE control Tx queue. Packets sent through
1611 * a control queue must fit entirely as immediate data in a single Tx
1612 * descriptor and have no page fragments.
1613 */
1614 static int
1615 ctrl_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
1616 {
1617 int ret;
1618 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1619
1620 if (__predict_false(!immediate(m))) {
1621 m_freem(m);
1622 return 0;
1623 }
1624
1625 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1626 wrp->wr_lo = htonl(V_WR_TID(q->token));
1627
1628 mtx_lock(&q->lock);
1629 again: reclaim_completed_tx_imm(q);
1630
1631 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1632 if (__predict_false(ret)) {
1633 if (ret == 1) {
1634 mtx_unlock(&q->lock);
1635 return (ENOSPC);
1636 }
1637 goto again;
1638 }
1639 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1640
1641 q->in_use++;
1642 if (++q->pidx >= q->size) {
1643 q->pidx = 0;
1644 q->gen ^= 1;
1645 }
1646 mtx_unlock(&q->lock);
wmb();
1649 t3_write_reg(adap, A_SG_KDOORBELL,
1650 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1651 return (0);
1652 }
1653
1654
1655 /**
1656 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
1658 *
1659 * Resumes transmission on a suspended Tx control queue.
1660 */
1661 static void
1662 restart_ctrlq(void *data, int npending)
1663 {
1664 struct mbuf *m;
1665 struct sge_qset *qs = (struct sge_qset *)data;
1666 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1667 adapter_t *adap = qs->port->adapter;
1668
1669 mtx_lock(&q->lock);
1670 again: reclaim_completed_tx_imm(q);
1671
1672 while (q->in_use < q->size &&
1673 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1674
1675 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1676
1677 if (++q->pidx >= q->size) {
1678 q->pidx = 0;
1679 q->gen ^= 1;
1680 }
1681 q->in_use++;
1682 }
1683 if (!mbufq_empty(&q->sendq)) {
1684 setbit(&qs->txq_stopped, TXQ_CTRL);
1685 smp_mb();
1686
1687 if (should_restart_tx(q) &&
1688 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1689 goto again;
1690 q->stops++;
1691 }
1692 mtx_unlock(&q->lock);
1693 wmb();
1694 t3_write_reg(adap, A_SG_KDOORBELL,
1695 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1696 }
1697
1698
1699 /*
1700 * Send a management message through control queue 0
1701 */
1702 int
1703 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1704 {
1705 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m);
1706 }
1707
1708
1709 /**
1710 * free_qset - free the resources of an SGE queue set
1711 * @sc: the controller owning the queue set
1712 * @q: the queue set
1713 *
1714 * Release the HW and SW resources associated with an SGE queue set, such
1715 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1716 * queue set must be quiesced prior to calling this.
1717 */
1718 void
1719 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1720 {
1721 int i;
1722
1723 t3_free_tx_desc_all(&q->txq[TXQ_ETH]);
1724
1725 for (i = 0; i < SGE_TXQ_PER_SET; i++)
1726 if (q->txq[i].txq_mr.br_ring != NULL) {
1727 free(q->txq[i].txq_mr.br_ring, M_DEVBUF);
1728 mtx_destroy(&q->txq[i].txq_mr.br_lock);
1729 }
1730 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1731 if (q->fl[i].desc) {
1732 mtx_lock_spin(&sc->sge.reg_lock);
1733 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1734 mtx_unlock_spin(&sc->sge.reg_lock);
1735 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1736 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1737 q->fl[i].desc_map);
1738 bus_dma_tag_destroy(q->fl[i].desc_tag);
1739 bus_dma_tag_destroy(q->fl[i].entry_tag);
1740 }
1741 if (q->fl[i].sdesc) {
1742 free_rx_bufs(sc, &q->fl[i]);
1743 free(q->fl[i].sdesc, M_DEVBUF);
1744 }
1745 }
1746
1747 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
1748 if (q->txq[i].desc) {
1749 mtx_lock_spin(&sc->sge.reg_lock);
1750 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
1751 mtx_unlock_spin(&sc->sge.reg_lock);
1752 bus_dmamap_unload(q->txq[i].desc_tag,
1753 q->txq[i].desc_map);
1754 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
1755 q->txq[i].desc_map);
1756 bus_dma_tag_destroy(q->txq[i].desc_tag);
1757 bus_dma_tag_destroy(q->txq[i].entry_tag);
1758 MTX_DESTROY(&q->txq[i].lock);
1759 }
1760 if (q->txq[i].sdesc) {
1761 free(q->txq[i].sdesc, M_DEVBUF);
1762 }
1763 }
1764
1765 if (q->rspq.desc) {
1766 mtx_lock_spin(&sc->sge.reg_lock);
1767 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
1768 mtx_unlock_spin(&sc->sge.reg_lock);
1769
1770 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
1771 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
1772 q->rspq.desc_map);
1773 bus_dma_tag_destroy(q->rspq.desc_tag);
1774 MTX_DESTROY(&q->rspq.lock);
1775 }
1776
1777 tcp_lro_free(&q->lro.ctrl);
1778
1779 bzero(q, sizeof(*q));
1780 }
1781
1782 /**
1783 * t3_free_sge_resources - free SGE resources
1784 * @sc: the adapter softc
1785 *
1786 * Frees resources used by the SGE queue sets.
1787 */
1788 void
1789 t3_free_sge_resources(adapter_t *sc)
1790 {
1791 int i, nqsets;
1792
1793 #ifdef IFNET_MULTIQUEUE
1794 panic("%s should not be called when IFNET_MULTIQUEUE is defined", __FUNCTION__);
1795 #endif
1796 for (nqsets = i = 0; i < (sc)->params.nports; i++)
1797 nqsets += sc->port[i].nqsets;
1798
1799 for (i = 0; i < nqsets; ++i)
1800 t3_free_qset(sc, &sc->sge.qs[i]);
1801 }
1802
1803 /**
1804 * t3_sge_start - enable SGE
1805 * @sc: the controller softc
1806 *
1807 * Enables the SGE for DMAs. This is the last step in starting packet
1808 * transfers.
1809 */
1810 void
1811 t3_sge_start(adapter_t *sc)
1812 {
1813 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
1814 }
1815
1816 /**
1817 * t3_sge_stop - disable SGE operation
1818 * @sc: the adapter
1819 *
 * Disables the DMA engine. This can be called in emergencies (e.g.,
1821 * from error interrupts) or from normal process context. In the latter
1822 * case it also disables any pending queue restart tasklets. Note that
1823 * if it is called in interrupt context it cannot disable the restart
1824 * tasklets as it cannot wait, however the tasklets will have no effect
1825 * since the doorbells are disabled and the driver will call this again
1826 * later from process context, at which time the tasklets will be stopped
1827 * if they are still running.
1828 */
1829 void
1830 t3_sge_stop(adapter_t *sc)
1831 {
1832 int i, nqsets;
1833
1834 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
1835
1836 if (sc->tq == NULL)
1837 return;
1838
1839 for (nqsets = i = 0; i < (sc)->params.nports; i++)
1840 nqsets += sc->port[i].nqsets;
1841 #ifdef notyet
1842 /*
1843 *
1844 * XXX
1845 */
1846 for (i = 0; i < nqsets; ++i) {
1847 struct sge_qset *qs = &sc->sge.qs[i];
1848
1849 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
1850 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
1851 }
1852 #endif
1853 }
1854
/**
 * t3_free_tx_desc - reclaims Tx descriptors and their buffers
 * @q: the Tx queue to reclaim descriptors from
 * @reclaimable: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers. Called with the Tx queue lock held.
 */
1868 void
1869 t3_free_tx_desc(struct sge_txq *q, int reclaimable)
1870 {
1871 struct tx_sw_desc *txsd;
1872 unsigned int cidx;
1873
1874 #ifdef T3_TRACE
1875 T3_TRACE2(sc->tb[q->cntxt_id & 7],
1876 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
1877 #endif
1878 cidx = q->cidx;
1879 txsd = &q->sdesc[cidx];
1880 DPRINTF("reclaiming %d WR\n", reclaimable);
1881 mtx_assert(&q->lock, MA_OWNED);
1882 while (reclaimable--) {
1883 DPRINTF("cidx=%d d=%p\n", cidx, txsd);
1884 if (txsd->mi.mi_base != NULL) {
1885 if (txsd->flags & TX_SW_DESC_MAPPED) {
1886 bus_dmamap_unload(q->entry_tag, txsd->map);
1887 txsd->flags &= ~TX_SW_DESC_MAPPED;
1888 }
1889 m_freem_iovec(&txsd->mi);
1890 buf_ring_scan(&q->txq_mr, txsd->mi.mi_base, __FILE__, __LINE__);
1891 txsd->mi.mi_base = NULL;
1892 /*
1893 * XXX check for cache hit rate here
1894 *
1895 */
1896 q->port->ifp->if_opackets++;
1897 #if defined(DIAGNOSTIC) && 0
1898 if (m_get_priority(txsd->m[0]) != cidx)
1899 printf("pri=%d cidx=%d\n",
1900 (int)m_get_priority(txsd->m[0]), cidx);
1901 #endif
1902
1903 } else
1904 q->txq_skipped++;
1905
1906 ++txsd;
1907 if (++cidx == q->size) {
1908 cidx = 0;
1909 txsd = q->sdesc;
1910 }
1911 }
1912 q->cidx = cidx;
1913
1914 }
1915
1916 void
1917 t3_free_tx_desc_all(struct sge_txq *q)
1918 {
1919 int i;
1920 struct tx_sw_desc *txsd;
1921
1922 for (i = 0; i < q->size; i++) {
1923 txsd = &q->sdesc[i];
1924 if (txsd->mi.mi_base != NULL) {
1925 if (txsd->flags & TX_SW_DESC_MAPPED) {
1926 bus_dmamap_unload(q->entry_tag, txsd->map);
1927 txsd->flags &= ~TX_SW_DESC_MAPPED;
1928 }
1929 m_freem_iovec(&txsd->mi);
1930 bzero(&txsd->mi, sizeof(txsd->mi));
1931 }
1932 }
1933 }
1934
1935 /**
1936 * is_new_response - check if a response is newly written
1937 * @r: the response descriptor
1938 * @q: the response queue
1939 *
1940 * Returns true if a response descriptor contains a yet unprocessed
1941 * response.
1942 */
1943 static __inline int
1944 is_new_response(const struct rsp_desc *r,
1945 const struct sge_rspq *q)
1946 {
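	/*
	 * Responses carry a generation bit that alternates on each pass
	 * through the ring; a descriptor holds a new response only while
	 * its generation bit matches the queue's current generation.
	 */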
1947 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1948 }
1949
1950 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1951 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1952 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1953 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1954 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1955
1956 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1957 #define NOMEM_INTR_DELAY 2500
1958
1959 /**
1960 * write_ofld_wr - write an offload work request
1961 * @adap: the adapter
1962 * @m: the packet to send
1963 * @q: the Tx queue
1964 * @pidx: index of the first Tx descriptor to write
1965 * @gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *	@segs: DMA segments describing the packet payload
 *	@nsegs: number of entries in @segs
1967 *
1968 * Write an offload work request to send the supplied packet. The packet
1969 * data already carry the work request with most fields populated.
1970 */
1971 static void
1972 write_ofld_wr(adapter_t *adap, struct mbuf *m,
1973 struct sge_txq *q, unsigned int pidx,
1974 unsigned int gen, unsigned int ndesc,
1975 bus_dma_segment_t *segs, unsigned int nsegs)
1976 {
1977 unsigned int sgl_flits, flits;
1978 struct work_request_hdr *from;
1979 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
1980 struct tx_desc *d = &q->desc[pidx];
1981 struct txq_state txqs;
1982
1983 if (immediate(m) && nsegs == 0) {
1984 write_imm(d, m, m->m_len, gen);
1985 return;
1986 }
1987
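	/*
	 * Copy the body of the work request that follows its header into the
	 * descriptor, then append a scatter/gather list for the payload,
	 * either inline in the descriptor (single-descriptor case) or via a
	 * temporary SGL that write_wr_hdr_sgl() spreads across descriptors.
	 */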
1988 /* Only TX_DATA builds SGLs */
1989 from = mtod(m, struct work_request_hdr *);
1990 memcpy(&d->flit[1], &from[1], m->m_len - sizeof(*from));
1991
1992 flits = m->m_len / 8;
1993 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : sgl;
1994
1995 make_sgl(sgp, segs, nsegs);
1996 sgl_flits = sgl_len(nsegs);
1997
1998 txqs.gen = gen;
1999 txqs.pidx = pidx;
2000 txqs.compl = 0;
2001
2002 write_wr_hdr_sgl(ndesc, d, &txqs, q, sgl, flits, sgl_flits,
2003 from->wr_hi, from->wr_lo);
2004 }
2005
2006 /**
2007 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
2008 * @m: the packet
2009 *
2010 * Returns the number of Tx descriptors needed for the given offload
2011 * packet. These packets are already fully constructed.
2012 */
2013 static __inline unsigned int
2014 calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs)
2015 {
2016 unsigned int flits, cnt = 0;
2017 int ndescs;
2018
2019 if (m->m_len <= WR_LEN && nsegs == 0)
2020 return (1); /* packet fits as immediate data */
2021
2022 if (m->m_flags & M_IOVEC)
2023 cnt = mtomv(m)->mv_count;
2024 else
2025 cnt = nsegs;
2026
	/* length of the work request headers, in flits (8-byte units) */
2028 flits = m->m_len / 8;
2029
2030 ndescs = flits_to_desc(flits + sgl_len(cnt));
2031
2032 CTR4(KTR_CXGB, "flits=%d sgl_len=%d nsegs=%d ndescs=%d",
2033 flits, sgl_len(cnt), nsegs, ndescs);
2034
2035 return (ndescs);
2036 }
2037
2038 /**
2039 * ofld_xmit - send a packet through an offload queue
2040 * @adap: the adapter
2041 * @q: the Tx offload queue
2042 * @m: the packet
2043 *
2044 * Send an offload packet through an SGE offload queue.
2045 */
2046 static int
2047 ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
2048 {
2049 int ret, nsegs;
2050 unsigned int ndesc;
2051 unsigned int pidx, gen;
2052 bus_dma_segment_t segs[TX_MAX_SEGS], *vsegs;
2053 struct tx_sw_desc *stx;
2054
2055 nsegs = m_get_sgllen(m);
2056 vsegs = m_get_sgl(m);
2057 ndesc = calc_tx_descs_ofld(m, nsegs);
2058 busdma_map_sgl(vsegs, segs, nsegs);
2059
2060 stx = &q->sdesc[q->pidx];
2061 KASSERT(stx->mi.mi_base == NULL, ("mi_base set"));
2062
2063 mtx_lock(&q->lock);
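	/* Reclaim a batch of completed descriptors before checking for room. */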
2064 again: reclaim_completed_tx_(q, 16);
2065 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2066 if (__predict_false(ret)) {
2067 if (ret == 1) {
2068 printf("no ofld desc avail\n");
2069
2070 m_set_priority(m, ndesc); /* save for restart */
2071 mtx_unlock(&q->lock);
2072 return (EINTR);
2073 }
2074 goto again;
2075 }
2076
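	/*
	 * Claim the descriptors: advance the producer index and toggle the
	 * generation bit when the index wraps past the end of the ring.
	 */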
2077 gen = q->gen;
2078 q->in_use += ndesc;
2079 pidx = q->pidx;
2080 q->pidx += ndesc;
2081 if (q->pidx >= q->size) {
2082 q->pidx -= q->size;
2083 q->gen ^= 1;
2084 }
2085 #ifdef T3_TRACE
2086 T3_TRACE5(adap->tb[q->cntxt_id & 7],
2087 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
2088 ndesc, pidx, skb->len, skb->len - skb->data_len,
2089 skb_shinfo(skb)->nr_frags);
2090 #endif
2091 mtx_unlock(&q->lock);
2092
2093 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2094 check_ring_tx_db(adap, q);
2095 return (0);
2096 }
2097
2098 /**
2099 * restart_offloadq - restart a suspended offload queue
 *	@qs: the queue set containing the offload queue
2101 *
2102 * Resumes transmission on a suspended Tx offload queue.
2103 */
2104 static void
2105 restart_offloadq(void *data, int npending)
2106 {
2107 struct mbuf *m;
2108 struct sge_qset *qs = data;
2109 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2110 adapter_t *adap = qs->port->adapter;
2111 bus_dma_segment_t segs[TX_MAX_SEGS];
2112 struct tx_sw_desc *stx = &q->sdesc[q->pidx];
2113 int nsegs, cleaned;
2114
2115 mtx_lock(&q->lock);
2116 again: cleaned = reclaim_completed_tx_(q, 16);
2117
2118 while ((m = mbufq_peek(&q->sendq)) != NULL) {
2119 unsigned int gen, pidx;
2120 unsigned int ndesc = m_get_priority(m);
2121
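		/*
		 * Not enough descriptors: mark the queue stopped, then
		 * re-check after the memory barrier so that a completion
		 * racing with us cannot leave the queue stopped while room
		 * is actually available.
		 */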
2122 if (__predict_false(q->size - q->in_use < ndesc)) {
2123 setbit(&qs->txq_stopped, TXQ_OFLD);
2124 smp_mb();
2125
2126 if (should_restart_tx(q) &&
2127 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2128 goto again;
2129 q->stops++;
2130 break;
2131 }
2132
2133 gen = q->gen;
2134 q->in_use += ndesc;
2135 pidx = q->pidx;
2136 q->pidx += ndesc;
2137 if (q->pidx >= q->size) {
2138 q->pidx -= q->size;
2139 q->gen ^= 1;
2140 }
2141
2142 (void)mbufq_dequeue(&q->sendq);
2143 busdma_map_mbufs(&m, q, stx, segs, &nsegs);
2144 mtx_unlock(&q->lock);
2145 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2146 mtx_lock(&q->lock);
2147 }
2148 mtx_unlock(&q->lock);
2149
2150 #if USE_GTS
2151 set_bit(TXQ_RUNNING, &q->flags);
2152 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2153 #endif
2154 wmb();
2155 t3_write_reg(adap, A_SG_KDOORBELL,
2156 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2157 }
2158
2159 /**
2160 * queue_set - return the queue set a packet should use
2161 * @m: the packet
2162 *
2163 * Maps a packet to the SGE queue set it should use. The desired queue
2164 * set is carried in bits 1-3 in the packet's priority.
2165 */
2166 static __inline int
2167 queue_set(const struct mbuf *m)
2168 {
2169 return m_get_priority(m) >> 1;
2170 }
2171
2172 /**
2173 * is_ctrl_pkt - return whether an offload packet is a control packet
2174 * @m: the packet
2175 *
2176 * Determines whether an offload packet should use an OFLD or a CTRL
2177 * Tx queue. This is indicated by bit 0 in the packet's priority.
2178 */
2179 static __inline int
2180 is_ctrl_pkt(const struct mbuf *m)
2181 {
2182 return m_get_priority(m) & 1;
2183 }
2184
2185 /**
2186 * t3_offload_tx - send an offload packet
2187 * @tdev: the offload device to send to
2188 * @m: the packet
2189 *
2190 * Sends an offload packet. We use the packet priority to select the
2191 * appropriate Tx queue as follows: bit 0 indicates whether the packet
2192 * should be sent as regular or control, bits 1-3 select the queue set.
2193 */
2194 int
2195 t3_offload_tx(struct t3cdev *tdev, struct mbuf *m)
2196 {
2197 adapter_t *adap = tdev2adap(tdev);
2198 struct sge_qset *qs = &adap->sge.qs[queue_set(m)];
2199
2200 if (__predict_false(is_ctrl_pkt(m)))
2201 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m);
2202
2203 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m);
2204 }
2205
2206 /**
2207 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
2208 * @tdev: the offload device that will be receiving the packets
2209 * @q: the SGE response queue that assembled the bundle
 *	@mbufs: the array holding the packets of the partial bundle
2211 * @n: the number of packets in the bundle
2212 *
2213 * Delivers a (partial) bundle of Rx offload packets to an offload device.
2214 */
2215 static __inline void
2216 deliver_partial_bundle(struct t3cdev *tdev,
2217 struct sge_rspq *q,
2218 struct mbuf *mbufs[], int n)
2219 {
2220 if (n) {
2221 q->offload_bundles++;
2222 cxgb_ofld_recv(tdev, mbufs, n);
2223 }
2224 }
2225
2226 static __inline int
2227 rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
2228 struct mbuf *m, struct mbuf *rx_gather[],
2229 unsigned int gather_idx)
2230 {
2231
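	/*
	 * Accumulate offload packets into the gather array and hand them to
	 * the offload layer in bundles of RX_BUNDLE_SIZE to amortize the
	 * per-call overhead.
	 */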
2232 rq->offload_pkts++;
2233 m->m_pkthdr.header = mtod(m, void *);
2234 rx_gather[gather_idx++] = m;
2235 if (gather_idx == RX_BUNDLE_SIZE) {
2236 cxgb_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE);
2237 gather_idx = 0;
2238 rq->offload_bundles++;
2239 }
2240 return (gather_idx);
2241 }
2242
2243 static void
2244 restart_tx(struct sge_qset *qs)
2245 {
2246 struct adapter *sc = qs->port->adapter;
2247
2248
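	/*
	 * For each stopped Tx queue that now has enough free descriptors,
	 * clear its stopped bit and schedule its resume task.
	 */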
2249 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2250 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2251 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2252 qs->txq[TXQ_OFLD].restarts++;
2253 DPRINTF("restarting TXQ_OFLD\n");
2254 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2255 }
2256 DPRINTF("stopped=0x%x restart=%d processed=%d cleaned=%d in_use=%d\n",
2257 qs->txq_stopped, should_restart_tx(&qs->txq[TXQ_CTRL]),
2258 qs->txq[TXQ_CTRL].processed, qs->txq[TXQ_CTRL].cleaned,
2259 qs->txq[TXQ_CTRL].in_use);
2260
2261 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2262 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2263 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2264 qs->txq[TXQ_CTRL].restarts++;
2265 DPRINTF("restarting TXQ_CTRL\n");
2266 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2267 }
2268 }
2269
2270 /**
2271 * t3_sge_alloc_qset - initialize an SGE queue set
2272 * @sc: the controller softc
2273 * @id: the queue set id
2274 * @nports: how many Ethernet ports will be using this queue set
2275 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2276 * @p: configuration parameters for this queue set
2277 * @ntxq: number of Tx queues for the queue set
2278 * @pi: port info for queue set
2279 *
2280 * Allocate resources and initialize an SGE queue set. A queue set
2281 * comprises a response queue, two Rx free-buffer queues, and up to 3
2282 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2283 * queue, offload queue, and control queue.
2284 */
2285 int
2286 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2287 const struct qset_params *p, int ntxq, struct port_info *pi)
2288 {
2289 struct sge_qset *q = &sc->sge.qs[id];
2290 int i, header_size, ret = 0;
2291
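	/*
	 * Allocate the software mbuf ring used to stage packets for each
	 * Tx queue before they are turned into hardware descriptors.
	 */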
2292 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2293 if ((q->txq[i].txq_mr.br_ring = malloc(cxgb_txq_buf_ring_size*sizeof(struct mbuf *),
2294 M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
2295 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2296 goto err;
2297 }
2298 q->txq[i].txq_mr.br_prod = q->txq[i].txq_mr.br_cons = 0;
2299 q->txq[i].txq_mr.br_size = cxgb_txq_buf_ring_size;
2300 mtx_init(&q->txq[i].txq_mr.br_lock, "txq mbuf ring", NULL, MTX_DEF);
2301 }
2302
2303 init_qset_cntxt(q, id);
2304 q->idx = id;
2305
2306 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2307 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2308 &q->fl[0].desc, &q->fl[0].sdesc,
2309 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2310 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2311 printf("error %d from alloc ring fl0\n", ret);
2312 goto err;
2313 }
2314
2315 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2316 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2317 &q->fl[1].desc, &q->fl[1].sdesc,
2318 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2319 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2320 printf("error %d from alloc ring fl1\n", ret);
2321 goto err;
2322 }
2323
2324 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2325 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2326 &q->rspq.desc_tag, &q->rspq.desc_map,
2327 NULL, NULL)) != 0) {
2328 printf("error %d from alloc ring rspq\n", ret);
2329 goto err;
2330 }
2331
2332 for (i = 0; i < ntxq; ++i) {
2333 /*
2334 * The control queue always uses immediate data so does not
2335 * need to keep track of any mbufs.
2336 * XXX Placeholder for future TOE support.
2337 */
2338 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2339
2340 if ((ret = alloc_ring(sc, p->txq_size[i],
2341 sizeof(struct tx_desc), sz,
2342 &q->txq[i].phys_addr, &q->txq[i].desc,
2343 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2344 &q->txq[i].desc_map,
2345 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2346 printf("error %d from alloc ring tx %i\n", ret, i);
2347 goto err;
2348 }
2349 mbufq_init(&q->txq[i].sendq);
2350 q->txq[i].gen = 1;
2351 q->txq[i].size = p->txq_size[i];
2352 snprintf(q->txq[i].lockbuf, TXQ_NAME_LEN, "t3 txq lock %d:%d:%d",
2353 device_get_unit(sc->dev), irq_vec_idx, i);
2354 MTX_INIT(&q->txq[i].lock, q->txq[i].lockbuf, NULL, MTX_DEF);
2355 }
2356
2357 q->txq[TXQ_ETH].port = pi;
2358
2359 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2360 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2361 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_ETH]);
2362 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_OFLD]);
2363
2364 q->fl[0].gen = q->fl[1].gen = 1;
2365 q->fl[0].size = p->fl_size;
2366 q->fl[1].size = p->jumbo_size;
2367
2368 q->rspq.gen = 1;
2369 q->rspq.cidx = 0;
2370 q->rspq.size = p->rspq_size;
2371
2372
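	/*
	 * Each receive cluster reserves space at its head for an embedded
	 * mbuf header, packet header, external-storage descriptor and
	 * reference count (see init_cluster_mbuf()), so the usable buffer
	 * size is the cluster size minus this header.
	 */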
2373 header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
2374 q->txq[TXQ_ETH].stop_thres = nports *
2375 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2376
2377 q->fl[0].buf_size = (MCLBYTES - header_size);
2378 q->fl[0].zone = zone_clust;
2379 q->fl[0].type = EXT_CLUSTER;
2380 #if __FreeBSD_version >= 700111
2381 if (cxgb_use_16k_clusters) {
2382 q->fl[1].buf_size = MJUM16BYTES - header_size;
2383 q->fl[1].zone = zone_jumbo16;
2384 q->fl[1].type = EXT_JUMBO16;
2385 } else {
2386 q->fl[1].buf_size = MJUM9BYTES - header_size;
2387 q->fl[1].zone = zone_jumbo9;
2388 q->fl[1].type = EXT_JUMBO9;
2389 }
2390 #else
2391 q->fl[1].buf_size = MJUMPAGESIZE - header_size;
2392 q->fl[1].zone = zone_jumbop;
2393 q->fl[1].type = EXT_JUMBOP;
2394 #endif
2395
2396 /*
 * We allocate and set up the lro_ctrl structure regardless of whether
 * LRO is available and/or enabled.
2399 */
2400 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2401 ret = tcp_lro_init(&q->lro.ctrl);
2402 if (ret) {
2403 printf("error %d from tcp_lro_init\n", ret);
2404 goto err;
2405 }
2406 q->lro.ctrl.ifp = pi->ifp;
2407
2408 mtx_lock_spin(&sc->sge.reg_lock);
2409 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2410 q->rspq.phys_addr, q->rspq.size,
2411 q->fl[0].buf_size, 1, 0);
2412 if (ret) {
2413 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2414 goto err_unlock;
2415 }
2416
2417 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2418 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2419 q->fl[i].phys_addr, q->fl[i].size,
2420 q->fl[i].buf_size, p->cong_thres, 1,
2421 0);
2422 if (ret) {
2423 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2424 goto err_unlock;
2425 }
2426 }
2427
2428 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2429 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2430 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2431 1, 0);
2432 if (ret) {
2433 printf("error %d from t3_sge_init_ecntxt\n", ret);
2434 goto err_unlock;
2435 }
2436
2437 if (ntxq > 1) {
2438 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2439 USE_GTS, SGE_CNTXT_OFLD, id,
2440 q->txq[TXQ_OFLD].phys_addr,
2441 q->txq[TXQ_OFLD].size, 0, 1, 0);
2442 if (ret) {
2443 printf("error %d from t3_sge_init_ecntxt\n", ret);
2444 goto err_unlock;
2445 }
2446 }
2447
2448 if (ntxq > 2) {
2449 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2450 SGE_CNTXT_CTRL, id,
2451 q->txq[TXQ_CTRL].phys_addr,
2452 q->txq[TXQ_CTRL].size,
2453 q->txq[TXQ_CTRL].token, 1, 0);
2454 if (ret) {
2455 printf("error %d from t3_sge_init_ecntxt\n", ret);
2456 goto err_unlock;
2457 }
2458 }
2459
2460 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2461 device_get_unit(sc->dev), irq_vec_idx);
2462 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2463
2464 mtx_unlock_spin(&sc->sge.reg_lock);
2465 t3_update_qset_coalesce(q, p);
2466 q->port = pi;
2467
2468 refill_fl(sc, &q->fl[0], q->fl[0].size);
2469 refill_fl(sc, &q->fl[1], q->fl[1].size);
2470 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2471
2472 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2473 V_NEWTIMER(q->rspq.holdoff_tmr));
2474
2475 return (0);
2476
2477 err_unlock:
2478 mtx_unlock_spin(&sc->sge.reg_lock);
2479 err:
2480 t3_free_qset(sc, q);
2481
2482 return (ret);
2483 }
2484
2485 /*
2486 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
 * ethernet data.  Hardware checksum results and any VLAN tag are also
 * taken into account here.
2489 */
2490 void
2491 t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
2492 {
2493 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2494 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2495 struct ifnet *ifp = pi->ifp;
2496
2497 DPRINTF("rx_eth m=%p m->m_data=%p p->iff=%d\n", m, mtod(m, uint8_t *), cpl->iff);
2498
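	/*
	 * If the hardware verified the checksum of a non-fragmented packet,
	 * mark both the IP header and the TCP/UDP payload checksums as
	 * already validated so the stack skips software verification.
	 */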
2499 if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
2500 cpl->csum_valid && cpl->csum == 0xffff) {
2501 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);
2502 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2503 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID|CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
2504 m->m_pkthdr.csum_data = 0xffff;
2505 }
2506 /*
2507 * XXX need to add VLAN support for 6.x
2508 */
2509 #ifdef VLAN_SUPPORTED
2510 if (__predict_false(cpl->vlan_valid)) {
2511 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2512 m->m_flags |= M_VLANTAG;
2513 }
2514 #endif
2515
2516 m->m_pkthdr.rcvif = ifp;
2517 m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad;
2518 ifp->if_ipackets++;
2519 #ifndef DISABLE_MBUF_IOVEC
2520 m_explode(m);
2521 #endif
2522 /*
2523 * adjust after conversion to mbuf chain
2524 */
2525 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2526 m->m_len -= (sizeof(*cpl) + ethpad);
2527 m->m_data += (sizeof(*cpl) + ethpad);
2528 }
2529
2530 static void
2531 ext_free_handler(void *buf, void * args)
2532 {
2533 uintptr_t type = (uintptr_t)args;
2534 uma_zone_t zone;
2535 struct mbuf *m;
2536
2537 m = buf;
2538 zone = m_getzonefromtype(type);
2539 m->m_ext.ext_type = (int)type;
2540 cxgb_ext_freed++;
2541 cxgb_cache_put(zone, m);
2542 }
2543
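/*
 * Construct an mbuf in place at the start of a receive cluster: the mbuf
 * header, packet header, external-storage bookkeeping and reference count
 * all live inside the cluster itself, and the payload begins immediately
 * after them.
 */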
2544 static void
2545 init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone)
2546 {
2547 struct mbuf *m;
2548 int header_size;
2549
2550 header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) +
2551 sizeof(struct m_ext_) + sizeof(uint32_t);
2552
2553 bzero(cl, header_size);
2554 m = (struct mbuf *)cl;
2555
2556 cxgb_ext_inited++;
2557 SLIST_INIT(&m->m_pkthdr.tags);
2558 m->m_type = MT_DATA;
2559 m->m_flags = flags | M_NOFREE | M_EXT;
2560 m->m_data = cl + header_size;
2561 m->m_ext.ext_buf = cl;
2562 m->m_ext.ref_cnt = (uint32_t *)(cl + header_size - sizeof(uint32_t));
2563 m->m_ext.ext_size = m_getsizefromtype(type);
2564 m->m_ext.ext_free = ext_free_handler;
2565 m->m_ext.ext_args = (void *)(uintptr_t)type;
2566 m->m_ext.ext_type = EXT_EXTREF;
2567 *(m->m_ext.ref_cnt) = 1;
2568 DPRINTF("data=%p ref_cnt=%p\n", m->m_data, m->m_ext.ref_cnt);
2569 }
2570
2571
2572 /**
2573 * get_packet - return the next ingress packet buffer from a free list
2574 * @adap: the adapter that received the packet
2575 * @drop_thres: # of remaining buffers before we start dropping packets
2576 * @qs: the qset that the SGE free list holding the packet belongs to
2577 * @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
2578 * @r: response descriptor
2579 *
 *	Get the next packet from a free list and complete setup of the
 *	mbuf.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
2587 */
2588 #ifdef DISABLE_MBUF_IOVEC
2589
2590 static int
2591 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2592 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2593 {
2594
2595 unsigned int len_cq = ntohl(r->len_cq);
2596 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2597 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2598 uint32_t len = G_RSPD_LEN(len_cq);
2599 uint32_t flags = ntohl(r->flags);
2600 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
2601 caddr_t cl;
2602 struct mbuf *m, *m0;
2603 int ret = 0;
2604
2605 prefetch(sd->rxsd_cl);
2606
2607 fl->credits--;
2608 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2609
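	/*
	 * Small, single-descriptor packets are copied into a fresh mbuf so
	 * the original cluster can be recycled onto the free list; larger
	 * packets take ownership of the cluster itself.
	 */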
2610 if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) {
2611 if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
2612 goto skip_recycle;
2613 cl = mtod(m0, void *);
2614 memcpy(cl, sd->data, len);
2615 recycle_rx_buf(adap, fl, fl->cidx);
2616 m = m0;
2617 m0->m_len = len;
2618 } else {
2619 skip_recycle:
2620
2621 bus_dmamap_unload(fl->entry_tag, sd->map);
2622 cl = sd->rxsd_cl;
2623 m = m0 = (struct mbuf *)cl;
2624
2625 if ((sopeop == RSPQ_SOP_EOP) ||
2626 (sopeop == RSPQ_SOP))
2627 flags = M_PKTHDR;
2628 init_cluster_mbuf(cl, flags, fl->type, fl->zone);
2629 m0->m_len = len;
2630 }
2631 switch(sopeop) {
2632 case RSPQ_SOP_EOP:
2633 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
2634 mh->mh_head = mh->mh_tail = m;
2635 m->m_pkthdr.len = len;
2636 ret = 1;
2637 break;
2638 case RSPQ_NSOP_NEOP:
2639 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
2640 if (mh->mh_tail == NULL) {
2641 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2642 m_freem(m);
2643 break;
2644 }
2645 mh->mh_tail->m_next = m;
2646 mh->mh_tail = m;
2647 mh->mh_head->m_pkthdr.len += len;
2648 ret = 0;
2649 break;
2650 case RSPQ_SOP:
2651 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
2652 m->m_pkthdr.len = len;
2653 mh->mh_head = mh->mh_tail = m;
2654 ret = 0;
2655 break;
2656 case RSPQ_EOP:
2657 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
2658 mh->mh_head->m_pkthdr.len += len;
2659 mh->mh_tail->m_next = m;
2660 mh->mh_tail = m;
2661 ret = 1;
2662 break;
2663 }
2664 if (++fl->cidx == fl->size)
2665 fl->cidx = 0;
2666
2667 return (ret);
2668 }
2669
2670 #else
2671
2672 static int
2673 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2674 struct mbuf **m, struct rsp_desc *r)
2675 {
2676
2677 unsigned int len_cq = ntohl(r->len_cq);
2678 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2679 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2680 uint32_t len = G_RSPD_LEN(len_cq);
2681 uint32_t flags = ntohl(r->flags);
2682 uint8_t sopeop = G_RSPD_SOP_EOP(flags);
2683 void *cl;
2684 int ret = 0;
2685 struct mbuf *m0;
2686 #if 0
2687 if ((sd + 1 )->rxsd_cl)
2688 prefetch((sd + 1)->rxsd_cl);
2689 if ((sd + 2)->rxsd_cl)
2690 prefetch((sd + 2)->rxsd_cl);
2691 #endif
2692 DPRINTF("rx cpu=%d\n", curcpu);
2693 fl->credits--;
2694 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2695
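	/* Copy small single-descriptor packets into a new mbuf and recycle the
	 * cluster; otherwise hand the cluster itself up as the mbuf. */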
2696 if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) {
2697 if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
2698 goto skip_recycle;
2699 cl = mtod(m0, void *);
2700 memcpy(cl, sd->data, len);
2701 recycle_rx_buf(adap, fl, fl->cidx);
2702 *m = m0;
2703 } else {
2704 skip_recycle:
2705 bus_dmamap_unload(fl->entry_tag, sd->map);
2706 cl = sd->rxsd_cl;
2707 *m = m0 = (struct mbuf *)cl;
2708 }
2709
2710 switch(sopeop) {
2711 case RSPQ_SOP_EOP:
2712 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
2713 if (cl == sd->rxsd_cl)
2714 init_cluster_mbuf(cl, M_PKTHDR, fl->type, fl->zone);
2715 m0->m_len = m0->m_pkthdr.len = len;
2716 ret = 1;
2717 goto done;
2718 break;
2719 case RSPQ_NSOP_NEOP:
2720 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
2721 panic("chaining unsupported");
2722 ret = 0;
2723 break;
2724 case RSPQ_SOP:
2725 DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
2726 panic("chaining unsupported");
2727 m_iovinit(m0);
2728 ret = 0;
2729 break;
2730 case RSPQ_EOP:
2731 DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
2732 panic("chaining unsupported");
2733 ret = 1;
2734 break;
2735 }
2736 panic("append not supported");
2737 #if 0
2738 m_iovappend(m0, cl, fl->buf_size, len, sizeof(uint32_t), sd->rxsd_ref);
2739 #endif
2740 done:
2741 if (++fl->cidx == fl->size)
2742 fl->cidx = 0;
2743
2744 return (ret);
2745 }
2746 #endif
2747 /**
2748 * handle_rsp_cntrl_info - handles control information in a response
2749 * @qs: the queue set corresponding to the response
2750 * @flags: the response control flags
2751 *
2752 * Handles the control information of an SGE response, such as GTS
2753 * indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, so we don't do any extra SW coalescing.
2755 */
2756 static __inline void
2757 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2758 {
2759 unsigned int credits;
2760
2761 #if USE_GTS
2762 if (flags & F_RSPD_TXQ0_GTS)
2763 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2764 #endif
2765 credits = G_RSPD_TXQ0_CR(flags);
2766 if (credits)
2767 qs->txq[TXQ_ETH].processed += credits;
2768
2769 credits = G_RSPD_TXQ2_CR(flags);
2770 if (credits)
2771 qs->txq[TXQ_CTRL].processed += credits;
2772
2773 # if USE_GTS
2774 if (flags & F_RSPD_TXQ1_GTS)
2775 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2776 # endif
2777 credits = G_RSPD_TXQ1_CR(flags);
2778 if (credits)
2779 qs->txq[TXQ_OFLD].processed += credits;
2780
2781 }
2782
2783 static void
2784 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2785 unsigned int sleeping)
2786 {
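	/*
	 * Currently a no-op: GTS-based doorbell updates (USE_GTS) are not
	 * enabled in this driver, so there is nothing to ring here.
	 */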
2787 ;
2788 }
2789
2790 /**
2791 * process_responses - process responses from an SGE response queue
2792 * @adap: the adapter
2793 * @qs: the queue set to which the response queue belongs
2794 * @budget: how many responses can be processed in this round
2795 *
2796 * Process responses from an SGE response queue up to the supplied budget.
2797 * Responses include received packets as well as credits and other events
2798 * for the queues that belong to the response queue's queue set.
2799 * A negative budget is effectively unlimited.
2800 *
2801 * Additionally choose the interrupt holdoff time for the next interrupt
2802 * on this queue. If the system is under memory shortage use a fairly
2803 * long delay to help recovery.
2804 */
2805 int
2806 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2807 {
2808 struct sge_rspq *rspq = &qs->rspq;
2809 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2810 int budget_left = budget;
2811 unsigned int sleeping = 0;
2812 int lro_enabled = qs->lro.enabled;
2813 int skip_lro;
2814 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2815 struct mbuf *offload_mbufs[RX_BUNDLE_SIZE];
2816 int ngathered = 0;
2817 #ifdef DEBUG
2818 static int last_holdoff = 0;
2819 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2820 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2821 last_holdoff = rspq->holdoff_tmr;
2822 }
2823 #endif
2824 rspq->next_holdoff = rspq->holdoff_tmr;
2825
2826 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2827 int eth, eop = 0, ethpad = 0;
2828 uint32_t flags = ntohl(r->flags);
2829 uint32_t rss_csum = *(const uint32_t *)r;
2830 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2831
2832 eth = (r->rss_hdr.opcode == CPL_RX_PKT);
2833
2834 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2835 struct mbuf *m;
2836
2837 if (cxgb_debug)
2838 printf("async notification\n");
2839
2840 if (rspq->rspq_mh.mh_head == NULL) {
2841 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2842 m = rspq->rspq_mh.mh_head;
2843 } else {
2844 m = m_gethdr(M_DONTWAIT, MT_DATA);
2845 }
2846
2847 /* XXX m is lost here if rspq->rspq_mbuf is not NULL */
2848
2849 if (m == NULL)
2850 goto no_mem;
2851
2852 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2853 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2854 *mtod(m, char *) = CPL_ASYNC_NOTIF;
2855 rss_csum = htonl(CPL_ASYNC_NOTIF << 24);
2856 eop = 1;
2857 rspq->async_notif++;
2858 goto skip;
2859 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2860 struct mbuf *m = NULL;
2861
2862 DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n",
2863 r->rss_hdr.opcode, rspq->cidx);
2864 if (rspq->rspq_mh.mh_head == NULL)
2865 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2866 else
2867 m = m_gethdr(M_DONTWAIT, MT_DATA);
2868
2869 if (rspq->rspq_mh.mh_head == NULL && m == NULL) {
2870 no_mem:
2871 rspq->next_holdoff = NOMEM_INTR_DELAY;
2872 budget_left--;
2873 break;
2874 }
2875 get_imm_packet(adap, r, rspq->rspq_mh.mh_head);
2876 eop = 1;
2877 rspq->imm_data++;
2878 } else if (r->len_cq) {
2879 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2880
2881 #ifdef DISABLE_MBUF_IOVEC
2882 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r);
2883 #else
2884 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
2885 #endif
2886 #ifdef IFNET_MULTIQUEUE
2887 rspq->rspq_mh.mh_head->m_pkthdr.rss_hash = rss_hash;
2888 #endif
2889 ethpad = 2;
2890 } else {
2891 DPRINTF("pure response\n");
2892 rspq->pure_rsps++;
2893 }
2894 skip:
2895 if (flags & RSPD_CTRL_MASK) {
2896 sleeping |= flags & RSPD_GTS_MASK;
2897 handle_rsp_cntrl_info(qs, flags);
2898 }
2899
2900 r++;
2901 if (__predict_false(++rspq->cidx == rspq->size)) {
2902 rspq->cidx = 0;
2903 rspq->gen ^= 1;
2904 r = rspq->desc;
2905 }
2906 prefetch(r);
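		/*
		 * Return response queue credits to the hardware once a
		 * quarter of the ring has been consumed.
		 */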
2907 if (++rspq->credits >= (rspq->size / 4)) {
2908 refill_rspq(adap, rspq, rspq->credits);
2909 rspq->credits = 0;
2910 }
2911 DPRINTF("eth=%d eop=%d flags=0x%x\n", eth, eop, flags);
2912
2913 if (!eth && eop) {
2914 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
2915 /*
2916 * XXX size mismatch
2917 */
2918 m_set_priority(rspq->rspq_mh.mh_head, rss_hash);
2919
2920
2921 ngathered = rx_offload(&adap->tdev, rspq,
2922 rspq->rspq_mh.mh_head, offload_mbufs, ngathered);
2923 rspq->rspq_mh.mh_head = NULL;
2924 DPRINTF("received offload packet\n");
2925
2926 } else if (eth && eop) {
2927 struct mbuf *m = rspq->rspq_mh.mh_head;
2928 prefetch(mtod(m, uint8_t *));
2929 prefetch(mtod(m, uint8_t *) + L1_CACHE_BYTES);
2930
2931 t3_rx_eth(adap, rspq, m, ethpad);
2932 /*
2933 * The T304 sends incoming packets on any qset. If LRO
			 * is also enabled, we could end up sending the packet up
2935 * lro_ctrl->ifp's input. That is incorrect.
2936 *
2937 * The mbuf's rcvif was derived from the cpl header and
2938 * is accurate. Skip LRO and just use that.
2939 */
2940 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2941
2942 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro &&
2943 (tcp_lro_rx(lro_ctrl, m, 0) == 0)) {
				/* successfully queued for LRO */
2945 } else {
2946 /*
2947 * LRO not enabled, packet unsuitable for LRO,
2948 * or unable to queue. Pass it up right now in
2949 * either case.
2950 */
2951 struct ifnet *ifp = m->m_pkthdr.rcvif;
2952 (*ifp->if_input)(ifp, m);
2953 }
2954 DPRINTF("received tunnel packet\n");
2955 rspq->rspq_mh.mh_head = NULL;
2956
2957 }
2958 __refill_fl_lt(adap, &qs->fl[0], 32);
2959 __refill_fl_lt(adap, &qs->fl[1], 32);
2960 --budget_left;
2961 }
2962
2963 deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered);
2964
2965 /* Flush LRO */
2966 while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2967 struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2968 SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2969 tcp_lro_flush(lro_ctrl, queued);
2970 }
2971
2972 if (sleeping)
2973 check_ring_db(adap, qs, sleeping);
2974
2975 smp_mb(); /* commit Tx queue processed updates */
2976 if (__predict_false(qs->txq_stopped > 1))
2977 restart_tx(qs);
2978
2979 __refill_fl_lt(adap, &qs->fl[0], 512);
2980 __refill_fl_lt(adap, &qs->fl[1], 512);
2981 budget -= budget_left;
2982 return (budget);
2983 }
2984
2985 /*
2986 * A helper function that processes responses and issues GTS.
2987 */
2988 static __inline int
2989 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
2990 {
2991 int work;
2992 static int last_holdoff = 0;
2993
2994 work = process_responses(adap, rspq_to_qset(rq), -1);
2995
2996 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
2997 printf("next_holdoff=%d\n", rq->next_holdoff);
2998 last_holdoff = rq->next_holdoff;
2999 }
3000 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3001 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3002
3003 return (work);
3004 }
3005
3006
3007 /*
3008 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3009 * Handles data events from SGE response queues as well as error and other
3010 * async events as they all use the same interrupt pin. We use one SGE
3011 * response queue per port in this mode and protect all response queues with
3012 * queue 0's lock.
3013 */
3014 void
3015 t3b_intr(void *data)
3016 {
3017 uint32_t i, map;
3018 adapter_t *adap = data;
3019 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3020
3021 t3_write_reg(adap, A_PL_CLI, 0);
3022 map = t3_read_reg(adap, A_SG_DATA_INTR);
3023
3024 if (!map)
3025 return;
3026
3027 if (__predict_false(map & F_ERRINTR))
3028 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3029
3030 mtx_lock(&q0->lock);
3031 for_each_port(adap, i)
3032 if (map & (1 << i))
3033 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3034 mtx_unlock(&q0->lock);
3035 }
3036
3037 /*
3038 * The MSI interrupt handler. This needs to handle data events from SGE
3039 * response queues as well as error and other async events as they all use
3040 * the same MSI vector. We use one SGE response queue per port in this mode
3041 * and protect all response queues with queue 0's lock.
3042 */
3043 void
3044 t3_intr_msi(void *data)
3045 {
3046 adapter_t *adap = data;
3047 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3048 int i, new_packets = 0;
3049
3050 mtx_lock(&q0->lock);
3051
3052 for_each_port(adap, i)
3053 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3054 new_packets = 1;
3055 mtx_unlock(&q0->lock);
3056 if (new_packets == 0)
3057 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3058 }
3059
3060 void
3061 t3_intr_msix(void *data)
3062 {
3063 struct sge_qset *qs = data;
3064 adapter_t *adap = qs->port->adapter;
3065 struct sge_rspq *rspq = &qs->rspq;
3066 #ifndef IFNET_MULTIQUEUE
3067 mtx_lock(&rspq->lock);
3068 #else
3069 if (mtx_trylock(&rspq->lock))
3070 #endif
3071 {
3072
3073 if (process_responses_gts(adap, rspq) == 0)
3074 rspq->unhandled_irqs++;
3075 mtx_unlock(&rspq->lock);
3076 }
3077 }
3078
#define QDUMP_SBUF_SIZE (32 * 400)
3080 static int
3081 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3082 {
3083 struct sge_rspq *rspq;
3084 struct sge_qset *qs;
3085 int i, err, dump_end, idx;
3086 static int multiplier = 1;
3087 struct sbuf *sb;
3088 struct rsp_desc *rspd;
3089 uint32_t data[4];
3090
3091 rspq = arg1;
3092 qs = rspq_to_qset(rspq);
3093 if (rspq->rspq_dump_count == 0)
3094 return (0);
3095 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3096 log(LOG_WARNING,
3097 "dump count is too large %d\n", rspq->rspq_dump_count);
3098 rspq->rspq_dump_count = 0;
3099 return (EINVAL);
3100 }
3101 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3102 log(LOG_WARNING,
3103 "dump start of %d is greater than queue size\n",
3104 rspq->rspq_dump_start);
3105 rspq->rspq_dump_start = 0;
3106 return (EINVAL);
3107 }
3108 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3109 if (err)
3110 return (err);
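	/*
	 * The sbuf is fixed-size; if the formatted dump overflows it, grow
	 * the buffer and start over.
	 */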
3111 retry_sbufops:
3112 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3113
3114 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3115 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3116 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3117 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3118 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3119
3120 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3121 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3122
3123 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3124 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3125 idx = i & (RSPQ_Q_SIZE-1);
3126
3127 rspd = &rspq->desc[idx];
3128 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3129 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3130 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3131 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3132 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3133 be32toh(rspd->len_cq), rspd->intr_gen);
3134 }
3135 if (sbuf_overflowed(sb)) {
3136 sbuf_delete(sb);
3137 multiplier++;
3138 goto retry_sbufops;
3139 }
3140 sbuf_finish(sb);
3141 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3142 sbuf_delete(sb);
3143 return (err);
3144 }
3145
3146 static int
3147 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3148 {
3149 struct sge_txq *txq;
3150 struct sge_qset *qs;
3151 int i, j, err, dump_end;
3152 static int multiplier = 1;
3153 struct sbuf *sb;
3154 struct tx_desc *txd;
3155 uint32_t *WR, wr_hi, wr_lo, gen;
3156 uint32_t data[4];
3157
3158 txq = arg1;
3159 qs = txq_to_qset(txq, TXQ_ETH);
3160 if (txq->txq_dump_count == 0) {
3161 return (0);
3162 }
3163 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3164 log(LOG_WARNING,
3165 "dump count is too large %d\n", txq->txq_dump_count);
3166 txq->txq_dump_count = 1;
3167 return (EINVAL);
3168 }
3169 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3170 log(LOG_WARNING,
3171 "dump start of %d is greater than queue size\n",
3172 txq->txq_dump_start);
3173 txq->txq_dump_start = 0;
3174 return (EINVAL);
3175 }
3176 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3177 if (err)
3178 return (err);
3179
3180
3181 retry_sbufops:
3182 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3183
3184 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3185 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3186 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3188 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3189 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3190 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3191 txq->txq_dump_start,
3192 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3193
3194 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3195 for (i = txq->txq_dump_start; i < dump_end; i++) {
3196 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3197 WR = (uint32_t *)txd->flit;
3198 wr_hi = ntohl(WR[0]);
3199 wr_lo = ntohl(WR[1]);
3200 gen = G_WR_GEN(wr_lo);
3201
3202 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3203 wr_hi, wr_lo, gen);
3204 for (j = 2; j < 30; j += 4)
3205 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3206 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3207
3208 }
3209 if (sbuf_overflowed(sb)) {
3210 sbuf_delete(sb);
3211 multiplier++;
3212 goto retry_sbufops;
3213 }
3214 sbuf_finish(sb);
3215 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3216 sbuf_delete(sb);
3217 return (err);
3218 }
3219
3220 static int
3221 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3222 {
3223 struct sge_txq *txq;
3224 struct sge_qset *qs;
3225 int i, j, err, dump_end;
3226 static int multiplier = 1;
3227 struct sbuf *sb;
3228 struct tx_desc *txd;
3229 uint32_t *WR, wr_hi, wr_lo, gen;
3230
3231 txq = arg1;
3232 qs = txq_to_qset(txq, TXQ_CTRL);
3233 if (txq->txq_dump_count == 0) {
3234 return (0);
3235 }
3236 if (txq->txq_dump_count > 256) {
3237 log(LOG_WARNING,
3238 "dump count is too large %d\n", txq->txq_dump_count);
3239 txq->txq_dump_count = 1;
3240 return (EINVAL);
3241 }
3242 if (txq->txq_dump_start > 255) {
3243 log(LOG_WARNING,
3244 "dump start of %d is greater than queue size\n",
3245 txq->txq_dump_start);
3246 txq->txq_dump_start = 0;
3247 return (EINVAL);
3248 }
3249
3250 retry_sbufops:
3251 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3252 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3253 txq->txq_dump_start,
3254 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3255
3256 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3257 for (i = txq->txq_dump_start; i < dump_end; i++) {
3258 txd = &txq->desc[i & (255)];
3259 WR = (uint32_t *)txd->flit;
3260 wr_hi = ntohl(WR[0]);
3261 wr_lo = ntohl(WR[1]);
3262 gen = G_WR_GEN(wr_lo);
3263
3264 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3265 wr_hi, wr_lo, gen);
3266 for (j = 2; j < 30; j += 4)
3267 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3268 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3269
3270 }
3271 if (sbuf_overflowed(sb)) {
3272 sbuf_delete(sb);
3273 multiplier++;
3274 goto retry_sbufops;
3275 }
3276 sbuf_finish(sb);
3277 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3278 sbuf_delete(sb);
3279 return (err);
3280 }
3281
3282 static int
3283 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3284 {
3285 adapter_t *sc = arg1;
3286 struct qset_params *qsp = &sc->params.sge.qset[0];
3287 int coalesce_usecs;
3288 struct sge_qset *qs;
3289 int i, j, err, nqsets = 0;
3290 struct mtx *lock;
3291
3292 if ((sc->flags & FULL_INIT_DONE) == 0)
3293 return (ENXIO);
3294
3295 coalesce_usecs = qsp->coalesce_usecs;
3296 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3297
3298 if (err != 0) {
3299 return (err);
3300 }
3301 if (coalesce_usecs == qsp->coalesce_usecs)
3302 return (0);
3303
3304 for (i = 0; i < sc->params.nports; i++)
3305 for (j = 0; j < sc->port[i].nqsets; j++)
3306 nqsets++;
3307
3308 coalesce_usecs = max(1, coalesce_usecs);
3309
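	/* Program the new holdoff timer into every active queue set. */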
3310 for (i = 0; i < nqsets; i++) {
3311 qs = &sc->sge.qs[i];
3312 qsp = &sc->params.sge.qset[i];
3313 qsp->coalesce_usecs = coalesce_usecs;
3314
3315 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3316 &sc->sge.qs[0].rspq.lock;
3317
3318 mtx_lock(lock);
3319 t3_update_qset_coalesce(qs, qsp);
3320 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3321 V_NEWTIMER(qs->rspq.holdoff_tmr));
3322 mtx_unlock(lock);
3323 }
3324
3325 return (0);
3326 }
3327
3328
3329 void
3330 t3_add_attach_sysctls(adapter_t *sc)
3331 {
3332 struct sysctl_ctx_list *ctx;
3333 struct sysctl_oid_list *children;
3334
3335 ctx = device_get_sysctl_ctx(sc->dev);
3336 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3337
3338 /* random information */
3339 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3340 "firmware_version",
3341 CTLFLAG_RD, &sc->fw_version,
3342 0, "firmware version");
3343 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3344 "hw_revision",
3345 CTLFLAG_RD, &sc->params.rev,
3346 0, "chip model");
3347 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3348 "port_types",
3349 CTLFLAG_RD, &sc->port_types,
3350 0, "type of ports");
3351 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3352 "enable_debug",
3353 CTLFLAG_RW, &cxgb_debug,
3354 0, "enable verbose debugging output");
3355 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tunq_coalesce",
3356 CTLFLAG_RD, &sc->tunq_coalesce,
3357 "#tunneled packets freed");
3358 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3359 "txq_overrun",
3360 CTLFLAG_RD, &txq_fills,
3361 0, "#times txq overrun");
3362 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3363 "pcpu_cache_enable",
3364 CTLFLAG_RW, &cxgb_pcpu_cache_enable,
3365 0, "#enable driver local pcpu caches");
3366 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3367 "cache_alloc",
3368 CTLFLAG_RD, &cxgb_cached_allocations,
3369 0, "#times a cluster was allocated from cache");
3370 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3371 "cached",
3372 CTLFLAG_RD, &cxgb_cached,
3373 0, "#times a cluster was cached");
3374 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3375 "ext_freed",
3376 CTLFLAG_RD, &cxgb_ext_freed,
3377 0, "#times a cluster was freed through ext_free");
3378 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3379 "ext_inited",
3380 CTLFLAG_RD, &cxgb_ext_inited,
3381 0, "#times a cluster was initialized for ext_free");
3382 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3383 "mbufs_outstanding",
3384 CTLFLAG_RD, &cxgb_mbufs_outstanding,
3385 0, "#mbufs in flight in the driver");
3386 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3387 "pack_outstanding",
3388 CTLFLAG_RD, &cxgb_pack_outstanding,
3389 0, "#packet in flight in the driver");
3390 }
3391
3392
3393 static const char *rspq_name = "rspq";
3394 static const char *txq_names[] =
3395 {
3396 "txq_eth",
3397 "txq_ofld",
3398 "txq_ctrl"
3399 };
3400
3401 static int
3402 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3403 {
3404 struct port_info *p = arg1;
3405 uint64_t *parg;
3406
3407 if (!p)
3408 return (EINVAL);
3409
3410 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3411
3412 PORT_LOCK(p);
3413 t3_mac_update_stats(&p->mac);
3414 PORT_UNLOCK(p);
3415
3416 return (sysctl_handle_quad(oidp, parg, 0, req));
3417 }
3418
3419 void
3420 t3_add_configured_sysctls(adapter_t *sc)
3421 {
3422 struct sysctl_ctx_list *ctx;
3423 struct sysctl_oid_list *children;
3424 int i, j;
3425
3426 ctx = device_get_sysctl_ctx(sc->dev);
3427 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3428
3429 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3430 "intr_coal",
3431 CTLTYPE_INT|CTLFLAG_RW, sc,
3432 0, t3_set_coalesce_usecs,
3433 "I", "interrupt coalescing timer (us)");
3434
3435 for (i = 0; i < sc->params.nports; i++) {
3436 struct port_info *pi = &sc->port[i];
3437 struct sysctl_oid *poid;
3438 struct sysctl_oid_list *poidlist;
3439 struct mac_stats *mstats = &pi->mac.stats;
3440
3441 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3442 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3443 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3444 poidlist = SYSCTL_CHILDREN(poid);
3445 SYSCTL_ADD_INT(ctx, poidlist, OID_AUTO,
3446 "nqsets", CTLFLAG_RD, &pi->nqsets,
3447 0, "#queue sets");
3448
3449 for (j = 0; j < pi->nqsets; j++) {
3450 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3451 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3452 *ctrlqpoid, *lropoid;
3453 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3454 *txqpoidlist, *ctrlqpoidlist,
3455 *lropoidlist;
3456 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3457
3458 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3459
3460 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3461 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3462 qspoidlist = SYSCTL_CHILDREN(qspoid);
3463
3464 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3465 CTLFLAG_RD, &qs->fl[0].empty, 0,
3466 "freelist #0 empty");
3467 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3468 CTLFLAG_RD, &qs->fl[1].empty, 0,
3469 "freelist #1 empty");
3470
3471 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3472 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3473 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3474
3475 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3476 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3477 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3478
3479 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3480 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3481 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3482
3483 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3484 "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3485 lropoidlist = SYSCTL_CHILDREN(lropoid);
3486
3487 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3488 CTLFLAG_RD, &qs->rspq.size,
3489 0, "#entries in response queue");
3490 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3491 CTLFLAG_RD, &qs->rspq.cidx,
3492 0, "consumer index");
3493 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3494 CTLFLAG_RD, &qs->rspq.credits,
3495 0, "#credits");
3496 SYSCTL_ADD_XLONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3497 CTLFLAG_RD, &qs->rspq.phys_addr,
3498 "physical_address_of the queue");
3499 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3500 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3501 0, "start rspq dump entry");
3502 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3503 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3504 0, "#rspq entries to dump");
3505 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3506 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3507 0, t3_dump_rspq, "A", "dump of the response queue");
3508
3509
3510 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped",
3511 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops,
3512 0, "#tunneled packets dropped");
3513 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3514 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3515 0, "#tunneled packets waiting to be sent");
3516 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3517 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3518 0, "#tunneled packets queue producer index");
3519 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3520 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3521 0, "#tunneled packets queue consumer index");
3522 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "processed",
3523 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3524 0, "#tunneled packets processed by the card");
3525 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3526 CTLFLAG_RD, &txq->cleaned,
3527 0, "#tunneled packets cleaned");
3528 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3529 CTLFLAG_RD, &txq->in_use,
3530 0, "#tunneled packet slots in use");
3531 SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
3532 CTLFLAG_RD, &txq->txq_frees,
3533 "#tunneled packets freed");
3534 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3535 CTLFLAG_RD, &txq->txq_skipped,
3536 0, "#tunneled packet descriptors skipped");
3537 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "coalesced",
3538 CTLFLAG_RD, &txq->txq_coalesced,
3539 0, "#tunneled packets coalesced");
3540 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3541 CTLFLAG_RD, &txq->txq_enqueued,
3542 0, "#tunneled packets enqueued to hardware");
3543 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3544 CTLFLAG_RD, &qs->txq_stopped,
3545 0, "tx queues stopped");
3546 SYSCTL_ADD_XLONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3547 CTLFLAG_RD, &txq->phys_addr,
3548 "physical_address_of the queue");
3549 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3550 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3551 0, "txq generation");
3552 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3553 CTLFLAG_RD, &txq->cidx,
3554 0, "hardware queue cidx");
3555 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3556 CTLFLAG_RD, &txq->pidx,
3557 0, "hardware queue pidx");
3558 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3559 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3560 0, "txq start idx for dump");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3562 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3563 0, "txq #entries to dump");
3564 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3565 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3566 0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3567
3568 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3569 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3570 0, "ctrlq start idx for dump");
3571 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3572 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3573 0, "ctrl #entries to dump");
3574 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3575 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3576 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
3577
3578 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3579 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3580 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3581 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3582 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3583 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3584 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3585 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3586 }
3587
3588 /* Now add a node for mac stats. */
3589 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3590 CTLFLAG_RD, NULL, "MAC statistics");
3591 poidlist = SYSCTL_CHILDREN(poid);
3592
3593 /*
3594 * We (ab)use the length argument (arg2) to pass on the offset
3595 * of the data that we are interested in. This is only required
3596 * for the quad counters that are updated from the hardware (we
3597 * make sure that we return the latest value).
3598 * sysctl_handle_macstat first updates *all* the counters from
3599 * the hardware, and then returns the latest value of the
3600 * requested counter. Best would be to update only the
3601 * requested counter from hardware, but t3_mac_update_stats()
3602 * hides all the register details and we don't want to dive into
3603 * all that here.
3604 */
3605 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3606 (CTLTYPE_QUAD | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3607 sysctl_handle_macstat, "QU", 0)
3608 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3609 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3610 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3611 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3612 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3613 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3614 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3615 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3616 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3617 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3618 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3619 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3620 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3621 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3622 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3623 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3624 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3625 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3626 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3627 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3628 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3629 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3630 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3631 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3632 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3633 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3634 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3635 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3636 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3637 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3638 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3639 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3640 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3641 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3642 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3643 CXGB_SYSCTL_ADD_QUAD(rx_short);
3644 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3645 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3646 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3647 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3648 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3649 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3650 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3651 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3652 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3653 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3654 #undef CXGB_SYSCTL_ADD_QUAD
3655
3656 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3657 CTLFLAG_RD, &mstats->a, 0)
3658 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3659 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3660 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3661 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3662 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3663 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3664 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3665 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3666 CXGB_SYSCTL_ADD_ULONG(num_resets);
3667 CXGB_SYSCTL_ADD_ULONG(link_faults);
3668 #undef CXGB_SYSCTL_ADD_ULONG
3669 }
3670 }
3671
3672 /**
3673 * t3_get_desc - dump an SGE descriptor for debugging purposes
3674 * @qs: the queue set
3675 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3676 * @idx: the descriptor index in the queue
3677 * @data: where to dump the descriptor contents
3678 *
3679 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3680 * size of the descriptor.
3681 */
3682 int
3683 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3684 unsigned char *data)
3685 {
3686 if (qnum >= 6)
3687 return (EINVAL);
3688
3689 if (qnum < 3) {
3690 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3691 return -EINVAL;
3692 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3693 return sizeof(struct tx_desc);
3694 }
3695
3696 if (qnum == 3) {
3697 if (!qs->rspq.desc || idx >= qs->rspq.size)
3698 return (EINVAL);
3699 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3700 return sizeof(struct rsp_desc);
3701 }
3702
3703 qnum -= 4;
3704 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3705 return (EINVAL);
3706 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3707 return sizeof(struct rx_desc);
3708 }