1 /**************************************************************************
2
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28
29 $FreeBSD: releng/7.3/sys/dev/cxgb/cxgb_adapter.h 202745 2010-01-21 12:18:29Z np $
30
31
32 ***************************************************************************/
33
34
35 #ifndef _CXGB_ADAPTER_H_
36 #define _CXGB_ADAPTER_H_
37
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/rman.h>
41 #include <sys/mbuf.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/condvar.h>
45
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/if_media.h>
49 #include <net/if_dl.h>
50 #include <netinet/tcp_lro.h>
51
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54
55 #include <sys/bus_dma.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58
59
60 #ifdef CONFIG_DEFINED
61 #include <cxgb_osdep.h>
62 #include <t3cdev.h>
63 #include <ulp/toecore/cxgb_toedev.h>
64 #include <sys/mbufq.h>
65 #else
66 #include <dev/cxgb/cxgb_osdep.h>
67 #include <dev/cxgb/t3cdev.h>
68 #include <dev/cxgb/sys/mbufq.h>
69 #include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
70 #endif
71
72 struct adapter;
73 struct sge_qset;
74 extern int cxgb_debug;
75
#ifdef DEBUG_LOCKING
/*
 * Lock-debug build: wrap mtx_init/mtx_destroy so every mutex
 * creation/destruction is logged together with its call site.
 */
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#else
/* Normal build: map straight onto the mtx(9) primitives. */
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#endif
93
/*
 * Link-fault state, stored in port_info::link_fault.
 * NOTE(review): meanings inferred from the names — confirm against the
 * code that reads/writes link_fault.
 */
enum {
	LF_NO = 0,	/* no link fault */
	LF_MAYBE,	/* fault suspected, not yet confirmed */
	LF_YES		/* fault confirmed */
};
99
/*
 * Per-port (per network interface) software state, embedded in
 * struct adapter.  Protected by the per-port lock (PORT_LOCK).
 */
struct port_info {
	struct adapter *adapter;	/* back pointer to owning adapter */
	struct ifnet *ifp;		/* network interface for this port */
	int if_flags;			/* saved ifnet flags (presumably a cached copy — verify) */
	int flags;			/* port flags: DOOMED (see IS_DOOMED) */
	const struct port_type_info *port_type;	/* static port-type descriptor */
	struct cphy phy;		/* PHY state (shared hardware code) */
	struct cmac mac;		/* MAC state (shared hardware code) */
	struct link_config link_config;	/* link settings/negotiation state */
	struct ifmedia media;		/* ifmedia(4) state */
	struct mtx lock;		/* per-port lock (PORT_LOCK/PORT_UNLOCK) */
	uint32_t port_id;		/* index of this port on the adapter */
	uint32_t tx_chan;		/* TX channel — assumed HW channel id; confirm */
	uint32_t txpkt_intf;		/* interface value for TX packet WRs (presumably) */
	uint32_t first_qset;		/* first SGE qset owned by this port */
	uint32_t nqsets;		/* number of SGE qsets owned by this port */
	int link_fault;			/* LF_* link-fault state */

	uint8_t hw_addr[ETHER_ADDR_LEN];	/* Ethernet MAC address */
	struct task timer_reclaim_task;
	struct cdev *port_cdev;		/* per-port character device node */

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char lockbuf[PORT_LOCK_NAME_LEN];	/* storage for the lock's name */
	char namebuf[PORT_NAME_LEN];		/* storage for the port's name */
};
127
/*
 * Flag bits for adapter::flags and port_info::flags.
 * NOTE(review): per-flag meanings inferred from names; verify against
 * the code that sets/tests them.
 */
enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),	/* full hardware/software bring-up done */
	USING_MSI	= (1 << 1),	/* interrupts delivered via MSI */
	USING_MSIX	= (1 << 2),	/* interrupts delivered via MSI-X */
	QUEUES_BOUND	= (1 << 3),	/* qsets bound (to vectors/CPUs — confirm) */
	FW_UPTODATE	= (1 << 4),	/* firmware version acceptable */
	TPS_UPTODATE	= (1 << 5),	/* TP SRAM/microcode up to date (presumably) */
	CXGB_SHUTDOWN	= (1 << 6),	/* adapter is shutting down */
	CXGB_OFLD_INIT	= (1 << 7),	/* offload support initialized */
	TP_PARITY_INIT	= (1 << 8),	/* TP parity initialization done (presumably) */
	CXGB_BUSY	= (1 << 9),	/* operation in progress (IS_BUSY/SET_BUSY) */

	/* port flags */
	DOOMED		= (1 << 0),	/* port is going away (IS_DOOMED) */
};
/*
 * Flag test/set helpers for the DOOMED port flag and the CXGB_BUSY
 * adapter flag.  Arguments are fully parenthesized so the macros are
 * safe with any pointer-valued expression, and the statement macros
 * use do/while(0) so they behave as a single statement in if/else.
 */
#define IS_DOOMED(p)	((p)->flags & DOOMED)
#define SET_DOOMED(p)	do { (p)->flags |= DOOMED; } while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGB_BUSY)
#define SET_BUSY(sc)	do { (sc)->flags |= CXGB_BUSY; } while (0)
#define CLR_BUSY(sc)	do { (sc)->flags &= ~CXGB_BUSY; } while (0)
149
/* Default descriptor ring sizes, in entries. */
#define FL_Q_SIZE	4096	/* free list */
#define JUMBO_Q_SIZE	1024	/* jumbo free list */
#define RSPQ_Q_SIZE	1024	/* response queue */
#define TX_ETH_Q_SIZE	1024	/* Ethernet TX queue */

/* Indices of the three TX queues within a qset (sge_qset::txq[]). */
enum { TXQ_ETH = 0,
	TXQ_OFLD = 1,
	TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
/* Payload room left in one WR after a cpl_tx_pkt_lso header (PIO copies — confirm). */
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

/* Software LRO state attached to each qset. */
struct lro_state {
	unsigned short enabled;	/* nonzero when LRO is turned on */
	struct lro_ctrl ctrl;	/* tcp_lro(4) control block */
};

#define RX_BUNDLE_SIZE 8
172
struct rsp_desc;	/* HW response descriptor, defined in the SGE code */

/*
 * SGE response (completion) queue.
 * NOTE(review): per-field comments are inferred from names; verify
 * against the SGE implementation.
 */
struct sge_rspq {
	uint32_t credits;	/* descriptors returned to HW (presumably) */
	uint32_t size;		/* ring size, in entries */
	uint32_t cidx;		/* consumer index */
	uint32_t gen;		/* generation bit expected on next entry */
	uint32_t polling;	/* nonzero when in polling mode */
	uint32_t holdoff_tmr;	/* interrupt holdoff timer value */
	uint32_t next_holdoff;	/* holdoff value to program next */
	uint32_t imm_data;	/* stat: responses carrying immediate data */
	uint32_t async_notif;	/* stat: async notification responses */
	uint32_t cntxt_id;	/* HW context id of this queue */
	uint32_t offload_pkts;	/* stat: offload packets received */
	uint32_t offload_bundles;	/* stat: offload packet bundles delivered */
	uint32_t pure_rsps;	/* stat: responses with no data */
	uint32_t unhandled_irqs;	/* stat: interrupts with no work found */

	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	bus_dma_tag_t desc_tag;	/* DMA tag for the ring */
	bus_dmamap_t desc_map;	/* DMA map for the ring */

	struct t3_mbuf_hdr rspq_mh;	/* mbuf chain being assembled from responses */
	struct rsp_desc *desc;	/* the descriptor ring itself */
	struct mtx lock;	/* queue lock */
#define RSPQ_NAME_LEN 32
	char lockbuf[RSPQ_NAME_LEN];	/* storage for the lock's name */
	uint32_t rspq_dump_start;	/* sysctl ring dump: start index */
	uint32_t rspq_dump_count;	/* sysctl ring dump: entry count */
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head	/* shorthand for the head mbuf */
#endif
207
struct rx_desc;
struct rx_sw_desc;

/*
 * SGE free list: a ring of RX buffers handed to the hardware.
 * NOTE(review): per-field comments are inferred from names; verify
 * against the SGE implementation.
 */
struct sge_fl {
	uint32_t buf_size;	/* size of the buffers posted to this list */
	uint32_t credits;	/* buffers currently posted */
	uint32_t size;		/* ring size, in entries */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */
	uint32_t gen;		/* generation bit */
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	uint32_t cntxt_id;	/* HW context id */
	uint32_t empty;		/* stat: times the list ran dry */
	bus_dma_tag_t desc_tag;	/* DMA tag for the ring */
	bus_dmamap_t desc_map;	/* DMA map for the ring */
	bus_dma_tag_t entry_tag;	/* DMA tag for individual buffers */
	uma_zone_t zone;	/* UMA zone the buffers come from */
	struct rx_desc *desc;	/* HW descriptor ring */
	struct rx_sw_desc *sdesc;	/* per-descriptor software state */
	int type;
};
229
struct tx_desc;
struct tx_sw_desc;

/* sge_txq::flags bit: a thread is currently transmitting on this queue. */
#define TXQ_TRANSMITTING    0x1

/*
 * SGE transmit queue.
 * NOTE(review): per-field comments are inferred from names; verify
 * against the SGE implementation.
 */
struct sge_txq {
	uint64_t flags;		/* TXQ_TRANSMITTING */
	uint32_t in_use;	/* descriptors currently in use */
	uint32_t size;		/* ring size, in entries */
	uint32_t processed;	/* total descriptors handed to HW */
	uint32_t cleaned;	/* total descriptors reclaimed */
	uint32_t stop_thres;	/* stop the queue below this much free space */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */
	uint32_t gen;		/* generation bit */
	uint32_t unacked;	/* WRs since last completion request (presumably) */
	struct tx_desc *desc;	/* HW descriptor ring */
	struct tx_sw_desc *sdesc;	/* per-descriptor software state */
	uint32_t token;
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	struct task qresume_task;	/* restarts a stopped queue */
	struct task qreclaim_task;	/* reclaims completed descriptors */
	struct port_info *port;	/* owning port */
	uint32_t cntxt_id;	/* HW context id */
	uint64_t stops;		/* stat: queue stop events */
	uint64_t restarts;	/* stat: queue restart events */
	bus_dma_tag_t desc_tag;	/* DMA tag for the ring */
	bus_dmamap_t desc_map;	/* DMA map for the ring */
	bus_dma_tag_t entry_tag;	/* DMA tag for packet buffers */
	struct mbuf_head sendq;	/* mbufs waiting to be transmitted */
	/*
	 * cleanq should really be an buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;	/* mbufs waiting to be freed */
	struct buf_ring txq_mr;	/* per-queue software transmit ring */
	struct mbuf *immpkt;
	uint32_t txq_drops;	/* stat: packets dropped */
	uint32_t txq_skipped;
	uint32_t txq_coalesced;
	uint32_t txq_enqueued;
	uint32_t txq_dump_start;	/* sysctl ring dump: start index */
	uint32_t txq_dump_count;	/* sysctl ring dump: entry count */
	unsigned long txq_frees;
	struct mtx lock;	/* queue lock */
	struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];	/* scratch scatter/gather list */
#define TXQ_NAME_LEN 32
	char lockbuf[TXQ_NAME_LEN];	/* storage for the lock's name */
};
279
280
/* Indices into sge_qset::port_stats[]. */
enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,	    /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,	    /* # of VLAN tag insertions */
};

/* Number of entries in a port_stats[] array. */
#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

/* sge_qset::qs_flags bits. */
#define QS_EXITING              0x1	/* qset is being torn down */
#define QS_RUNNING              0x2	/* qset is being serviced (presumably) */
#define QS_BOUND                0x4	/* qset bound to a CPU (see qs_cpuid) */
294
/*
 * A queue set: one response queue, the per-set free lists, and three
 * TX queues (Ethernet/offload/control, see TXQ_*), serviced together.
 */
struct sge_qset {
	struct sge_rspq		rspq;	/* completion queue */
	struct sge_fl		fl[SGE_RXQ_PER_SET];	/* RX free lists */
	struct lro_state        lro;	/* software LRO state */
	struct sge_txq		txq[SGE_TXQ_PER_SET];	/* TX queues, indexed by TXQ_* */
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];	/* SGE_PSTAT_* counters */
	struct port_info        *port;	/* owning port */
	int                     idx; /* qset # */
	int                     qs_cpuid;	/* CPU this qset is bound to (QS_BOUND) */
	int                     qs_flags;	/* QS_* flags */
	struct cv		qs_cv;	/* sleep point for qset state changes (presumably) */
	struct mtx		qs_mtx;	/* protects qs_flags/qs_cv handshake — confirm */
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];	/* qset name storage */
};

/* All qsets plus the lock serializing SGE context register access. */
struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};
316
struct filter_info;

/*
 * Soft state for one T3 adapter.  Protected by the adapter-wide lock
 * (ADAPTER_LOCK) except where a finer-grained lock is noted.
 * NOTE(review): member comments are inferred from names and the macros
 * in this header; verify against cxgb_main/sge code.
 */
struct adapter {
	device_t		dev;	/* newbus device handle */
	int			flags;	/* adapter flags (FULL_INIT_DONE, ...) */
	TAILQ_ENTRY(adapter)    adapter_entry;	/* linkage on a global adapter list (presumably) */

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;	/* BAR with the adapter registers */
	int			udbs_rid;
	struct resource		*udbs_res;	/* user doorbell BAR (presumably) */
	bus_space_handle_t	bh;	/* handle used by t3_read_reg/t3_write_reg */
	bus_space_tag_t		bt;	/* tag used by t3_read_reg/t3_write_reg */
	bus_size_t              mmio_len;	/* size of the register mapping */
	uint32_t                link_width;	/* negotiated PCIe link width */

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;	/* parent of the tags below */
	bus_dma_tag_t		rx_dmat;	/* RX buffers */
	bus_dma_tag_t		rx_jumbo_dmat;	/* jumbo RX buffers */
	bus_dma_tag_t		tx_dmat;	/* TX buffers */

	/* Interrupt resources */
	struct resource		*irq_res;	/* INTx/MSI interrupt */
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;	/* MSI-X table BAR */

	struct resource		*msix_irq_res[SGE_QSETS];	/* one MSI-X vector per qset */
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* revers RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info      *filters;	/* hardware filter table shadow */

	/* Tasks */
	struct task		ext_intr_task;	/* external (PHY) interrupt work */
	struct task		slow_intr_task;	/* slow-path interrupt work */
	struct task		tick_task;	/* periodic housekeeping */
	struct taskqueue	*tq;		/* taskqueue the above run on */
	struct callout		cxgb_tick_ch;	/* drives tick_task (see CXGB_TICKS) */
	struct callout		sge_timer_ch;	/* SGE timer callout */

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;	/* serializes MDIO access (MDIO_LOCK) */
	struct mtx		elmer_lock;	/* serializes ELMER access (ELMR_LOCK) */

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;	/* shared-code adapter parameters */
	unsigned int slow_intr_mask;	/* enabled slow-path interrupt causes */
	unsigned long irq_stats[IRQ_NUM_STATS];	/* interrupt statistics */

	struct sge              sge;	/* all SGE queue state */
	struct mc7              pmrx;	/* RX payload memory controller */
	struct mc7              pmtx;	/* TX payload memory controller */
	struct mc7              cm;	/* context memory controller */
	struct mc5              mc5;	/* TCAM controller */

	struct port_info	port[MAX_NPORTS];	/* per-port state */
	device_t		portdev[MAX_NPORTS];	/* child devices, one per port */
	struct t3cdev           tdev;	/* TOE/offload device (see tdev2adap) */
	char                    fw_version[64];	/* firmware version string */
	char                    port_types[MAX_NPORTS + 1];	/* one char per port type */
	uint32_t                open_device_map;	/* open-device bitmap (OFFLOAD_DEVMAP_BIT) */
	uint32_t                registered_device_map;
	struct mtx              lock;	/* adapter-wide lock (ADAPTER_LOCK) */
	driver_intr_t		*cxgb_intr;	/* interrupt handler in use */
	int                     msi_count;	/* # of MSI/MSI-X messages allocated */

#define ADAPTER_LOCK_NAME_LEN	32
	/* Name storage for the adapter's mutexes. */
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

/*
 * Cursor used by t3_get_next_mcaddr() to iterate a port's multicast
 * address list one entry per call.
 */
struct t3_rx_mode {

	uint32_t                idx;	/* next list position to return */
	struct port_info        *port;	/* port whose list is walked */
};
410
411
/*
 * Mutex convenience macros.  The expansions deliberately carry no
 * trailing semicolon -- callers supply their own, so the macros are
 * safe as the sole statement of an if/else branch.
 */
#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#define PORT_LOCK(port)			mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)		mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)	mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)		mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_NOTOWNED(port)	mtx_assert(&(port)->lock, MA_NOTOWNED)
#define PORT_LOCK_ASSERT_OWNED(port)	mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)			mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)			mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)		mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)		mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)	mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap)		mtx_assert(&(adap)->lock, MA_OWNED)
431
432
433 static __inline uint32_t
434 t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
435 {
436 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
437 }
438
/*
 * Write a 32-bit adapter register through the memory-mapped BAR.
 */
static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
444
445 static __inline void
446 t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
447 {
448 *val = pci_read_config(adapter->dev, reg, 4);
449 }
450
/*
 * Write a 32-bit value to the device's PCI config space.
 */
static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}
456
457 static __inline void
458 t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
459 {
460 *val = pci_read_config(adapter->dev, reg, 2);
461 }
462
/*
 * Write a 16-bit value to the device's PCI config space.
 */
static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}
468
469 static __inline uint8_t *
470 t3_get_next_mcaddr(struct t3_rx_mode *rm)
471 {
472 uint8_t *macaddr = NULL;
473 struct ifnet *ifp = rm->port->ifp;
474 struct ifmultiaddr *ifma;
475 int i = 0;
476
477 IF_ADDR_LOCK(ifp);
478 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
479 if (ifma->ifma_addr->sa_family != AF_LINK)
480 continue;
481 if (i == rm->idx) {
482 macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
483 break;
484 }
485 i++;
486 }
487 IF_ADDR_UNLOCK(ifp);
488
489
490 rm->idx++;
491 return (macaddr);
492 }
493
494 static __inline void
495 t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
496 {
497 rm->idx = 0;
498 rm->port = port;
499 }
500
501 static __inline struct port_info *
502 adap2pinfo(struct adapter *adap, int idx)
503 {
504 return &adap->port[idx];
505 }
506
/* OS-facing helpers called by the shared hardware code. */
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc, int mac_was_reset);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


/* SGE setup/teardown, interrupt handlers, and datapath entry points. */
int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/*
 * Ticks between periodic housekeeping runs: linkpoll_period appears to
 * be in tenths of a second when set; otherwise stats_update_period
 * (seconds) is used.
 */
#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

/* Local container_of; #undef'd again once the helpers below are defined. */
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
557
/* Map a free list back to its enclosing qset; qidx is its index in fl[]. */
static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

/* Map a response queue back to its enclosing qset. */
static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

/* Map a TX queue back to its enclosing qset; qidx is its TXQ_* index. */
static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/* Map the embedded t3cdev back to its owning adapter. */
static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of
583
/* Bit in adapter::open_device_map that marks the offload device as open. */
#define OFFLOAD_DEVMAP_BIT 15

/* True when the offload (TOE) device has been opened on this adapter. */
static inline int offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

/* Per-CPU (multiqueue) transmit entry points. */
int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
599 #endif