/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.4/sys/dev/cxgb/cxgb_offload.c 171884 2007-08-18 09:10:26Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif

#include <net/if_vlan_var.h>
#include <net/route.h>
/*
 * XXX
 */
#define LOG_NOTICE 2
#define BUG_ON(...)
#define VALIDATE_TID 0

TAILQ_HEAD(, cxgb_client) client_list;
TAILQ_HEAD(, toedev) ofld_dev_list;
TAILQ_HEAD(, adapter) adapter_list;

static struct mtx cxgb_db_lock;
static struct rwlock adapter_list_lock;


static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static int inited = 0;

static inline int
offload_activated(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}
/**
 * cxgb_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call back the client for each
 * activated offload device.
 */
void
cxgb_register_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_INSERT_TAIL(&client_list, client, client_entry);

	if (client->add) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 * cxgb_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call back the client for each
 * activated offload device.
 */
void
cxgb_unregister_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&client_list, client, client_entry);

	if (client->remove) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 * cxgb_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients once an offload device is activated.
 */
void
cxgb_add_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->add)
			client->add(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 * cxgb_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients once an offload device is deactivated.
 */
void
cxgb_remove_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->remove)
			client->remove(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}

static int
is_offloading(struct ifnet *ifp)
{
	struct adapter *adapter;
	int port;

	rw_rlock(&adapter_list_lock);
	TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
		for_each_port(adapter, port) {
			if (ifp == adapter->port[port].ifp) {
				rw_runlock(&adapter_list_lock);
				return 1;
			}
		}
	}
	rw_runlock(&adapter_list_lock);
	return 0;
}

static struct ifnet *
get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
{
#ifdef notyet
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		const struct port_info *p = &adapter->port[i];
		struct ifnet *ifnet = p->ifp;

		if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
			if (vlan && vlan != EVL_VLID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
#endif
	return NULL;
}

static inline void
failover_fixup(adapter_t *adapter, int port)
{
	if (adapter->params.rev == 0) {
		struct ifnet *ifp = adapter->port[port].ifp;
		struct cmac *mac = &adapter->port[port].mac;
		if (!(ifp->if_flags & IFF_UP)) {
			/* Failover triggered by the interface ifdown */
			t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
			    F_TXEN);
			t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
		} else {
			/* Failover triggered by the interface link down */
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
			    F_RXEN);
		}
	}
}
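
/* Service iSCSI ULP control requests (get/set DDP parameters). */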
static int
cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
		    t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * whole pdu + cpl headers has to fit into one sge buffer.
		 */
		uiip->max_rxsz =
		    (unsigned int)min(adapter->params.tp.rx_pg_size,
			(adapter->sge.qs[0].fl[1].buf_size -
			    sizeof(struct cpl_rx_data) * 2 -
			    sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = (EOPNOTSUPP);
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0
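
/* Service RDMA control requests on behalf of the RDMA driver. */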
static int
cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req = data;

		req->udbell_physbase = rman_get_start(adapter->regs_res);
		req->udbell_len = rman_get_size(adapter->regs_res);
		req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		req->kdb_addr = (void *)(rman_get_start(adapter->regs_res) +
		    A_SG_KDOORBELL);
		break;
	}
	case RDMA_CQ_OP: {
		struct rdma_cq_op *req = data;

		/* may be called in any context */
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
		    req->credits);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return (EINVAL);

		ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
		if (ret)
			return (ret);
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr,
		    req->size, ASYNC_NOTIF_RSPQ,
		    req->ovfl_mode, req->credits,
		    req->credit_thres);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
		    SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
		    req->base_addr, req->size,
		    FW_RI_TID_START, 1, 0);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = EOPNOTSUPP;
	}
	return (ret);
}

static int
cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int port;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
		    iffmacp->vlan_tag & EVL_VLID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, port)
			ports->lldevs[port] = adapter->port[port].ifp;
		break;
	case FAILOVER:
		port = *(int *)data;
		t3_port_failover(adapter, port);
		failover_fixup(adapter, port);
		break;
	case FAILOVER_DONE:
		port = *(int *)data;
		t3_failover_done(adapter, port);
		break;
	case FAILOVER_CLEAR:
		t3_failover_clear(adapter);
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return (EOPNOTSUPP);
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup. This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
	    n, *mtod(m[0], uint32_t *));
	while (n--)
		m_freem(m[n]);
	return 0;
}

static void
dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
{
}

void
cxgb_set_dummy_ops(struct toedev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *
cxgb_free_atid(struct toedev *tdev, int atid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->toe_tid.ctx;

	mtx_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);

	return ctx;
}
/*
 * Free a server TID and return it to the free pool.
 */
void
cxgb_free_stid(struct toedev *tdev, int stid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	mtx_lock(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	mtx_unlock(&t->stid_lock);
}
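
/*
 * Associate an already-allocated TID with a client and its context and
 * account for it as in use.
 */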
void
cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_add_int(&t->tids_in_use, 1);
}
/*
 * Populate a TID_RELEASE WR. The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
	struct cpl_tid_release *req;

	m_set_priority(m, CPL_PRIORITY_SETUP);
	req = mtod(m, struct cpl_tid_release *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
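
/*
 * Drain the deferred TID release list, sending a CPL_TID_RELEASE work
 * request for each entry. Runs from the adapter's taskqueue.
 */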
static void
t3_process_tid_release_list(void *data, int pending)
{
	struct mbuf *m;
	struct toedev *tdev = data;
	struct toe_data *td = TOE_DATA(tdev);

	mtx_lock(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct toe_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct toe_tid_entry *)p->ctx;
		mtx_unlock(&td->tid_release_lock);
		m = m_get(M_WAIT, MT_DATA);
		mk_tid_release(m, p - td->tid_maps.tid_tab);
		cxgb_ofld_send(tdev, m);
		p->ctx = NULL;
		mtx_lock(&td->tid_release_lock);
	}
	mtx_unlock(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void
cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
{
	struct toe_data *td = TOE_DATA(tdev);
	struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];

	mtx_lock(&td->tid_release_lock);
	p->ctx = td->tid_release_list;
	td->tid_release_list = p;

	if (!p->ctx)
		taskqueue_enqueue(tdev->adapter->tq, &td->tid_release_task);

	mtx_unlock(&td->tid_release_lock);
}

/*
 * Remove a tid from the TID table. A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void
cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx,
		    (uintptr_t)ctx, (uintptr_t)NULL);
	else {
		struct mbuf *m;

		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_true(m != NULL)) {
			mk_tid_release(m, tid);
			cxgb_ofld_send(tdev, m);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb_queue_tid_release(tdev, tid);
	}
	atomic_add_int(&t->tids_in_use, -1);
}
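
/*
 * Allocate an active-open TID from the free list and bind it to the given
 * client and context. Returns the atid, or -1 if none are available.
 */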
int
cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return atid;
}
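
/*
 * Allocate a server TID from the free list and bind it to the given client
 * and context. Returns the stid, or -1 if none are available.
 */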
int
cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->stids_in_use++;
	}
	mtx_unlock(&t->stid_lock);
	return stid;
}
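
/*
 * Handlers for the various CPL messages. Each returns a combination of
 * CPL_RET_* flags telling the dispatcher what to do with the mbuf.
 */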
static int
do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int
do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int
do_act_open_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_open_rpl *rpl = cplhdr(m);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
		    toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_stid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid;
	struct toe_tid_entry *toe_tid;

	/* XXX: debugging stub; the dispatch code below is currently disabled. */
	printf("do_hwtid_rpl m=%p\n", m);
	return (0);

	hwtid = G_TID(ntohl(p->opcode_tid));

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_cr(struct toedev *dev, struct mbuf *m)
{
	struct cpl_pass_accept_req *req = cplhdr(m);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_abort_req_rss(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(m);
		struct cpl_abort_rpl *rpl;

		struct mbuf *m = m_get(M_NOWAIT, MT_DATA);
		if (!m) {
			log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
			goto out;
		}

		m_set_priority(m, CPL_PRIORITY_DATA);
#if 0
		__skb_put(skb, sizeof(struct cpl_abort_rpl));
#endif
		rpl = cplhdr(m);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb_ofld_send(dev, m);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int
do_act_establish(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_establish *req = cplhdr(m);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SET_TCB_RPL status %u for tid %u\n",
		    rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}

static int
do_trace(struct toedev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, char *);
	netif_receive_skb(skb);
#endif
	return 0;
}
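
/*
 * Handle a CPL_RDMA_TERMINATE message by dispatching it to the owning
 * client's handler for the connection's tid.
 */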
static int
do_term(struct toedev *dev, struct mbuf *m)
{
	unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[opcode]) {
		return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
#if defined(FOO)
#include <linux/config.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <net/arp.h>

static int (*orig_arp_constructor)(struct ifnet *);

static void
neigh_suspect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

static void
neigh_connect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static inline int
neigh_max_probes(const struct neighbour *n)
{
	const struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
	    p->ucast_probes :
	    p->ucast_probes + p->app_probes + p->mcast_probes);
}

static void
neigh_timer_handler_offload(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
		    neigh->confirmed + neigh->parms->reachable_time)) {
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
		    neigh->used + neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			cxgb_neigh_update(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
		    neigh->confirmed + neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			cxgb_neigh_update(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set_int(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}
	/*
	 * Needed for read of probes
	 */
	mb();
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    neigh->probes >= neigh_max_probes(neigh)) {
		struct mbuf *m;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		cxgb_neigh_update(neigh);
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);

		/*
		 * It is very thin place. report_unreachable is very
		 * complicated routine. Particularly, it can hit the same
		 * neighbour entry!
		 * So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		    (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct mbuf *m = skb_peek(&neigh->arp_queue);

		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_add_int(&neigh->probes, 1);
		if (m)
			m_free(m);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}

static int
arp_constructor_offload(struct neighbour *neigh)
{
	if (neigh->ifp && is_offloading(neigh->ifp))
		neigh->timer.function = neigh_timer_handler_offload;
	return orig_arp_constructor(neigh);
}

/*
 * This must match exactly the signature of neigh_update for jprobes to work.
 * It runs from a trap handler with interrupts off so don't disable BH.
 */
static int
neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
	u8 new, u32 flags)
{
	write_lock(&neigh->lock);
	cxgb_neigh_update(neigh);
	write_unlock(&neigh->lock);
	jprobe_return();
	/* NOTREACHED */
	return 0;
}

static struct jprobe neigh_update_jprobe = {
	.entry = (kprobe_opcode_t *) neigh_update_offload,
	.kp.addr = (kprobe_opcode_t *) neigh_update
};

#ifdef MODULE_SUPPORT
static int
prepare_arp_with_t3core(void)
{
	int err;

	err = register_jprobe(&neigh_update_jprobe);
	if (err) {
		log(LOG_ERR, "Could not install neigh_update jprobe, "
		    "error %d\n", err);
		return err;
	}

	orig_arp_constructor = arp_tbl.constructor;
	arp_tbl.constructor = arp_constructor_offload;

	return 0;
}

static void
restore_arp_sans_t3core(void)
{
	arp_tbl.constructor = orig_arp_constructor;
	unregister_jprobe(&neigh_update_jprobe);
}
#else /* Module support */
static inline int
prepare_arp_with_t3core(void)
{
	return 0;
}

static inline void
restore_arp_sans_t3core(void)
{}
#endif
#endif

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int
do_bad_cpl(struct toedev *dev, struct mbuf *m)
{
	log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
	    *mtod(m, uint32_t *));
	return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
 * to unregister an existing handler.
 */
void
t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		log(LOG_ERR, "T3C: handler registration for "
		    "opcode %x failed\n", opcode);
}

/*
 * TOEDEV's receive method.
 */
int
process_rx(struct toedev *dev, struct mbuf **m, int n)
{
	while (n--) {
		struct mbuf *m0 = *m++;
		unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
		int ret = cpl_handlers[opcode] (dev, m0);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(m0);

			log(LOG_ERR, "%s: CPL message (opcode %u) had "
			    "unknown TID %u\n", dev->name, opcode,
			    G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			m_freem(m0);
	}
	return 0;
}
/*
 * Sends an mbuf to a T3C driver after dealing with any active network taps.
 */
int
cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
{
	int r;

	critical_enter();
	r = dev->send(dev, m);
	critical_exit();
	return r;
}


/**
 * cxgb_ofld_recv - process n received offload packets
 * @dev: the offload device
 * @m: an array of offload packets
 * @n: the number of offload packets
 *
 * Process an array of ingress offload packets. Each packet is forwarded
 * to any active network taps and then passed to the offload device's receive
 * method. We optimize passing packets to the receive method by passing
 * it the whole array at once except when there are active taps.
 */
int
cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
{

#if defined(CONFIG_CHELSIO_T3)
	if (likely(!netdev_nit))
		return dev->recv(dev, skb, n);

	for ( ; n; n--, skb++) {
		skb[0]->dev = dev->lldev;
		dev_queue_xmit_nit(skb[0], dev->lldev);
		skb[0]->dev = NULL;
		dev->recv(dev, skb, 1);
	}
	return 0;
#else
	return dev->recv(dev, m, n);
#endif
}
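
/*
 * Propagate an ARP/route change for an offloaded interface to the adapter's
 * L2 table.
 */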
void
cxgb_neigh_update(struct rtentry *rt)
{

	if (is_offloading(rt->rt_ifp)) {
		struct toedev *tdev = TOEDEV(rt->rt_ifp);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, rt);
	}
}
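
/*
 * Send a CPL_SET_TCB_FIELD work request that points an offloaded
 * connection's TCB at a new L2T entry.
 */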
static void
set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
{
	struct mbuf *m;
	struct cpl_set_tcb_field *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (!m) {
		log(LOG_ERR, "%s: cannot allocate mbuf!\n", __FUNCTION__);
		return;
	}

	m_set_priority(m, CPL_PRIORITY_CONTROL);
	req = mtod(m, struct cpl_set_tcb_field *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = htobe64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, m);
}
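
/*
 * Handle a route redirect: allocate an L2T entry for the new route and give
 * each affected client a chance to move its connections over to it.
 */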
void
cxgb_redirect(struct rtentry *old, struct rtentry *new)
{
	struct ifnet *olddev, *newdev;
	struct tid_info *ti;
	struct toedev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct toe_tid_entry *te;

	olddev = old->rt_ifp;
	newdev = new->rt_ifp;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		log(LOG_WARNING, "%s: Redirect to non-offload "
		    "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = TOEDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != TOEDEV(newdev)) {
		log(LOG_WARNING, "%s: Redirect to different "
		    "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, ((struct port_info *)new->rt_ifp->if_softc)->port_id);
	if (!e) {
		log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
		    __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(TOE_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new,
			    e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}
/*
 * Allocate a cleared chunk of memory. Returns NULL on failure, so the
 * caller must check the result.
 */
void *
cxgb_alloc_mem(unsigned long size)
{

	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void
cxgb_free_mem(void *addr)
{
	free(addr, M_DEVBUF);
}
/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int
init_tid_tabs(struct tid_info *t, unsigned int ntids,
	unsigned int natids, unsigned int nstids,
	unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return (ENOMEM);

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set_int(&t->tids_in_use, 0);
	mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
	mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void
free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}
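
/*
 * Add an adapter to, or remove it from, the global adapter list under the
 * adapter list lock.
 */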
static inline void
add_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

static inline void
remove_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

/*
 * XXX
 */
#define t3_free_l2t(...)

int
cxgb_offload_activate(struct adapter *adapter)
{
	struct toedev *dev = &adapter->tdev;
	int natids, err;
	struct toe_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
	if (!t)
		return (ENOMEM);

	err = (EOPNOTSUPP);
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = (ENOMEM);
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
	    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	TASK_INIT(&t->tid_release_task, 0 /* XXX? */, t3_process_tid_release_list, dev);
	mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
	t->dev = dev;

	TOE_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;
#if 0
	offload_proc_dev_setup(dev);
#endif
	/* Register netevent handler once */
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		if (prepare_arp_with_t3core())
			log(LOG_ERR, "Unable to set offload capabilities\n");
#endif
	}
	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	free(t, M_DEVBUF);
	return err;
}

void
cxgb_offload_deactivate(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
	struct toe_data *t = TOE_DATA(tdev);

	remove_adapter(adapter);
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		restore_arp_sans_t3core();
#endif
	}
	free_tid_maps(&t->tid_maps);
	TOE_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	free(t, M_DEVBUF);
}
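
/*
 * Assign the offload device a unique name and add it to (or remove it from)
 * the global offload device list, under the db lock.
 */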
static inline void
register_tdev(struct toedev *tdev)
{
	static int unit;

	mtx_lock(&cxgb_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

static inline void
unregister_tdev(struct toedev *tdev)
{
	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

void
cxgb_adapter_ofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;

	cxgb_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
#if 0
	offload_proc_dev_init(tdev);
#endif
}

void
cxgb_adapter_unofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
#if 0
	offload_proc_dev_cleanup(tdev);
	offload_proc_dev_exit(tdev);
#endif
	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
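
/*
 * One-time module initialization: set up the global locks and lists and
 * install the default CPL handlers.
 */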
void
cxgb_offload_init(void)
{
	int i;

	if (inited)
		return;
	inited = 1;

	mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
	rw_init(&adapter_list_lock, "ofld adap list");
	TAILQ_INIT(&client_list);
	TAILQ_INIT(&ofld_dev_list);
	TAILQ_INIT(&adapter_list);

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
#if 0
	if (offload_proc_init())
		log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
#endif
}

void
cxgb_offload_exit(void)
{
	static int deinited = 0;

	if (deinited)
		return;

	deinited = 1;
	mtx_destroy(&cxgb_db_lock);
	rw_destroy(&adapter_list_lock);
#if 0
	offload_proc_cleanup();
#endif
}
#if 0
static int
offload_info_read_proc(char *buf, char **start, off_t offset,
	int length, int *eof, void *data)
{
	struct toe_data *d = data;
	struct tid_info *t = &d->tid_maps;
	int len;

	len = sprintf(buf, "TID range: 0..%d, in use: %u\n"
	    "STID range: %d..%d, in use: %u\n"
	    "ATID range: %d..%d, in use: %u\n"
	    "MSS: %u\n",
	    t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
	    t->stid_base + t->nstids - 1, t->stids_in_use,
	    t->atid_base, t->atid_base + t->natids - 1,
	    t->atids_in_use, d->tx_max_chunk);
	if (len > length)
		len = length;
	*eof = 1;
	return len;
}

static int
offload_info_proc_setup(struct proc_dir_entry *dir,
	struct toe_data *d)
{
	struct proc_dir_entry *p;

	if (!dir)
		return (EINVAL);

	p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
	if (!p)
		return (ENOMEM);

	p->owner = THIS_MODULE;
	return 0;
}


static int
offload_devices_read_proc(char *buf, char **start, off_t offset,
	int length, int *eof, void *data)
{
	int len;
	struct toedev *dev;
	struct net_device *ndev;

	len = sprintf(buf, "Device           Interfaces\n");

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
		len += sprintf(buf + len, "%-16s", dev->name);
		read_lock(&dev_base_lock);
		for (ndev = dev_base; ndev; ndev = ndev->next) {
			if (TOEDEV(ndev) == dev)
				len += sprintf(buf + len, " %s", ndev->name);
		}
		read_unlock(&dev_base_lock);
		len += sprintf(buf + len, "\n");
		if (len >= length)
			break;
	}
	mtx_unlock(&cxgb_db_lock);

	if (len > length)
		len = length;
	*eof = 1;
	return len;
}

#endif