1 /**************************************************************************
2
3 Copyright (c) 2007-2008, Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28
29 ***************************************************************************/
30
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/pciio.h>
41 #include <sys/conf.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
45 #include <sys/rman.h>
46 #include <sys/ioccom.h>
47 #include <sys/mbuf.h>
48 #include <sys/linker.h>
49 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/smp.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/queue.h>
56 #include <sys/taskqueue.h>
57 #include <sys/proc.h>
58
59 #ifdef CONFIG_DEFINED
60 #include <cxgb_include.h>
61 #else
62 #include <dev/cxgb/cxgb_include.h>
63 #endif
64
65 #include <net/route.h>
66
/* Enable extra TID sanity checking in process_rx() when non-zero. */
#define VALIDATE_TID 0
MALLOC_DEFINE(M_CXGB, "cxgb", "Chelsio 10 Gigabit Ethernet and services");

/* All registered offload clients; protected by cxgb_db_lock. */
TAILQ_HEAD(, cxgb_client) client_list;
/* All registered offload devices; protected by cxgb_db_lock. */
TAILQ_HEAD(, t3cdev) ofld_dev_list;


/* Guards client_list, ofld_dev_list, and the tdev unit counter. */
static struct mtx cxgb_db_lock;


/* Reference count of cxgb_offload_init()/cxgb_offload_exit() calls. */
static int inited = 0;
78
79 static inline int
80 offload_activated(struct t3cdev *tdev)
81 {
82 struct adapter *adapter = tdev2adap(tdev);
83
84 return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
85 }
86
/*
 * Give the device a unique "ofld_dev%d" name and append it to the global
 * offload device list.  The static unit counter and the list are both
 * protected by cxgb_db_lock.
 */
static inline void
register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mtx_lock(&cxgb_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, entry);
	mtx_unlock(&cxgb_db_lock);
}
97
/*
 * Remove the device from the global offload device list.  The unit number
 * consumed by register_tdev() is not recycled.
 */
static inline void
unregister_tdev(struct t3cdev *tdev)
{
	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&ofld_dev_list, tdev, entry);
	mtx_unlock(&cxgb_db_lock);
}
105
106 #ifndef TCP_OFFLOAD_DISABLE
107 /**
108 * cxgb_register_client - register an offload client
109 * @client: the client
110 *
111 * Add the client to the client list,
112 * and call backs the client for each activated offload device
113 */
114 void
115 cxgb_register_client(struct cxgb_client *client)
116 {
117 struct t3cdev *tdev;
118
119 mtx_lock(&cxgb_db_lock);
120 TAILQ_INSERT_TAIL(&client_list, client, client_entry);
121
122 if (client->add) {
123 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
124 if (offload_activated(tdev)) {
125 client->add(tdev);
126 } else
127 CTR1(KTR_CXGB,
128 "cxgb_register_client: %p not activated", tdev);
129
130 }
131 }
132 mtx_unlock(&cxgb_db_lock);
133 }
134
135 /**
136 * cxgb_unregister_client - unregister an offload client
137 * @client: the client
138 *
139 * Remove the client to the client list,
140 * and call backs the client for each activated offload device.
141 */
142 void
143 cxgb_unregister_client(struct cxgb_client *client)
144 {
145 struct t3cdev *tdev;
146
147 mtx_lock(&cxgb_db_lock);
148 TAILQ_REMOVE(&client_list, client, client_entry);
149
150 if (client->remove) {
151 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
152 if (offload_activated(tdev))
153 client->remove(tdev);
154 }
155 }
156 mtx_unlock(&cxgb_db_lock);
157 }
158
159 /**
160 * cxgb_add_clients - activate register clients for an offload device
161 * @tdev: the offload device
162 *
163 * Call backs all registered clients once a offload device is activated
164 */
165 void
166 cxgb_add_clients(struct t3cdev *tdev)
167 {
168 struct cxgb_client *client;
169
170 mtx_lock(&cxgb_db_lock);
171 TAILQ_FOREACH(client, &client_list, client_entry) {
172 if (client->add)
173 client->add(tdev);
174 }
175 mtx_unlock(&cxgb_db_lock);
176 }
177
178 /**
179 * cxgb_remove_clients - activate register clients for an offload device
180 * @tdev: the offload device
181 *
182 * Call backs all registered clients once a offload device is deactivated
183 */
184 void
185 cxgb_remove_clients(struct t3cdev *tdev)
186 {
187 struct cxgb_client *client;
188
189 mtx_lock(&cxgb_db_lock);
190 TAILQ_FOREACH(client, &client_list, client_entry) {
191 if (client->remove)
192 client->remove(tdev);
193 }
194 mtx_unlock(&cxgb_db_lock);
195 }
196 #endif
197
/**
 *	cxgb_ofld_recv - process n received offload packets
 *	@dev: the offload device
 *	@m: an array of offload packets
 *	@n: the number of offload packets
 *
 *	Hands an array of ingress offload packets to the offload device's
 *	receive method in a single call.  (The receive method is normally
 *	process_rx(), or rx_offload_blackhole() before setup completes.)
 */
int
cxgb_ofld_recv(struct t3cdev *dev, struct mbuf **m, int n)
{

	return dev->recv(dev, m, n);
}
215
/*
 * Dummy handler for Rx offload packets, installed before proper processing
 * is set up.  Offload packets are not expected at that stage, so every
 * packet is simply dropped.
 */
static int
rx_offload_blackhole(struct t3cdev *dev, struct mbuf **m, int n)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		m_freem(m[i]);
	return (0);
}
228
/*
 * No-op neighbour-update callback, installed by cxgb_set_dummy_ops() until
 * real offload processing takes over the arp_update hook.
 */
static void
dummy_neigh_update(struct t3cdev *dev, struct rtentry *neigh, uint8_t *enaddr,
    struct sockaddr *sa)
{
}
234
235 void
236 cxgb_set_dummy_ops(struct t3cdev *dev)
237 {
238 dev->recv = rx_offload_blackhole;
239 dev->arp_update = dummy_neigh_update;
240 }
241
242 static int
243 do_smt_write_rpl(struct t3cdev *dev, struct mbuf *m)
244 {
245 struct cpl_smt_write_rpl *rpl = cplhdr(m);
246
247 if (rpl->status != CPL_ERR_NONE)
248 log(LOG_ERR,
249 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
250 rpl->status, GET_TID(rpl));
251
252 return CPL_RET_BUF_DONE;
253 }
254
255 static int
256 do_l2t_write_rpl(struct t3cdev *dev, struct mbuf *m)
257 {
258 struct cpl_l2t_write_rpl *rpl = cplhdr(m);
259
260 if (rpl->status != CPL_ERR_NONE)
261 log(LOG_ERR,
262 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
263 rpl->status, GET_TID(rpl));
264
265 return CPL_RET_BUF_DONE;
266 }
267
268 static int
269 do_rte_write_rpl(struct t3cdev *dev, struct mbuf *m)
270 {
271 struct cpl_rte_write_rpl *rpl = cplhdr(m);
272
273 if (rpl->status != CPL_ERR_NONE)
274 log(LOG_ERR,
275 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
276 rpl->status, GET_TID(rpl));
277
278 return CPL_RET_BUF_DONE;
279 }
280
281 static int
282 do_set_tcb_rpl(struct t3cdev *dev, struct mbuf *m)
283 {
284 struct cpl_set_tcb_rpl *rpl = cplhdr(m);
285
286 if (rpl->status != CPL_ERR_NONE)
287 log(LOG_ERR,
288 "Unexpected SET_TCB_RPL status %u for tid %u\n",
289 rpl->status, GET_TID(rpl));
290 return CPL_RET_BUF_DONE;
291 }
292
/*
 * Handle CPL_TRACE_PKT.  Currently a no-op that silently consumes the
 * message (returns 0, so process_rx() does not free the mbuf either).
 * The disabled body below is unported Linux skb code left as a reminder
 * of what trace-packet delivery would look like.
 */
static int
do_trace(struct t3cdev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);


	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, (char *));
	netif_receive_skb(skb);
#endif
	return 0;
}
308
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 * Logs the low byte of the first word of the message (the opcode field)
 * and tells the caller to free the mbuf and flag the message as bad.
 */
static int
do_bad_cpl(struct t3cdev *dev, struct mbuf *m)
{
	log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
	    0xFF & *mtod(m, uint32_t *));
	return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}
319
/*
 * Dispatch table of handlers, one slot per CPL opcode.  Every slot is
 * initialized to do_bad_cpl by cxgb_offload_init(); specific handlers are
 * installed via t3_register_cpl_handler().
 */
static cpl_handler_func cpl_handlers[256];
324
325 /*
326 * T3CDEV's receive method.
327 */
328 int
329 process_rx(struct t3cdev *dev, struct mbuf **m, int n)
330 {
331 while (n--) {
332 struct mbuf *m0 = *m++;
333 unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
334 int ret;
335
336 DPRINTF("processing op=0x%x m=%p data=%p\n", opcode, m0, m0->m_data);
337
338 ret = cpl_handlers[opcode] (dev, m0);
339
340 #if VALIDATE_TID
341 if (ret & CPL_RET_UNKNOWN_TID) {
342 union opcode_tid *p = cplhdr(m0);
343
344 log(LOG_ERR, "%s: CPL message (opcode %u) had "
345 "unknown TID %u\n", dev->name, opcode,
346 G_TID(ntohl(p->opcode_tid)));
347 }
348 #endif
349 if (ret & CPL_RET_BUF_DONE)
350 m_freem(m0);
351 }
352 return 0;
353 }
354
355 /*
356 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
357 * to unregister an existing handler.
358 */
359 void
360 t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
361 {
362 if (opcode < NUM_CPL_CMDS)
363 cpl_handlers[opcode] = h ? h : do_bad_cpl;
364 else
365 log(LOG_ERR, "T3C: handler registration for "
366 "opcode %x failed\n", opcode);
367 }
368
369 /*
370 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
371 * The allocated memory is cleared.
372 */
373 void *
374 cxgb_alloc_mem(unsigned long size)
375 {
376
377 return malloc(size, M_CXGB, M_ZERO|M_NOWAIT);
378 }
379
380 /*
381 * Free memory allocated through t3_alloc_mem().
382 */
383 void
384 cxgb_free_mem(void *addr)
385 {
386 free(addr, M_CXGB);
387 }
388
389 static __inline int
390 adap2type(struct adapter *adapter)
391 {
392 int type = 0;
393
394 switch (adapter->params.rev) {
395 case T3_REV_A:
396 type = T3A;
397 break;
398 case T3_REV_B:
399 case T3_REV_B2:
400 type = T3B;
401 break;
402 case T3_REV_C:
403 type = T3C;
404 break;
405 }
406 return type;
407 }
408
409 void
410 cxgb_adapter_ofld(struct adapter *adapter)
411 {
412 struct t3cdev *tdev = &adapter->tdev;
413
414 cxgb_set_dummy_ops(tdev);
415 tdev->type = adap2type(adapter);
416 tdev->adapter = adapter;
417 register_tdev(tdev);
418
419 }
420
421 void
422 cxgb_adapter_unofld(struct adapter *adapter)
423 {
424 struct t3cdev *tdev = &adapter->tdev;
425
426 tdev->recv = NULL;
427 tdev->arp_update = NULL;
428 unregister_tdev(tdev);
429 }
430
431 void
432 cxgb_offload_init(void)
433 {
434 int i;
435
436 if (inited++)
437 return;
438
439 mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
440
441 TAILQ_INIT(&client_list);
442 TAILQ_INIT(&ofld_dev_list);
443
444 for (i = 0; i < 0x100; ++i)
445 cpl_handlers[i] = do_bad_cpl;
446
447 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
448 t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
449 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
450
451 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
452 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
453
454 }
455
456 void
457 cxgb_offload_exit(void)
458 {
459
460 if (--inited)
461 return;
462
463 mtx_destroy(&cxgb_db_lock);
464 }
465
466 MODULE_VERSION(if_cxgb, 1);
Cache object: ba588a64477d071ce15e81462355f32d
|