/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnxr_def.h"
#include "rdma_common.h"
#include "qlnxr_cm.h"

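/*
 * Advance the software-tracked GSI consumer index one slot, wrapping at
 * max_wr; the GSI send and receive queues are treated as simple rings.
 */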
void
qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info)
{
        info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

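/*
 * Remember the QP and CQs associated with the GSI QP so that the LL2
 * completion callbacks (which only receive the device pointer as their
 * cookie) can find them later.
 */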
void
qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
    struct ib_qp_init_attr *attrs)
{
        QL_DPRINT12(dev->ha, "enter\n");

        dev->gsi_qp_created = 1;
        dev->gsi_sqcq = get_qlnxr_cq(attrs->send_cq);
        dev->gsi_rqcq = get_qlnxr_cq(attrs->recv_cq);
        dev->gsi_qp = qp;

        QL_DPRINT12(dev->ha, "exit\n");

        return;
}

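/*
 * LL2 TX completion callback: frees the DMA-coherent header buffer and the
 * packet descriptor allocated in qlnxr_gsi_build_packet(), advances the SQ
 * GSI consumer index, and signals the send CQ's completion handler.
 */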
void
qlnxr_ll2_complete_tx_packet(void *cxt, uint8_t connection_handle,
    void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment,
    bool b_last_packet)
{
        struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
        struct ecore_roce_ll2_packet *pkt = cookie;
        struct qlnxr_cq *cq = dev->gsi_sqcq;
        struct qlnxr_qp *qp = dev->gsi_qp;
        unsigned long flags;

        QL_DPRINT12(dev->ha, "enter\n");

        qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
            pkt->header.baddr, pkt->header.len);
        kfree(pkt);

        spin_lock_irqsave(&qp->q_lock, flags);

        qlnxr_inc_sw_gsi_cons(&qp->sq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);

        QL_DPRINT12(dev->ha, "exit\n");

        return;
}

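/*
 * LL2 RX completion callback: records the completion status, VLAN, data
 * length and source MAC (carried in the opaque data words) in the GSI RQ
 * shadow ring, advances the RQ GSI consumer index, and signals the receive
 * CQ's completion handler.
 */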
void
qlnxr_ll2_complete_rx_packet(void *cxt, struct ecore_ll2_comp_rx_data *data)
{
        struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
        struct qlnxr_cq *cq = dev->gsi_rqcq;
        struct qlnxr_qp *qp = NULL;
        unsigned long flags;

        QL_DPRINT12(dev->ha, "enter\n");

        if (data->u.data_length_error) {
                /* TODO: add statistic */
        }

        if (data->cookie == NULL) {
                QL_DPRINT12(dev->ha, "cookie is NULL, bad sign\n");
        }

        if (data->qp_id == 1) {
                qp = dev->gsi_qp;
        } else {
                /* TODO: This will be needed for UD QP support */
                /* For RoCEv1 this is invalid */
                QL_DPRINT12(dev->ha, "invalid QP\n");
                return;
        }
        /* note: currently only one recv sg is supported */
        QL_DPRINT12(dev->ha, "MAD received, rx_buf_addr: %x\n",
            data->rx_buf_addr);

        spin_lock_irqsave(&qp->q_lock, flags);

        qp->rqe_wr_id[qp->rq.gsi_cons].rc =
            data->u.data_length_error ? -EINVAL : 0;
        qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
        /* note: length stands for data length i.e. GRH is excluded */
        qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
            data->length.data_length;
        *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
            ntohl(data->opaque_data_0);
        *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
            ntohs((u16)data->opaque_data_1);

        qlnxr_inc_sw_gsi_cons(&qp->rq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);

        QL_DPRINT12(dev->ha, "exit\n");

        return;
}

void
qlnxr_ll2_release_rx_packet(void *cxt, u8 connection_handle, void *cookie,
    dma_addr_t rx_buf_addr, bool b_last_packet)
{
        /* Do nothing... */
}

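/*
 * The GSI CQ is serviced entirely in software (see qlnxr_gsi_poll_cq()), so
 * the firmware-side CQ objects created for it are torn down here.  The send
 * and receive CQs may be the same object, hence the icid comparison below.
 */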
static void
qlnxr_destroy_gsi_cq(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs)
{
        struct ecore_rdma_destroy_cq_in_params iparams;
        struct ecore_rdma_destroy_cq_out_params oparams;
        struct qlnxr_cq *cq;

        QL_DPRINT12(dev->ha, "enter\n");

        cq = get_qlnxr_cq(attrs->send_cq);
        iparams.icid = cq->icid;
        ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
        ecore_chain_free(&dev->ha->cdev, &cq->pbl);

        cq = get_qlnxr_cq(attrs->recv_cq);
        /* if a dedicated recv_cq was used, delete it too */
        if (iparams.icid != cq->icid) {
                iparams.icid = cq->icid;
                ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
                ecore_chain_free(&dev->ha->cdev, &cq->pbl);
        }

        QL_DPRINT12(dev->ha, "exit\n");

        return;
}

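/*
 * Validate the caller-supplied QP attributes against the fixed limits the
 * driver supports for the GSI QP.
 */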
static inline int
qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs)
{
        QL_DPRINT12(dev->ha, "enter\n");

        if (attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE) {
                QL_DPRINT11(dev->ha,
                    "(attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE)\n");
                return -EINVAL;
        }

        if (attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR) {
                QL_DPRINT11(dev->ha,
                    "(attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR)\n");
                return -EINVAL;
        }

        if (attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR) {
                QL_DPRINT11(dev->ha,
                    "(attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR)\n");
                return -EINVAL;
        }

        QL_DPRINT12(dev->ha, "exit\n");

        return 0;
}

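/*
 * Transmit one GSI packet over the LL2 connection.  The UD header built in
 * qlnxr_gsi_build_packet() goes out as the first BD; each payload SGE is
 * then attached as an additional fragment.  If posting the header fails the
 * packet resources are released here; if a later fragment fails, part of
 * the packet is already queued and the buffers can only be reclaimed by the
 * TX completion path.
 */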
static int
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
{
        enum ecore_ll2_roce_flavor_type roce_flavor;
        struct ecore_ll2_tx_pkt_info ll2_tx_pkt;
        struct ecore_ll2_stats stats = {0};
        int rc;
        int i;

        QL_DPRINT12(dev->ha, "enter\n");

        memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

        if (pkt->roce_mode != ROCE_V1) {
                QL_DPRINT11(dev->ha, "roce_mode != ROCE_V1\n");
                return (-1);
        }

        roce_flavor = (pkt->roce_mode == ROCE_V1) ?
            ECORE_LL2_ROCE : ECORE_LL2_RROCE;

        ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
        ll2_tx_pkt.vlan = 0; /* ??? */
        ll2_tx_pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
        ll2_tx_pkt.ecore_roce_flavor = roce_flavor;
        ll2_tx_pkt.first_frag = pkt->header.baddr;
        ll2_tx_pkt.first_frag_len = pkt->header.len;
        ll2_tx_pkt.cookie = pkt;
        ll2_tx_pkt.enable_ip_cksum = 1; /* only for RoCEv2:IPv4 */

        /* tx header */
        rc = ecore_ll2_prepare_tx_packet(dev->rdma_ctx,
            dev->gsi_ll2_handle, &ll2_tx_pkt, 1);
        if (rc) {
                QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");

                /* TX failed while posting header - release resources */
                qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
                    pkt->header.baddr, pkt->header.len);

                kfree(pkt);

                return rc;
        }

        /* tx payload */
        for (i = 0; i < pkt->n_seg; i++) {
                rc = ecore_ll2_set_fragment_of_tx_packet(dev->rdma_ctx,
                    dev->gsi_ll2_handle, pkt->payload[i].baddr,
                    pkt->payload[i].len);
                if (rc) {
                        /* if failed not much to do here, partial packet has
                         * been posted we can't free memory, will need to wait
                         * for completion
                         */
                        QL_DPRINT11(dev->ha,
                            "ecore_ll2_set_fragment_of_tx_packet failed\n");
                        return rc;
                }
        }

        rc = ecore_ll2_get_stats(dev->rdma_ctx, dev->gsi_ll2_handle, &stats);
        if (rc) {
                QL_DPRINT11(dev->ha, "failed to obtain ll2 stats\n");
        }

        QL_DPRINT12(dev->ha, "exit\n");

        return 0;
}

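/*
 * Tear down the GSI LL2 connection: remove the MAC filter, terminate and
 * release the connection, and mark the handle invalid (0xFF).
 */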
int
qlnxr_ll2_stop(struct qlnxr_dev *dev)
{
        int rc;

        QL_DPRINT12(dev->ha, "enter\n");

        if (dev->gsi_ll2_handle == 0xFF)
                return 0;

        /* remove LL2 MAC address filter */
        rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx,
            dev->gsi_ll2_mac_address, NULL);

        rc = ecore_ll2_terminate_connection(dev->rdma_ctx,
            dev->gsi_ll2_handle);

        ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        dev->gsi_ll2_handle = 0xFF;

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
        return rc;
}

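/*
 * Bring up the LL2 connection that carries GSI traffic: register the
 * completion callbacks, acquire and establish a RoCE-type LL2 connection
 * sized for 8K RX/TX descriptors, and install the port MAC filter.  On
 * failure the connection is unwound in reverse order.
 */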
int
qlnxr_ll2_start(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs,
    struct qlnxr_qp *qp)
{
        struct ecore_ll2_acquire_data data;
        struct ecore_ll2_cbs cbs;
        int rc;

        QL_DPRINT12(dev->ha, "enter\n");

        /* configure and start LL2 */
        cbs.rx_comp_cb = qlnxr_ll2_complete_rx_packet;
        cbs.tx_comp_cb = qlnxr_ll2_complete_tx_packet;
        cbs.rx_release_cb = qlnxr_ll2_release_rx_packet;
        cbs.tx_release_cb = qlnxr_ll2_complete_tx_packet;
        cbs.cookie = dev;
        dev->gsi_ll2_handle = 0xFF;

        memset(&data, 0, sizeof(data));
        data.input.conn_type = ECORE_LL2_TYPE_ROCE;
        data.input.mtu = dev->ha->ifp->if_mtu;
        data.input.rx_num_desc = 8 * 1024;
        data.input.rx_drop_ttl0_flg = 1;
        data.input.rx_vlan_removal_en = 0;
        data.input.tx_num_desc = 8 * 1024;
        data.input.tx_tc = 0;
        data.input.tx_dest = ECORE_LL2_TX_DEST_NW;
        data.input.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
        data.input.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
        data.input.gsi_enable = 1;
        data.p_connection_handle = &dev->gsi_ll2_handle;
        data.cbs = &cbs;

        rc = ecore_ll2_acquire_connection(dev->rdma_ctx, &data);
        if (rc) {
                QL_DPRINT11(dev->ha,
                    "ecore_ll2_acquire_connection failed: %d\n", rc);
                return rc;
        }

        QL_DPRINT11(dev->ha, "ll2 connection acquired successfully\n");

        rc = ecore_ll2_establish_connection(dev->rdma_ctx,
            dev->gsi_ll2_handle);
        if (rc) {
                QL_DPRINT11(dev->ha,
                    "ecore_ll2_establish_connection failed: %d\n", rc);
                goto err1;
        }

        QL_DPRINT11(dev->ha, "ll2 connection established successfully\n");

        rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx, NULL,
            dev->ha->primary_mac);
        if (rc) {
                QL_DPRINT11(dev->ha,
                    "qlnx_rdma_ll2_set_mac_filter failed: %d\n", rc);
                goto err2;
        }

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
        return 0;

err2:
        ecore_ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
        ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
        return rc;
}

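/*
 * Create the special GSI (QP1) queue pair.  LL2 is started first, the
 * software shadow rings for SQ/RQ work requests are allocated, and the
 * firmware CQs are replaced by driver-managed ones.  A caller in the verbs
 * layer would reach this path roughly as follows (illustrative sketch only;
 * the surrounding create-QP plumbing lives elsewhere in the driver):
 *
 *      if (attrs->qp_type == IB_QPT_GSI)
 *              return (qlnxr_create_gsi_qp(dev, attrs, qp));
 */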
struct ib_qp *
qlnxr_create_gsi_qp(struct qlnxr_dev *dev, struct ib_qp_init_attr *attrs,
    struct qlnxr_qp *qp)
{
        int rc;

        QL_DPRINT12(dev->ha, "enter\n");

        rc = qlnxr_check_gsi_qp_attrs(dev, attrs);
        if (rc) {
                QL_DPRINT11(dev->ha, "qlnxr_check_gsi_qp_attrs failed\n");
                return ERR_PTR(rc);
        }

        rc = qlnxr_ll2_start(dev, attrs, qp);
        if (rc) {
                QL_DPRINT11(dev->ha, "qlnxr_ll2_start failed\n");
                return ERR_PTR(rc);
        }

        /* create QP */
        qp->ibqp.qp_num = 1;
        qp->rq.max_wr = attrs->cap.max_recv_wr;
        qp->sq.max_wr = attrs->cap.max_send_wr;

        qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
            GFP_KERNEL);
        if (!qp->rqe_wr_id) {
                QL_DPRINT11(dev->ha, "(!qp->rqe_wr_id)\n");
                goto err;
        }

        qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
            GFP_KERNEL);
        if (!qp->wqe_wr_id) {
                QL_DPRINT11(dev->ha, "(!qp->wqe_wr_id)\n");
                goto err;
        }

        qlnxr_store_gsi_qp_cq(dev, qp, attrs);
        memcpy(dev->gsi_ll2_mac_address, dev->ha->primary_mac, ETH_ALEN);

        /* the GSI CQ is handled by the driver so remove it from the FW */
        qlnxr_destroy_gsi_cq(dev, attrs);
        dev->gsi_sqcq->cq_type = QLNXR_CQ_TYPE_GSI;
        dev->gsi_rqcq->cq_type = QLNXR_CQ_TYPE_GSI;

        QL_DPRINT12(dev->ha, "exit &qp->ibqp = %p\n", &qp->ibqp);

        return &qp->ibqp;

err:
        kfree(qp->rqe_wr_id);

        rc = qlnxr_ll2_stop(dev);

        QL_DPRINT12(dev->ha, "exit with error\n");

        return ERR_PTR(-ENOMEM);
}

int
qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
{
        int rc = 0;

        QL_DPRINT12(dev->ha, "enter\n");

        rc = qlnxr_ll2_stop(dev);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
        return (rc);
}

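/*
 * For RoCEv1 a VLAN membership is encoded directly in the destination GID:
 * bytes 11 and 12 carry the VLAN ID.  Values below 0x1000 (the 12-bit VLAN
 * range) are treated as a valid VLAN ID.
 */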
static inline bool
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
{
        u16 tmp_vlan_id;
        union ib_gid *dgid = &ah_attr->grh.dgid;

        tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
        if (tmp_vlan_id < 0x1000) {
                *vlan_id = tmp_vlan_id;
                return true;
        } else {
                *vlan_id = 0;
                return false;
        }
}

#define QLNXR_MAX_UD_HEADER_SIZE        (100)
#define QLNXR_GSI_QPN                   (1)
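
/*
 * Build the UD header for an outgoing GSI send: Ethernet (plus optional
 * VLAN), GRH/IPv6 (or IPv4 for RoCEv2 where supported), BTH and DETH.  The
 * caller packs the result into a contiguous buffer via ib_ud_header_pack().
 */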
static inline int
qlnxr_gsi_build_header(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
    const struct ib_send_wr *swr, struct ib_ud_header *udh, int *roce_mode)
{
        bool has_vlan = false, has_grh_ipv6 = true;
        struct ib_ah_attr *ah_attr = &get_qlnxr_ah((ud_wr(swr)->ah))->attr;
        struct ib_global_route *grh = &ah_attr->grh;
        union ib_gid sgid;
        int send_size = 0;
        u16 vlan_id = 0;
        u16 ether_type;

#if __FreeBSD_version >= 1102000
        int rc = 0;
        int ip_ver = 0;
        bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */

#if !DEFINE_IB_AH_ATTR_WITH_DMAC
        u8 mac[ETH_ALEN];
#endif
        int i;

        send_size = 0;
        for (i = 0; i < swr->num_sge; ++i)
                send_size += swr->sg_list[i].length;

        has_vlan = qlnxr_get_vlan_id_gsi(ah_attr, &vlan_id);
        ether_type = ETH_P_ROCE;
        *roce_mode = ROCE_V1;
        if (grh->sgid_index < QLNXR_MAX_SGID)
                sgid = dev->sgid_tbl[grh->sgid_index];
        else
                sgid = dev->sgid_tbl[0];

#if __FreeBSD_version >= 1102000

        rc = ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
            has_vlan, has_grh_ipv6, ip_ver, has_udp, 0 /* immediate */, udh);
        if (rc) {
                QL_DPRINT11(dev->ha, "gsi post send: failed to init header\n");
                return rc;
        }

#else
        ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
            has_vlan, has_grh_ipv6, 0 /* immediate */, udh);

#endif /* #if __FreeBSD_version >= 1102000 */

        /* ENET + VLAN headers */
#if DEFINE_IB_AH_ATTR_WITH_DMAC
        memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
#else
        qlnxr_get_dmac(dev, ah_attr, mac);
        memcpy(udh->eth.dmac_h, mac, ETH_ALEN);
#endif
        memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);
        if (has_vlan) {
                udh->eth.type = htons(ETH_P_8021Q);
                udh->vlan.tag = htons(vlan_id);
                udh->vlan.type = htons(ether_type);
        } else {
                udh->eth.type = htons(ether_type);
        }

        for (i = 0; i < ETH_ALEN; i++) {
                QL_DPRINT12(dev->ha, "destination mac: %x\n",
                    udh->eth.dmac_h[i]);
        }
        for (i = 0; i < ETH_ALEN; i++) {
                QL_DPRINT12(dev->ha, "source mac: %x\n",
                    udh->eth.smac_h[i]);
        }

        QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, "
            "hops: %d, imm: %d, vlan: %d, AH: %p\n",
            qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
            0, has_vlan, get_qlnxr_ah((ud_wr(swr)->ah)));

        if (has_grh_ipv6) {
                /* GRH / IPv6 header */
                udh->grh.traffic_class = grh->traffic_class;
                udh->grh.flow_label = grh->flow_label;
                udh->grh.hop_limit = grh->hop_limit;
                udh->grh.destination_gid = grh->dgid;
                memcpy(&udh->grh.source_gid.raw, &sgid.raw,
                    sizeof(udh->grh.source_gid.raw));
                QL_DPRINT12(dev->ha, "header: tc: %x, flow_label: %x, "
                    "hop_limit: %x\n", udh->grh.traffic_class,
                    udh->grh.flow_label, udh->grh.hop_limit);
                for (i = 0; i < 16; i++) {
                        QL_DPRINT12(dev->ha, "udh dgid = %x\n",
                            udh->grh.destination_gid.raw[i]);
                }
                for (i = 0; i < 16; i++) {
                        QL_DPRINT12(dev->ha, "udh sgid = %x\n",
                            udh->grh.source_gid.raw[i]);
                }
                udh->grh.next_header = 0x1b;
        }
#ifdef DEFINE_IB_UD_HEADER_INIT_UDP_PRESENT
        /* This is for RoCEv2 */
        else {
                /* IPv4 header */
                u32 ipv4_addr;

                udh->ip4.protocol = IPPROTO_UDP;
                udh->ip4.tos = htonl(grh->flow_label);
                udh->ip4.frag_off = htons(IP_DF);
                udh->ip4.ttl = grh->hop_limit;

                ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
                udh->ip4.saddr = ipv4_addr;
                ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
                udh->ip4.daddr = ipv4_addr;
                /* note: checksum is calculated by the device */
        }
#endif

        /* BTH */
        udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
        udh->bth.pkey = QLNXR_ROCE_PKEY_DEFAULT; /* TODO: ib_get_cached_pkey? */
        udh->bth.destination_qpn = OSAL_CPU_TO_BE32(ud_wr(swr)->remote_qpn);
        udh->bth.psn = OSAL_CPU_TO_BE32((qp->sq_psn++) & ((1 << 24) - 1));
        udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

        /* DETH */
        udh->deth.qkey = OSAL_CPU_TO_BE32(0x80010000); /* qp->qkey, TODO */
        udh->deth.source_qpn = OSAL_CPU_TO_BE32(QLNXR_GSI_QPN);

        QL_DPRINT12(dev->ha, "exit\n");
        return 0;
}

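/*
 * Assemble a complete TX packet for the GSI QP: build and pack the UD
 * header into a DMA-coherent buffer and record each payload SGE, updating
 * the SQ shadow ring's byte count.  Packets whose destination MAC equals
 * the source MAC are steered to loopback rather than the network.
 */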
static inline int
qlnxr_gsi_build_packet(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
    const struct ib_send_wr *swr, struct ecore_roce_ll2_packet **p_packet)
{
        u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
        struct ecore_roce_ll2_packet *packet;
        int roce_mode, header_size;
        struct ib_ud_header udh;
        int i, rc;

        QL_DPRINT12(dev->ha, "enter\n");

        *p_packet = NULL;

        rc = qlnxr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
        if (rc) {
                QL_DPRINT11(dev->ha,
                    "qlnxr_gsi_build_header failed rc = %d\n", rc);
                return rc;
        }

        header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

        packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
        if (!packet) {
                QL_DPRINT11(dev->ha, "packet == NULL\n");
                return -ENOMEM;
        }

        packet->header.vaddr = qlnx_dma_alloc_coherent(&dev->ha->cdev,
            &packet->header.baddr, header_size);
        if (!packet->header.vaddr) {
                QL_DPRINT11(dev->ha, "packet->header.vaddr == NULL\n");
                kfree(packet);
                return -ENOMEM;
        }

        if (memcmp(udh.eth.smac_h, udh.eth.dmac_h, ETH_ALEN))
                packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_NW;
        else
                packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_LB;

        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
        packet->header.len = header_size;
        packet->n_seg = swr->num_sge;
        qp->wqe_wr_id[qp->sq.prod].bytes_len = IB_GRH_BYTES; /* RDMA_GRH_BYTES */
        for (i = 0; i < packet->n_seg; i++) {
                packet->payload[i].baddr = swr->sg_list[i].addr;
                packet->payload[i].len = swr->sg_list[i].length;
                qp->wqe_wr_id[qp->sq.prod].bytes_len +=
                    packet->payload[i].len;
                QL_DPRINT11(dev->ha, "baddr: %p, len: %d\n",
                    packet->payload[i].baddr,
                    packet->payload[i].len);
        }

        *p_packet = packet;

        QL_DPRINT12(dev->ha, "exit, packet->n_seg: %d\n", packet->n_seg);
        return 0;
}

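/*
 * Post a send work request on the GSI QP.  Only a single IB_WR_SEND work
 * request per call is supported; if a chain is passed in, everything after
 * the first WR is reported back through *bad_wr with -EINVAL.
 */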
int
qlnxr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr)
{
        struct ecore_roce_ll2_packet *pkt = NULL;
        struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
        struct qlnxr_dev *dev = qp->dev;
        unsigned long flags;
        int rc;

        QL_DPRINT12(dev->ha, "enter\n");

        if (qp->state != ECORE_ROCE_QP_STATE_RTS) {
                QL_DPRINT11(dev->ha,
                    "(qp->state != ECORE_ROCE_QP_STATE_RTS)\n");
                *bad_wr = wr;
                return -EINVAL;
        }

        if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
                QL_DPRINT11(dev->ha,
                    "(wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE)\n");
                rc = -EINVAL;
                goto err;
        }

        if (wr->opcode != IB_WR_SEND) {
                QL_DPRINT11(dev->ha, "(wr->opcode != IB_WR_SEND)\n");
                rc = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&qp->q_lock, flags);

        rc = qlnxr_gsi_build_packet(dev, qp, wr, &pkt);
        if (rc) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                QL_DPRINT11(dev->ha, "qlnxr_gsi_build_packet failed\n");
                goto err;
        }

        rc = qlnxr_ll2_post_tx(dev, pkt);
        if (!rc) {
                qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
                qp->wqe_wr_id[qp->sq.prod].signaled =
                    !!(wr->send_flags & IB_SEND_SIGNALED);
                qp->wqe_wr_id[qp->sq.prod].opcode = IB_WC_SEND;
                qlnxr_inc_sw_prod(&qp->sq);
                QL_DPRINT11(dev->ha, "packet sent over gsi qp\n");
        } else {
                QL_DPRINT11(dev->ha, "qlnxr_ll2_post_tx failed\n");
                rc = -EAGAIN;
                *bad_wr = wr;
        }

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (wr->next != NULL) {
                *bad_wr = wr->next;
                rc = -EINVAL;
        }

        QL_DPRINT12(dev->ha, "exit\n");
        return rc;

err:
        *bad_wr = wr;
        QL_DPRINT12(dev->ha, "exit error\n");
        return rc;
}

#define QLNXR_LL2_RX_BUFFER_SIZE        (4 * 1024)
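
/*
 * Post receive buffers on the GSI QP.  Each WR may carry only a single SGE;
 * the buffer is handed to LL2 directly and mirrored in the RQ shadow ring
 * so the poll path can complete it.  A hypothetical caller sketch (the
 * buffer names are illustrative, not from this driver):
 *
 *      struct ib_sge sge = { .addr = buf_dma,
 *          .length = QLNXR_LL2_RX_BUFFER_SIZE };
 *      struct ib_recv_wr rwr = { .sg_list = &sge, .num_sge = 1 };
 *      const struct ib_recv_wr *bad;
 *
 *      qlnxr_gsi_post_recv(ibqp, &rwr, &bad);
 */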
int
qlnxr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr)
{
        struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
        struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
        unsigned long flags;
        int rc = 0;

        QL_DPRINT12(dev->ha, "enter, wr: %p\n", wr);

        if ((qp->state != ECORE_ROCE_QP_STATE_RTR) &&
            (qp->state != ECORE_ROCE_QP_STATE_RTS)) {
                *bad_wr = wr;
                QL_DPRINT11(dev->ha, "exit 0\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->q_lock, flags);

        while (wr) {
                if (wr->num_sge > QLNXR_GSI_MAX_RECV_SGE) {
                        QL_DPRINT11(dev->ha, "exit 1\n");
                        goto err;
                }

                rc = ecore_ll2_post_rx_buffer(dev->rdma_ctx,
                    dev->gsi_ll2_handle,
                    wr->sg_list[0].addr,
                    wr->sg_list[0].length,
                    0 /* cookie */,
                    1 /* notify_fw */);
                if (rc) {
                        QL_DPRINT11(dev->ha, "exit 2\n");
                        goto err;
                }

                memset(&qp->rqe_wr_id[qp->rq.prod], 0,
                    sizeof(qp->rqe_wr_id[qp->rq.prod]));
                qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
                qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

                qlnxr_inc_sw_prod(&qp->rq);

                wr = wr->next;
        }

        spin_unlock_irqrestore(&qp->q_lock, flags);

        QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
        return rc;

err:
        spin_unlock_irqrestore(&qp->q_lock, flags);
        *bad_wr = wr;

        QL_DPRINT12(dev->ha, "exit with -ENOMEM\n");
        return -ENOMEM;
}

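/*
 * Poll the GSI CQ entirely in software: drain completed receives first
 * (cons chasing gsi_cons on the RQ shadow ring), then completed sends on
 * the SQ ring.  Returns the number of work completions written to wc.
 */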
int
qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
        struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
        struct qlnxr_qp *qp = dev->gsi_qp;
        unsigned long flags;
        int i = 0;

        QL_DPRINT12(dev->ha, "enter\n");

        spin_lock_irqsave(&cq->cq_lock, flags);

        while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
                wc[i].opcode = IB_WC_RECV;
                wc[i].pkey_index = 0;
                wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
                    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
                /* 0 - currently only one recv sg is supported */
                wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
                wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;

#if __FreeBSD_version >= 1100000
                memcpy(&wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac, ETH_ALEN);
                wc[i].wc_flags |= IB_WC_WITH_SMAC;

                if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
                        wc[i].wc_flags |= IB_WC_WITH_VLAN;
                        wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
                }

#endif
                qlnxr_inc_sw_cons(&qp->rq);
                i++;
        }

        while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
                wc[i].opcode = IB_WC_SEND;
                wc[i].status = IB_WC_SUCCESS;

                qlnxr_inc_sw_cons(&qp->sq);
                i++;
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        QL_DPRINT12(dev->ha, "exit i = %d\n", i);
        return i;
}