/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include "mana.h"
#include "hw_channel.h"

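/*
 * Reserve a message slot for a new request.  The counting semaphore bounds
 * the number of in-flight messages to the HWC queue depth; once a slot is
 * available, the first free bit in the inflight_msg_res bitmap is claimed
 * under the spin lock and returned as the message ID.
 */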
static int
mana_hwc_get_msg_index(struct hw_channel_context *hwc, uint16_t *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	uint32_t index;

	sema_wait(&hwc->sema);

	mtx_lock_spin(&r->lock_spin);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
	    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	mtx_unlock_spin(&r->lock_spin);

	*msg_id = index;

	return 0;
}

static void
mana_hwc_put_msg_index(struct hw_channel_context *hwc, uint16_t msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;

	mtx_lock_spin(&r->lock_spin);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	mtx_unlock_spin(&r->lock_spin);

	sema_post(&hwc->sema);
}

static int
mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
    const struct gdma_resp_hdr *resp_msg,
    uint32_t resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return EPROTO;

	return 0;
}

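/*
 * Deliver a response received on the HWC RQ to the waiting caller.  The
 * embedded message ID must correspond to a bit set in the in-flight bitmap;
 * after length validation the response is copied into the caller's output
 * buffer and the caller is woken through its completion event.
 */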
static void
mana_hwc_handle_resp(struct hw_channel_context *hwc, uint32_t resp_len,
    const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
	    hwc->inflight_msg_res.map)) {
		device_printf(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
		    resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

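/*
 * Post a single-SGE receive WQE that points at the work request's DMA
 * buffer, then ring the RQ doorbell so the hardware can place the next
 * response into it.
 */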
static int
mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
    struct hwc_work_request *req)
{
	device_t dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (uintptr_t)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

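/*
 * EQ callback used while the channel is being established.  The device
 * reports the IDs of the queues it created, the doorbell to use, the
 * protection domain, the GPA memory key and the size limits through a
 * series of HWC_INIT events; GDMA_EQE_HWC_INIT_DONE signals that setup is
 * complete.
 */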
static void
mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
    struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	uint32_t type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (uint16_t)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

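/*
 * RX completion path: walk back from the completed WQE to the SGE it
 * carried, convert the SGE's DMA address into an index into the RQ message
 * buffer to recover both the work request and the virtual address of the
 * response, hand the response to mana_hwc_handle_resp(), and finally
 * repost the buffer to the hardware.
 */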
static void
mana_hwc_rx_event_handler(void *ctx, uint32_t gdma_rxq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	uint64_t rq_base_addr;
	uint64_t rx_req_idx;
	uint8_t *wqe;

	if (hwc_rxq->gdma_wq->id != gdma_rxq_id) {
		mana_warn(NULL, "unmatched rx queue %u != %u\n",
		    hwc_rxq->gdma_wq->id, gdma_rxq_id);
		return;
	}

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	bus_dmamap_sync(rq->mem_info.dma_tag, rq->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		device_printf(hwc->dev, "HWC RX: wrong msg_id=%u\n",
		    resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/*
	 * Stop using 'resp' here: the buffer is handed back to the hardware
	 * by mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_PREREAD);

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

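/*
 * TX completion path: a send WQE has been consumed, so the only work left
 * is to validate the queue ID and sync the SQ ring for reuse.
 */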
static void
mana_hwc_tx_event_handler(void *ctx, uint32_t gdma_txq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	if (!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id) {
		/*
		 * hwc_txq may be NULL here; do not dereference it in the
		 * log message or in the sync below.
		 */
		mana_warn(NULL, "unmatched tx queue %u != %u\n",
		    hwc_txq ? hwc_txq->gdma_wq->id : 0, gdma_txq_id);
		return;
	}

	bus_dmamap_sync(hwc_txq->gdma_wq->mem_info.dma_tag,
	    hwc_txq->gdma_wq->mem_info.dma_map,
	    BUS_DMASYNC_POSTWRITE);
}

static int
mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type type, uint64_t queue_size,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_cq_callback *cb,
    struct gdma_queue *parent_eq,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_eq_callback *cb,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

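/*
 * CQ callback shared by the HWC SQ and RQ.  Drain up to queue_depth
 * completions, dispatch each one to the TX or RX handler depending on
 * whether it came from the send queue, then re-arm the CQ.
 */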
static void
mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
	}

	bus_dmamap_sync(q_self->mem_info.dma_tag, q_self->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void
mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	if (hwc_cq->comp_buf)
		free(hwc_cq->comp_buf, M_DEVBUF);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	free(hwc_cq, M_DEVBUF);
}

static int
mana_hwc_create_cq(struct hw_channel_context *hwc,
    uint16_t q_depth,
    gdma_eq_callback *callback, void *ctx,
    hwc_rx_event_handler_t *rx_ev_hdlr, void *rx_ev_ctx,
    hwc_tx_event_handler_t *tx_ev_hdlr, void *tx_ev_ctx,
    struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	uint32_t eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc_cq)
		return ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq,
	    mana_hwc_comp_event, eq, &cq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!comp_buf) {
		err = ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

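/*
 * Allocate one contiguous DMA region (rounded up to a whole number of
 * pages) and carve it into q_depth slots of max_msg_size bytes each.
 * Every slot backs one hwc_work_request, which records both the kernel
 * virtual address and the device-visible address of its slot.
 */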
static int
mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_msg_size,
    struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	uint32_t buf_size;
	uint8_t *base_pa;
	void *virt_addr;
	uint16_t i;
	int err;

	dma_buf = malloc(sizeof(*dma_buf) +
	    q_depth * sizeof(struct hwc_work_request),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!dma_buf)
		return ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (uint8_t *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = (char *)virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	free(dma_buf, M_DEVBUF);
	return err;
}

static void
mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
    struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	free(dma_buf, M_DEVBUF);
}

static void
mana_hwc_destroy_wq(struct hw_channel_context *hwc,
    struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
		    hwc_wq->gdma_wq);

	free(hwc_wq, M_DEVBUF);
}

static int
mana_hwc_create_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type q_type, uint16_t q_depth,
    uint32_t max_msg_size, struct hwc_cq *hwc_cq,
    struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	uint32_t queue_size;
	int err;

	if (q_type != GDMA_SQ && q_type != GDMA_RQ) {
		/* XXX should fail and return error? */
		mana_warn(NULL, "Invalid q_type %u\n", q_type);
	}

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc_wq)
		return ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
	    &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

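/*
 * Build the inline TX out-of-band area (destination virtual RQ/RCQ IDs,
 * our own SQ/SCQ IDs, PF flag) and post a single-SGE send WQE describing
 * the request buffer, then ring the SQ doorbell.
 */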
static int
mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
    struct hwc_work_request *req,
    uint32_t dest_virt_rq_id, uint32_t dest_virt_rcq_id,
    bool dest_pf)
{
	device_t dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		device_printf(dev, "wrong msg_size: %u, buf_len: %u\n",
		    req->msg_size, req->buf_len);
		return EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (uintptr_t)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int
mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, uint16_t num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg, "gdma hwc sema");

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res,
	    "gdma hwc res lock");
	if (err)
		device_printf(hwc->dev,
		    "Failed to init inflight_msg_res: %d\n", err);

	return (err);
}

static int
mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!ctx)
		return ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

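/*
 * Hand the DMA addresses of the HWC EQ, CQ, RQ and SQ to the device
 * through the shared-memory channel, then wait for the
 * GDMA_EQE_HWC_INIT_DONE event.  The queue depth and message size limits
 * reported by the device are returned to the caller, and the HWC CQ is
 * registered in the global CQ table.
 */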
static int
mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
    uint32_t *max_req_msg_size,
    uint32_t *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
	    eq->mem_info.dma_handle,
	    cq->mem_info.dma_handle,
	    rq->mem_info.dma_handle,
	    sq->mem_info.dma_handle,
	    eq->eq.msix_index);
	if (err)
		return err;

	if (wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * hz))
		return ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (cq->id >= gc->max_num_cqs) {
		mana_warn(NULL, "invalid cq id %u >= %u\n",
		    cq->id, gc->max_num_cqs);
		return EPROTO;
	}

	gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!gc->cq_table)
		return ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

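/*
 * Create the software side of the channel: the in-flight message tracking,
 * a CQ shared by the SQ and RQ, and the two work queues with their
 * per-message DMA buffers.  Nothing is sent to the device here; the
 * hardware side is brought up later by mana_hwc_establish_channel().
 */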
static int
mana_hwc_init_queues(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
	    mana_hwc_init_event_handler, hwc,
	    mana_hwc_rx_event_handler, hwc,
	    mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
	    hwc->cq, &hwc->rxq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
	    hwc->cq, &hwc->txq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

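/*
 * Bring up the hardware channel: allocate the context and queues, let the
 * device create its side of the channel via the shared-memory channel, and
 * verify it by posting the receive buffers and generating a test EQ entry.
 */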
int
mana_hwc_create_channel(struct gdma_context *gc)
{
	uint32_t max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	uint16_t q_depth_max;
	int err;

	hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc)
		return ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/*
	 * mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    HW_CHANNEL_MAX_REQUEST_SIZE,
	    HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		device_printf(hwc->dev, "Failed to initialize HWC: %d\n",
		    err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
	    &max_resp_msg_size);
	if (err) {
		device_printf(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
	    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    max_req_msg_size, max_resp_msg_size);
	if (err) {
		/* Test failed, but the channel has been established */
		device_printf(hwc->dev, "Failed to test HWC: %d\n", err);
		return EIO;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return (err);
}

void
mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/*
	 * gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	free(hwc->caller_ctx, M_DEVBUF);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	free(hwc, M_DEVBUF);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	free(gc->cq_table, M_DEVBUF);
	gc->cq_table = NULL;
}

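/*
 * Synchronous request/response entry point used by the rest of the driver.
 * A message slot is reserved, the request is copied into that slot's DMA
 * buffer, a send WQE is posted, and the caller sleeps on the slot's
 * completion event until mana_hwc_handle_resp() fills in the response or
 * the 30-second timeout expires.
 *
 * A typical caller builds a request that starts with a struct gdma_req_hdr
 * (this function sets the header's req.hwc_msg_id itself) and then does
 * roughly:
 *
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *	    sizeof(resp), &resp);
 *
 * (Illustrative sketch only; 'req' and 'resp' stand for the concrete
 * request/response structures of the message being sent.)
 */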
int
mana_hwc_send_request(struct hw_channel_context *hwc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	uint16_t msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		device_printf(hwc->dev,
		    "HWC: req msg size: %d > %d\n", req_len,
		    tx_wr->buf_len);
		err = EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		device_printf(hwc->dev,
		    "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (wait_for_completion_timeout(&ctx->comp_event, 30 * hz)) {
		device_printf(hwc->dev, "HWC: Request timed out!\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		device_printf(hwc->dev,
		    "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code);
		err = EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}