/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/transobj.h>

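/*
 * Resolve a resource serial number (RSN) to its common resource
 * structure.  The lookup and the reference count increment are done
 * under the table lock, so the entry cannot be freed in between;
 * callers drop the reference with mlx5_core_put_rsc().
 */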
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

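/*
 * Drop a reference taken by mlx5_get_rsc().  The final reference
 * signals the "free" completion that destroy_qprqsq_common() waits on
 * before the resource may be reclaimed.
 */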
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

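/*
 * Dispatch an asynchronous firmware event to the resource it targets.
 * Only QP events are handled here; events for any other resource type
 * are logged and dropped.
 */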
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

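/*
 * Common tracking setup shared by QPs, RQs and SQs.  The radix tree key
 * encodes the resource type above the 24-bit resource number, so the
 * different resource namespaces cannot collide within the single tree.
 */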
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = curthread->td_proc->p_pid;

        return 0;
}

static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
        spin_unlock_irqrestore(&table->lock, flags);

        /* Drop the initial reference and wait out any concurrent users. */
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

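/*
 * Create a QP in firmware and register it in the driver's tracking
 * table.  If tracking setup fails after the firmware object already
 * exists, the QP is destroyed again so that no orphaned firmware state
 * is left behind.
 */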
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
        u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
        u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
        int err;

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                return err;

        qp->uid = MLX5_GET(create_qp_in, in, uid);
        qp->qpn = MLX5_GET(create_qp_out, out, qpn);
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        MLX5_SET(destroy_qp_in, din, uid, qp->uid);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
        int err;

        destroy_qprqsq_common(dev, qp, MLX5_RES_QP);

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
        MLX5_SET(destroy_qp_in, in, uid, qp->uid);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

struct mbox_info {
        u32 *in;
        u32 *out;
        int inlen;
        int outlen;
};

static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
        mbox->inlen = inlen;
        mbox->outlen = outlen;
        mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
        mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
        if (!mbox->in || !mbox->out) {
                kfree(mbox->in);
                kfree(mbox->out);
                return -ENOMEM;
        }

        return 0;
}

static void mbox_free(struct mbox_info *mbox)
{
        kfree(mbox->in);
        kfree(mbox->out);
}

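/*
 * Each QP state transition uses its own command layout, so the mailbox
 * sizes and the common input fields (opcode, qpn, uid and, for the
 * transitions that carry one, the QP context) are filled in per opcode
 * via the helper macros below.
 */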
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                                u32 opt_param_mask, void *qpc,
                                struct mbox_info *mbox, u16 uid)
{
        mbox->out = NULL;
        mbox->in = NULL;

#define MBOX_ALLOC(mbox, typ) \
        mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
        do { \
                MLX5_SET(typ##_in, in, opcode, _opcode); \
                MLX5_SET(typ##_in, in, qpn, _qpn); \
                MLX5_SET(typ##_in, in, uid, _uid); \
        } while (0)

#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
        do { \
                MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
                MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
                memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
                       MLX5_ST_SZ_BYTES(qpc)); \
        } while (0)

        switch (opcode) {
        /* 2RST & 2ERR */
        case MLX5_CMD_OP_2RST_QP:
                if (MBOX_ALLOC(mbox, qp_2rst))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
                break;
        case MLX5_CMD_OP_2ERR_QP:
                if (MBOX_ALLOC(mbox, qp_2err))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
                break;

        /* MODIFY with QPC */
        case MLX5_CMD_OP_RST2INIT_QP:
                if (MBOX_ALLOC(mbox, rst2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                if (MBOX_ALLOC(mbox, rtr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                if (MBOX_ALLOC(mbox, rts2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                if (MBOX_ALLOC(mbox, sqerr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        case MLX5_CMD_OP_INIT2INIT_QP:
                if (MBOX_ALLOC(mbox, init2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
                break;
        default:
                mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
                              opcode, qpn);
                return -EINVAL;
        }

        return 0;
}

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
                        u32 opt_param_mask, void *qpc,
                        struct mlx5_core_qp *qp)
{
        struct mbox_info mbox;
        int err;

        err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
                                   opt_param_mask, qpc, &mbox, qp->uid);
        if (err)
                return err;

        err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
        mbox_free(&mbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        /* Nothing to do; entries are removed as their owners destroy them. */
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};

        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
        MLX5_SET(query_qp_in, in, qpn, qp->qpn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
        int err;

        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
                *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};

        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

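/*
 * DCT (Dynamically Connected Transport) objects share the QP tracking
 * table.  The "drained" completion initialized here is waited on during
 * teardown, after the firmware has been asked to drain the DCT.
 */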
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *dct,
                         u32 *in, int inlen,
                         u32 *out, int outlen)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
        u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
        int err;

        init_completion(&dct->drained);
        MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

        err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
                return err;
        }

        dct->dctn = MLX5_GET(create_dct_out, out, dctn);
        dct->uid = MLX5_GET(create_dct_in, in, uid);

        dct->common.res = MLX5_RES_DCT;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, dct->dctn, dct);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "DCT insert failed, err %d\n", err);
                goto err_cmd;
        }

        dct->pid = curthread->td_proc->p_pid;
        atomic_set(&dct->common.refcount, 1);
        init_completion(&dct->common.free);

        return 0;

err_cmd:
        MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
        MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
        MLX5_SET(destroy_dct_in, din, uid, dct->uid);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
                               struct mlx5_core_dct *dct)
{
        u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};

        MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
        MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
        MLX5_SET(drain_dct_in, in, uid, dct->uid);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

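/*
 * Tear down a DCT: drain it first (unless the device is already in
 * internal error, in which case the drain command cannot succeed),
 * unhook it from the tracking table, wait for the last reference to go
 * away, and only then destroy the firmware object.
 */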
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                          struct mlx5_core_dct *dct)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
        unsigned long flags;
        int err;

        err = mlx5_core_drain_dct(dev, dct);
        if (err) {
                if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                        goto free_dct;
                } else {
                        mlx5_core_warn(dev, "failed to drain DCT 0x%x\n",
                                       dct->dctn);
                        return err;
                }
        }

        wait_for_completion(&dct->drained);

free_dct:
        spin_lock_irqsave(&table->lock, flags);
        if (radix_tree_delete(&table->tree, dct->dctn) != dct)
                mlx5_core_warn(dev, "dct delete differs\n");
        spin_unlock_irqrestore(&table->lock, flags);

        if (atomic_dec_and_test(&dct->common.refcount))
                complete(&dct->common.free);
        wait_for_completion(&dct->common.free);

        MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
        MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);
        MLX5_SET(destroy_dct_in, in, uid, dct->uid);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
                        u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};

        MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
        MLX5_SET(query_dct_in, in, dctn, dct->dctn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
{
        u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(arm_dct_in)] = {0};

        MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
        MLX5_SET(arm_dct_in, in, dctn, dct->dctn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);

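/*
 * "Tracked" RQs and SQs are transport objects registered through
 * create_qprqsq_common() above, so they participate in the same
 * reference-counted lifetime management as QPs.
 */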
static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
{
        u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
        u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};

        MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
        MLX5_SET(destroy_rq_in, in, rqn, rqn);
        MLX5_SET(destroy_rq_in, in, uid, uid);
        mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;

        err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
        if (err)
                return err;

        rq->uid = MLX5_GET(create_rq_in, in, uid);

        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                destroy_rq_tracked(dev, rq->qpn, rq->uid);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
        destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
{
        u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
        u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};

        MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
        MLX5_SET(destroy_sq_in, in, sqn, sqn);
        MLX5_SET(destroy_sq_in, in, uid, uid);
        mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;

        err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
        if (err)
                return err;

        sq->uid = MLX5_GET(create_sq_in, in, uid);

        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                destroy_sq_tracked(dev, sq->qpn, sq->uid);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
        destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);