/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <sys/filio.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

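/*
 * Look up a context by id and check that it belongs to the given file.
 * The caller must hold the global "mut" mutex; no reference is taken.
 */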
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

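/*
 * Find a context by id and take a reference on it. A context whose
 * hardware resources are being torn down returns -EIO instead. Each
 * successful call must be balanced by ucma_put_ctx().
 */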
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

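/*
 * Drop a context reference. The final put completes ctx->comp, allowing
 * a pending destroy (ucma_close_id or ucma_destroy_id) to proceed.
 */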
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but also requires that ->cm_id->device is valid,
 * i.e. that the CM_ID is bound to a device.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

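/*
 * Work handler that destroys the CM_ID attached to an unclaimed connect
 * request event when the underlying device is being removed.
 */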
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

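/*
 * Allocate a new context, assign it an idr id, and link it onto the
 * file's context list. Callers are expected to hold file->mut.
 */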
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = __DECONST(struct ucma_multicast *,
		    event->param.ud.private_data);
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only when the context still owns this cm_id can it be queued
	 * for closing. Otherwise the cm_id belongs to an inflight connect
	 * request event on this context's event list, waiting to be
	 * detached and reattached to a new context by ucma_get_event;
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

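/*
 * RDMA CM event callback. Queues a ucma_event on the owning file's event
 * list and wakes up pollers. A non-zero return for a connect request
 * tells the rdma_cm core that the event was not consumed, so it may
 * destroy the newly created CM_ID.
 */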
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

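/*
 * Hand the next pending event to user space, blocking if the event list
 * is empty and O_NONBLOCK is not set. A connect request event allocates
 * a new context for the incoming CM_ID before being reported.
 */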
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

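/*
 * Create a new CM_ID together with its wrapping user-space context. On
 * failure the partially constructed context is unlinked and freed.
 */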
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(TD_TO_VNET(curthread),
	    ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context's pending event list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

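/*
 * Install a user-supplied primary path record and synthesize a
 * RDMA_CM_EVENT_ROUTE_RESOLVED event so user space sees the id as routed.
 */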
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

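/*
 * Common multicast join path. Allocates a ucma_multicast, joins the group
 * and reports the multicast id back to user space; all failure paths undo
 * the join and free the allocation.
 */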
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

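/*
 * Lock two files' mutexes in a globally consistent (pointer) order so
 * that concurrent migrations between the same pair of files cannot
 * deadlock against each other.
 */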
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

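/*
 * Move a context, together with any events it has pending, from the
 * file it was created on to the file issuing this command.
 */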
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT] = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION] = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY] = ucma_query,
	[RDMA_USER_CM_CMD_BIND] = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
};

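/*
 * All commands arrive through write(2): a rdma_ucm_cmd_hdr selecting the
 * handler from ucma_cmd_table, followed by hdr.in bytes of command payload.
 */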
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

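/* poll(2) support: readable whenever the file's event list is non-empty. */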
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

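/*
 * Release handler: tear down every context still attached to the file,
 * following the same destroy protocol as ucma_destroy_id().
 */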
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* At this point the ctx is marked as destroying and the
		 * workqueue has been flushed, so we are safe from any
		 * inflight handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static long
ucma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);
	default:
		return (-ENOTTY);
	}
}

static const struct file_operations ucma_fops = {
	.owner = THIS_MODULE,
	.open = ucma_open,
	.release = ucma_close,
	.write = ucma_write,
	.unlocked_ioctl = ucma_ioctl,
	.poll = ucma_poll,
	.llseek = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rdma_cm",
	.nodename = "infiniband/rdma_cm",
	.mode = 0666,
	.fops = &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	return 0;
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init_order(ucma_init, SI_ORDER_FIFTH);
module_exit_order(ucma_cleanup, SI_ORDER_FIFTH);