/*
 * Copyright (c) 2016, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm/atomic.h>
#include <linux/file.h>
#include <linux/lockdep.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

static void uverbs_uobject_free(struct kref *ref)
{
	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
}

/*
 * In order to indicate we no longer need this uobject, uverbs_uobject_put
 * is called. When the reference count is decreased, the uobject is freed.
 * For example, this is used when attaching a completion channel to a CQ.
 */
void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}
EXPORT_SYMBOL(uverbs_uobject_put);

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	/*
	 * When shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments
	 * it. Exclusive access is required for operations like write or
	 * destroy. In exclusive access mode, we check that the counter is
	 * zero (nobody claimed this object) and we set it to -1. Releasing
	 * a shared access lock is done simply by decreasing the counter. As
	 * for exclusive access locks, since only a single one of them is
	 * allowed concurrently, setting the counter to zero is enough for
	 * releasing this lock.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* lock is exclusive */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		return 0;
	}
	return 0;
}
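
/*
 * A minimal sketch of how the usecnt lock pairs up in practice (an
 * illustrative fragment, not a function in this file). Readers take the
 * shared lock and release it by decrementing the counter; writers take the
 * exclusive lock and release it by setting the counter back to zero, which
 * is exactly what rdma_lookup_put_uobject() does below:
 *
 *	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ)) {
 *		... read-only use of uobj->object ...
 *		atomic_dec(&uobj->usecnt);	// lookup_put, READ mode
 *	}
 *
 *	if (!uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)) {
 *		... exclusive use of uobj->object ...
 *		atomic_set(&uobj->usecnt, 0);	// lookup_put, WRITE mode
 *	}
 */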

static void assert_uverbs_usecnt(struct ib_uobject *uobj,
				 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
		break;
	case UVERBS_LOOKUP_WRITE:
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * also the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the uobjects list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (e.g. via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
				  enum rdma_remove_reason reason,
				  struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);
	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

	if (reason == RDMA_REMOVE_ABORT) {
		WARN_ON(!list_empty(&uobj->list));
		WARN_ON(!uobj->context);
		uobj->uapi_object->type_class->alloc_abort(uobj);
	} else if (uobj->object) {
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
								attrs);
		if (ret) {
			if (ib_is_destroy_retryable(ret, reason, uobj))
				return ret;

			/* Nothing to be done, dangle the memory and move on */
			WARN(true,
			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
			     uobj->id, ret);
		}

		uobj->object = NULL;
	}

	uobj->context = NULL;

	/*
	 * For DESTROY the usecnt is held write locked, the caller is
	 * expected to unlock it and put the object when done with it. Only
	 * DESTROY can remove the IDR handle.
	 */
	if (reason != RDMA_REMOVE_DESTROY)
		atomic_set(&uobj->usecnt, 0);
	else
		uobj->uapi_object->type_class->remove_handle(uobj);

	if (!list_empty(&uobj->list)) {
		spin_lock_irqsave(&ufile->uobjects_lock, flags);
		list_del_init(&uobj->list);
		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

		/*
		 * Pairs with the get in rdma_alloc_commit_uobject(), could
		 * destroy uobj.
		 */
		uverbs_uobject_put(uobj);
	}

	/*
	 * When aborting, the stack kref remains owned by the core code and
	 * is not transferred into the type. Pairs with the get in alloc_uobj.
	 */
	if (reason == RDMA_REMOVE_ABORT)
		uverbs_uobject_put(uobj);

	return 0;
}

/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
 * version requires the caller to have already obtained a
 * LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	int ret;

	down_read(&ufile->hw_destroy_rwsem);

	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
	if (ret)
		goto out_unlock;

	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
	if (ret) {
		atomic_set(&uobj->usecnt, 0);
		goto out_unlock;
	}

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
	return ret;
}

/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * uobj_put_destroy().
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
				      u32 id, struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_DESTROY, attrs);
	if (IS_ERR(uobj))
		return uobj;

	ret = uobj_destroy(uobj, attrs);
	if (ret) {
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		return ERR_PTR(ret);
	}

	return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
			   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return 0;
}
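
/*
 * Sketch of a typical destroy-command handler built on the helper above (a
 * hypothetical handler; the object and attribute names are illustrative and
 * not part of this file). The handler copies in the user handle and lets
 * __uobj_perform_destroy() run the lookup/destroy/put sequence:
 *
 *	static int my_destroy_handler(struct uverbs_attr_bundle *attrs)
 *	{
 *		u32 id;
 *
 *		if (uverbs_copy_from(&id, attrs, MY_ATTR_DESTROY_HANDLE))
 *			return -EFAULT;
 *		return __uobj_perform_destroy(my_api_object, id, attrs);
 *	}
 */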

/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
				     const struct uverbs_api_object *obj)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *uobj;

	if (!attrs->context) {
		struct ib_ucontext *ucontext =
			ib_uverbs_get_ucontext_file(ufile);

		if (IS_ERR(ucontext))
			return ERR_CAST(ucontext);
		attrs->context = ucontext;
	}

	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * The user_handle should be filled by the handler; the object is
	 * added to the list in the commit stage.
	 */
	uobj->ufile = ufile;
	uobj->context = attrs->context;
	INIT_LIST_HEAD(&uobj->list);
	uobj->uapi_object = obj;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject.
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}

#define NULL_IB_UOBJECT ((struct ib_uobject *)1)

static int idr_add_uobj(struct ib_uobject *uobj)
{
	/*
	 * We start by allocating an idr entry pointing at the
	 * NULL_IB_UOBJECT sentinel. This represents an object which isn't
	 * initialized yet. We'll replace it with the real object once we
	 * commit.
	 */
	return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL_IB_UOBJECT,
			xa_limit_32b, GFP_KERNEL);
}
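
/*
 * For reference, the life cycle of an IDR slot as implemented by the
 * functions in this file (a summary, not new mechanism):
 *
 *	idr_add_uobj()			xa_alloc(..., NULL_IB_UOBJECT, ...)
 *		lookups in this window see the sentinel and fail -ENOENT
 *	alloc_commit_idr_uobject()	xa_store(..., uobj, ...)
 *		lookups now succeed and take their own kref on uobj
 *	remove_handle_idr_uobject()	xa_erase(...)
 *		the slot is gone and the XArray's kref on uobj is put
 */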

/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile, s64 id,
		       enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;

	if (id < 0 || id > ULONG_MAX)
		return ERR_PTR(-EINVAL);

	rcu_read_lock();
	/*
	 * The xa_load() is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after xa_erase() goes
	 * through kfree_rcu(). However the object may still have been
	 * released and kfree() could be called at any time.
	 */
	uobj = xa_load(&ufile->idr, id);
	if (!uobj || uobj == NULL_IB_UOBJECT ||
	    !kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
		      struct ib_uverbs_file *ufile, s64 id,
		      enum rdma_lookup_mode mode)
{
	const struct uverbs_obj_fd_type *fd_type;
	struct file *f;
	struct ib_uobject *uobject;
	int fdno = id;

	if (fdno != id)
		return ERR_PTR(-EINVAL);

	if (mode != UVERBS_LOOKUP_READ)
		return ERR_PTR(-EOPNOTSUPP);

	if (!obj->type_attrs)
		return ERR_PTR(-EIO);
	fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

	f = fget(fdno);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(fdno) ensures we are not currently running
	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
	 * that release is never done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
					   struct ib_uverbs_file *ufile, s64 id,
					   enum rdma_lookup_mode mode,
					   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	if (obj == ERR_PTR(-ENOMSG)) {
		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;
	} else {
		if (IS_ERR(obj))
			return ERR_PTR(-EINVAL);

		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;

		if (uobj->uapi_object != obj) {
			ret = -EINVAL;
			goto free;
		}
	}

	/*
	 * If we have been disassociated, block every command except for
	 * DESTROY-based commands.
	 */
	if (mode != UVERBS_LOOKUP_DESTROY &&
	    !srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu)) {
		ret = -EIO;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, mode);
	if (ret)
		goto free;
	if (attrs)
		attrs->context = uobj->context;

	return uobj;
free:
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}
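
/*
 * Sketch of the get/put pairing a command handler is expected to follow (an
 * illustrative fragment; the surrounding handler is hypothetical):
 *
 *	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 *				       UVERBS_LOOKUP_READ, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *
 *	... use uobj->object under the shared lock ...
 *
 *	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 */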

static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
			struct uverbs_attr_bundle *attrs)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto remove;

	return uobj;

remove:
	xa_erase(&attrs->ufile->idr, uobj->id);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
		       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct file *filp;

	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
		return ERR_PTR(-EINVAL);

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		goto err_fd;

	/* Note that uverbs_uobject_fd_release() is called during abort */
	filp = alloc_file(fd_type->flags, fd_type->fops);
	if (IS_ERR(filp)) {
		/* Drop our kref before overwriting uobj with the error */
		uverbs_uobject_put(uobj);
		uobj = ERR_CAST(filp);
		goto err_fd;
	}
	uobj->object = filp;

	uobj->id = new_fd;
	return uobj;

err_fd:
	put_unused_fd(new_fd);
	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
					    struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *ret;

	if (IS_ERR(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The hw_destroy_rwsem is held across the entire object creation and
	 * released during rdma_alloc_commit_uobject or
	 * rdma_alloc_abort_uobject.
	 */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		return ERR_PTR(-EIO);

	ret = obj->type_class->alloc_begin(obj, attrs);
	if (IS_ERR(ret)) {
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}
	return ret;
}

static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	/* Pairs with the charge in alloc_begin_idr_uobject() */
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	xa_erase(&uobj->ufile->idr, uobj->id);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
					       enum rdma_remove_reason why,
					       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->uapi_object->type_attrs,
			     struct uverbs_obj_idr_type, type);
	int ret = idr_type->destroy_object(uobj, why, attrs);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object or when a retry may be called upon an error.
	 * In the rest of the cases, just remove whatever you can.
	 */
	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	/* The abort path uncharges in alloc_abort_idr_uobject() */
	if (why == RDMA_REMOVE_ABORT)
		return 0;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
	xa_erase(&uobj->ufile->idr, uobj->id);
	/* Matches the kref in alloc_commit_idr_uobject */
	uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct file *filp = uobj->object;

	fput(filp);
	put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
					      enum rdma_remove_reason why,
					      struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int ret = fd_type->destroy_object(uobj, why);

	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	void *old;

	/*
	 * We already allocated this IDR entry with the NULL_IB_UOBJECT
	 * sentinel, so this shouldn't fail.
	 *
	 * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
	 * It will be put by remove_handle_idr_uobject().
	 */
	old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
	WARN_ON(old != NULL_IB_UOBJECT);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	int fd = uobj->id;
	struct file *filp = uobj->object;

	/* Matching put will be done in uverbs_uobject_fd_release() */
	kref_get(&uobj->ufile->ref);

	/* This shouldn't be used anymore. Use the file object instead */
	uobj->id = 0;

	/*
	 * NOTE: Once we install the file we lose ownership of our kref on
	 * uobj. It will be put by uverbs_uobject_fd_release()
	 */
	filp->private_data = uobj;
	fd_install(fd, filp);
}

/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref on uobj and the
 * caller can no longer assume uobj is valid.
 */
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
			       struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;

	/* alloc_commit consumes the uobj kref */
	uobj->uapi_object->type_class->alloc_commit(uobj);

	/* kref is held so long as the uobj is on the uobj list. */
	uverbs_uobject_get(uobj);
	spin_lock_irq(&ufile->uobjects_lock);
	list_add(&uobj->list, &ufile->uobjects);
	spin_unlock_irq(&ufile->uobjects_lock);

	/* matches atomic_set(-1) in alloc_uobj */
	atomic_set(&uobj->usecnt, 0);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}
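
/*
 * Sketch of the begin/commit/abort flow as seen from a create-command
 * handler (illustrative only; real handlers usually go through wrappers
 * such as uobj_alloc() rather than calling these entry points directly,
 * and my_create_hw_object() is a stand-in for a driver call):
 *
 *	uobj = rdma_alloc_begin_uobject(obj, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *
 *	uobj->object = my_create_hw_object(attrs);
 *	if (IS_ERR(uobj->object)) {
 *		int err = PTR_ERR(uobj->object);
 *
 *		uobj->object = NULL;
 *		rdma_alloc_abort_uobject(uobj, attrs);	// consumes uobj
 *		return err;
 *	}
 *
 *	rdma_alloc_commit_uobject(uobj, attrs);		// consumes uobj
 *	return 0;
 */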

/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
			      struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
				   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	struct file *filp = uobj->object;

	WARN_ON(mode != UVERBS_LOOKUP_READ);
	/*
	 * This indirectly calls uverbs_uobject_fd_release() and frees the
	 * object.
	 */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			     enum rdma_lookup_mode mode)
{
	assert_uverbs_usecnt(uobj, mode);
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		atomic_dec(&uobj->usecnt);
		break;
	case UVERBS_LOOKUP_WRITE:
		atomic_set(&uobj->usecnt, 0);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}

	/* Pairs with the kref obtained by type->lookup_get */
	uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	struct ib_uobject *entry;
	unsigned long id;

	/*
	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
	 * there are no HW objects left, however the xarray is still populated
	 * with anything that has not been cleaned up by userspace. Since the
	 * kref on ufile is 0, nothing is allowed to call lookup_get.
	 *
	 * This is an optimized equivalent to remove_handle_idr_uobject
	 */
	xa_for_each(&ufile->idr, id, entry) {
		WARN_ON(entry->object);
		uverbs_uobject_put(entry);
	}

	xa_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.destroy_hw = destroy_hw_idr_uobject,
	.remove_handle = remove_handle_idr_uobject,
};
EXPORT_SYMBOL(uverbs_idr_class);

/*
 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
 * file_operations release method.
 */
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *ufile;
	struct ib_uobject *uobj;

	/*
	 * This can only happen if the fput came from alloc_abort_fd_uobject()
	 */
	if (!filp->private_data)
		return 0;
	uobj = filp->private_data;
	ufile = uobj->ufile;

	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
		struct uverbs_attr_bundle attrs = {
			.context = uobj->context,
			.ufile = ufile,
		};

		/*
		 * lookup_get_fd_uobject holds the kref on the struct file any
		 * time a FD uobj is locked, which prevents this release
		 * method from being invoked. Meaning we can always get the
		 * write lock here, or we have a kernel bug.
		 */
		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
		up_read(&ufile->hw_destroy_rwsem);
	}

	/* Matches the get in alloc_commit_fd_uobject() */
	kref_put(&ufile->ref, ib_uverbs_release_file);

	/* Pairs with filp->private_data in alloc_begin_fd_uobject */
	uverbs_uobject_put(uobj);
	return 0;
}
EXPORT_SYMBOL(uverbs_uobject_fd_release);
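
/*
 * Sketch of how an FD object type wires this in (a hypothetical
 * file_operations table; the read handler is illustrative only).
 * alloc_begin_fd_uobject() WARNs unless .release points at
 * uverbs_uobject_fd_release():
 *
 *	static const struct file_operations my_fd_fops = {
 *		.owner	 = THIS_MODULE,
 *		.read	 = my_fd_read,
 *		.release = uverbs_uobject_fd_release,
 *	};
 */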

/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
				   enum rdma_remove_reason reason)
{
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *ib_dev = ucontext->device;

	/*
	 * If we are closing the FD then the user mmap VMAs must have
	 * already been destroyed as they hold on to the filep, otherwise
	 * they need to be zap'd.
	 */
	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
		uverbs_user_mmap_disassociate(ufile);
		if (ib_dev->disassociate_ucontext)
			ib_dev->disassociate_ucontext(ucontext);
	}

	ib_dev->dealloc_ucontext(ucontext);
	WARN_ON(!xa_empty(&ucontext->mmap_xa));
	kfree(ucontext);

	ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
				  enum rdma_remove_reason reason)
{
	struct ib_uobject *obj, *next_obj;
	int ret = -EINVAL;
	struct uverbs_attr_bundle attrs = { .ufile = ufile };

	/*
	 * This shouldn't run while executing other commands on this
	 * context. Thus, the only thing we should take care of is
	 * releasing a FD while traversing this list. The FD could be
	 * closed and released from the _release fop of this FD.
	 * In order to mitigate this, we add a lock.
	 * We take and release the lock per traversal in order to give
	 * other threads (which might still use the FDs) a chance to run.
	 */
	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
		attrs.context = obj->context;
		/*
		 * If we hit this WARN_ON, that means we are
		 * racing with a lookup_get.
		 */
		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
		if (!uverbs_destroy_uobject(obj, reason, &attrs))
			ret = 0;
		else
			atomic_set(&obj->usecnt, 0);
	}
	return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
			     enum rdma_remove_reason reason)
{
	down_write(&ufile->hw_destroy_rwsem);

	/*
	 * If a ucontext was never created then we can't have any uobjects to
	 * cleanup, nothing to do.
	 */
	if (!ufile->ucontext)
		goto done;

	ufile->ucontext->closing = true;
	ufile->ucontext->cleanup_retryable = true;
	while (!list_empty(&ufile->uobjects))
		if (__uverbs_cleanup_ufile(ufile, reason)) {
			/*
			 * No entry was cleaned-up successfully during this
			 * iteration
			 */
			break;
		}

	ufile->ucontext->cleanup_retryable = false;
	if (!list_empty(&ufile->uobjects))
		__uverbs_cleanup_ufile(ufile, reason);

	ufile_destroy_ucontext(ufile, reason);

done:
	up_write(&ufile->hw_destroy_rwsem);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.destroy_hw = destroy_hw_fd_uobject,
	.remove_handle = remove_handle_fd_uobject,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
			     s64 id, struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_api_object *obj =
		uapi_get_object(attrs->ufile->device->uapi, object_id);

	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_READ, attrs);
	case UVERBS_ACCESS_DESTROY:
		/* Actual destruction is done inside uverbs_handle_method */
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_DESTROY, attrs);
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_WRITE, attrs);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(obj, attrs);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

void uverbs_finalize_object(struct ib_uobject *uobj,
			    enum uverbs_obj_access access, bool commit,
			    struct uverbs_attr_bundle *attrs)
{
	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (uobj)
			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			rdma_alloc_commit_uobject(uobj, attrs);
		else
			rdma_alloc_abort_uobject(uobj, attrs);
		break;
	default:
		WARN_ON(true);
	}
}
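
/*
 * For reference, the access/lookup pairing that
 * uverbs_get_uobject_from_file() and uverbs_finalize_object() implement
 * between them (a summary of the two switches above, not new mechanism):
 *
 *	UVERBS_ACCESS_READ	LOOKUP_READ get	   -> LOOKUP_READ put
 *	UVERBS_ACCESS_WRITE	LOOKUP_WRITE get   -> LOOKUP_WRITE put
 *	UVERBS_ACCESS_DESTROY	LOOKUP_DESTROY get -> LOOKUP_DESTROY put
 *	UVERBS_ACCESS_NEW	alloc_begin	   -> commit or abort
 */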