FreeBSD/Linux Kernel Cross Reference
sys/ofed/drivers/infiniband/core/ib_uverbs_cmd.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LINUXKPI_PARAM_PREFIX ibcore_

#include <sys/priv.h>

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger
 * buffer then the trailing portion is zero-filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
                           size_t resp_len)
{
        int ret;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                return uverbs_copy_to_struct_or_zero(
                        attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);

        if (copy_to_user(attrs->ucore.outbuf, resp,
                         min(attrs->ucore.outlen, resp_len)))
                return -EFAULT;

        if (resp_len < attrs->ucore.outlen) {
                /*
                 * Zero fill any extra memory that user
                 * space might have provided.
                 */
                ret = clear_user(attrs->ucore.outbuf + resp_len,
                                 attrs->ucore.outlen - resp_len);
                if (ret)
                        return -EFAULT;
        }

        return 0;
}
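
/*
 * Illustration of the semantics above (informational sketch, not executed
 * code): with resp_len = 16 and ucore.outlen = 24, all 16 response bytes
 * are copied and bytes 16..23 of the user buffer are cleared; with
 * ucore.outlen = 8, only the first 8 bytes of 'resp' reach userspace.
 */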

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero-extended into 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
                          size_t req_len)
{
        if (copy_from_user(req, attrs->ucore.inbuf,
                           min(attrs->ucore.inlen, req_len)))
                return -EFAULT;

        if (attrs->ucore.inlen < req_len) {
                memset((u8 *)req + attrs->ucore.inlen, 0,
                       req_len - attrs->ucore.inlen);
        } else if (attrs->ucore.inlen > req_len) {
                if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
                                          attrs->ucore.inlen - req_len))
                        return -EOPNOTSUPP;
        }
        return 0;
}
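
/*
 * Typical handler usage (sketch mirroring the handlers below, e.g.
 * ib_uverbs_dealloc_pd()): a fixed-size command structure is read and the
 * user buffer length is implicitly validated:
 *
 *      struct ib_uverbs_dealloc_pd cmd;
 *      int ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 *      if (ret)
 *              return ret;
 */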

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
                                  size_t resp_len)
{
        return min_t(size_t, attrs->ucore.outlen, resp_len);
}
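
/*
 * Worked example (sketch): if the kernel's response structure is 32 bytes
 * but the caller supplied only a 24-byte output buffer, response_length is
 * 24, telling userspace that the trailing members were never written.
 */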

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
        const u8 __user *cur;
        const u8 __user *end;
};

static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
                                struct uverbs_req_iter *iter,
                                void *req,
                                size_t req_len)
{
        if (attrs->ucore.inlen < req_len)
                return -ENOSPC;

        if (copy_from_user(req, attrs->ucore.inbuf, req_len))
                return -EFAULT;

        iter->cur = attrs->ucore.inbuf + req_len;
        iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
        return 0;
}

static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
                               size_t len)
{
        if (iter->cur + len > iter->end)
                return -ENOSPC;

        if (copy_from_user(val, iter->cur, len))
                return -EFAULT;

        iter->cur += len;
        return 0;
}

static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
                                                  size_t len)
{
        const void __user *res = iter->cur;

        if (iter->cur + len > iter->end)
                return (void __force __user *)ERR_PTR(-ENOSPC);
        iter->cur += len;
        return res;
}

static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
        if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
                return -EOPNOTSUPP;
        return 0;
}
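
/*
 * Iterator usage sketch (hypothetical handler; the names 'cmd', 'elem' and
 * 'count' are illustrative, not part of this file): a fixed header followed
 * by cmd.count fixed-size elements, with finish() rejecting trailing
 * garbage:
 *
 *      ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *      for (i = 0; !ret && i < cmd.count; i++)
 *              ret = uverbs_request_next(&iter, &elem, sizeof(elem));
 *      if (!ret)
 *              ret = uverbs_request_finish(&iter);
 */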

/*
 * When calling a destroy function during an error unwind we need to pass in
 * udata that has been sanitized of all user arguments, i.e., from the
 * driver's perspective it looks like no udata was passed.
 */
struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
        attrs->driver_udata = (struct ib_udata){};
        return &attrs->driver_udata;
}
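
/*
 * Usage sketch (taken from the error paths below, e.g. ib_uverbs_alloc_pd()):
 *
 *      ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
 */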

static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
                                               fd, attrs);

        if (IS_ERR(uobj))
                return (void *)uobj;

        uverbs_uobject_get(uobj);
        uobj_put_read(uobj);

        return container_of(uobj, struct ib_uverbs_completion_event_file,
                            uobj);
}
#define ib_uverbs_lookup_comp_file(_fd, _ufile) ({                      \
        CTASSERT(sizeof(_fd) == sizeof(s32));                           \
        _ib_uverbs_lookup_comp_file(_fd, _ufile);                       \
})
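
/*
 * Note (descriptive, added for clarity): the helper above takes its own
 * uobject reference before dropping the read lock, so the returned
 * completion-event file stays valid after the lookup; the wrapper macro
 * merely enforces at compile time that the fd argument really is an s32.
 */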

int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ib_dev = srcu_dereference(ufile->device->ib_dev,
                                  &ufile->device->disassociate_srcu);
        if (!ib_dev)
                return -EIO;

        ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
        if (!ucontext)
                return -ENOMEM;

        ucontext->device = ib_dev;
        ucontext->ufile = ufile;
        xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
        attrs->context = ucontext;
        return 0;
}

int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_ucontext *ucontext = attrs->context;
        struct ib_uverbs_file *file = attrs->ufile;
        int ret;

        if (!down_read_trylock(&file->hw_destroy_rwsem))
                return -EIO;
        mutex_lock(&file->ucontext_lock);
        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        ret = ucontext->device->alloc_ucontext(ucontext,
                                               &attrs->driver_udata);
        if (ret)
                goto err_uncharge;

        /*
         * Make sure that ib_uverbs_get_ucontext() sees the pointer update
         * only after all writes to set up the ucontext have completed.
         */
        atomic_store_rel_ptr((uintptr_t *)&file->ucontext, (uintptr_t)ucontext);

        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return 0;

err_uncharge:
err:
        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return ret;
}
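
/*
 * Ordering note (descriptive; the matching reader lives elsewhere in this
 * file): the release store above is presumably paired with an acquire load
 * in ib_uverbs_get_ucontext(), so a thread that observes file->ucontext
 * non-NULL also observes the fully initialized ucontext fields.
 */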

static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_get_context_resp resp;
        struct ib_uverbs_get_context cmd;
        struct ib_device *ib_dev;
        struct ib_uobject *uobj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_alloc_ucontext(attrs);
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
        if (IS_ERR(uobj)) {
                ret = PTR_ERR(uobj);
                goto err_ucontext;
        }

        resp = (struct ib_uverbs_get_context_resp){
                .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
                .async_fd = uobj->id,
        };
        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_uobj;

        ret = ib_init_ucontext(attrs);
        if (ret)
                goto err_uobj;

        ib_uverbs_init_async_event_file(
                container_of(uobj, struct ib_uverbs_async_event_file, uobj));
        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_uobj:
        rdma_alloc_abort_uobject(uobj, attrs);
err_ucontext:
        kfree(attrs->context);
        attrs->context = NULL;
        return ret;
}

static void copy_query_dev_fields(struct ib_ucontext *ucontext,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        struct ib_device *ib_dev = ucontext->device;

        resp->fw_ver            = attr->fw_ver;
        resp->node_guid         = ib_dev->node_guid;
        resp->sys_image_guid    = attr->sys_image_guid;
        resp->max_mr_size       = attr->max_mr_size;
        resp->page_size_cap     = attr->page_size_cap;
        resp->vendor_id         = attr->vendor_id;
        resp->vendor_part_id    = attr->vendor_part_id;
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
        resp->device_cap_flags  = (u32)attr->device_cap_flags;
        resp->max_sge           = min(attr->max_send_sge, attr->max_recv_sge);
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
        resp->max_cqe           = attr->max_cqe;
        resp->max_mr            = attr->max_mr;
        resp->max_pd            = attr->max_pd;
        resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
        resp->max_res_rd_atom   = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
        resp->atomic_cap                = attr->atomic_cap;
        resp->max_ee                    = attr->max_ee;
        resp->max_rdd                   = attr->max_rdd;
        resp->max_mw                    = attr->max_mw;
        resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
        resp->max_mcast_grp             = attr->max_mcast_grp;
        resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah                    = attr->max_ah;
        resp->max_fmr                   = attr->max_fmr;
        resp->max_map_per_fmr           = attr->max_map_per_fmr;
        resp->max_srq                   = attr->max_srq;
        resp->max_srq_wr                = attr->max_srq_wr;
        resp->max_srq_sge               = attr->max_srq_sge;
        resp->max_pkeys                 = attr->max_pkeys;
        resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
        resp->phys_port_cnt             = ib_dev->phys_port_cnt;
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_ucontext *ucontext;
        int ret;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);
        ib_dev = ucontext->device;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_query_port(ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
        if (!pd) {
                ret = -ENOMEM;
                goto err;
        }

        pd->device  = ib_dev;
        pd->uobject = uobj;
        pd->__internal_mr = NULL;
        atomic_set(&pd->usecnt, 0);

        ret = ib_dev->alloc_pd(pd, &attrs->driver_udata);
        if (ret)
                goto err_alloc;

        uobj->object = pd;
        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_copy;

        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_copy:
        ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
        pd = NULL;
err_alloc:
        kfree(pd);
err:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_pd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct vnode   *vnode;
};
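
/*
 * Descriptive note: entries live in dev->xrcd_tree, a red-black tree keyed
 * by the backing vnode pointer, so that concurrent opens of the same file
 * resolve to the same shared XRCD (see find_xrcd() below).
 */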

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct vnode *vnode,
                             struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->vnode = vnode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if ((uintptr_t)vnode < (uintptr_t)scan->vnode) {
                        p = &(*p)->rb_left;
                } else if ((uintptr_t)vnode > (uintptr_t)scan->vnode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        vrefact(vnode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct vnode *vnode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if ((uintptr_t)vnode < (uintptr_t)entry->vnode)
                        p = p->rb_left;
                else if ((uintptr_t)vnode > (uintptr_t)entry->vnode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct vnode *vnode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, vnode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct vnode *vnode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, vnode);
        if (entry) {
                vrele(vnode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_device *ibudev = attrs->ufile->device;
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
        struct vnode                   *vnode = NULL;
        int                             ret = 0;
        int                             new_xrcd = 0;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        mutex_lock(&ibudev->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* resolve the file descriptor to its backing vnode */
                ret = -fgetvp(curthread, cmd.fd, &cap_no_rights, &vnode);
                if (ret != 0)
                        goto err_tree_mutex_unlock;

                xrcd = find_xrcd(ibudev, vnode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no XRCD for this vnode yet; O_CREAT is required */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
                                                   &ib_dev);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto err_tree_mutex_unlock;
        }

        if (!xrcd) {
                xrcd = ib_dev->alloc_xrcd(ib_dev, &attrs->driver_udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->vnode   = vnode;
                xrcd->device  = ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (vnode != NULL) {
                if (new_xrcd) {
                        /* create new vnode/xrcd table entry */
                        ret = xrcd_table_insert(ibudev, vnode, xrcd);
                        if (ret)
                                goto err_dealloc_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_copy;

        if (vnode != NULL)
                vrele(vnode);

        mutex_unlock(&ibudev->xrcd_tree_mutex);

        rdma_alloc_commit_uobject(&obj->uobject, attrs);
        return 0;

err_copy:
        if (vnode != NULL) {
                if (new_xrcd)
                        xrcd_table_delete(ibudev, vnode);
                atomic_dec(&xrcd->usecnt);
        }

err_dealloc_xrcd:
        ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));

err:
        uobj_alloc_abort(&obj->uobject, attrs);

err_tree_mutex_unlock:
        if (vnode != NULL)
                vrele(vnode);

        mutex_unlock(&ibudev->xrcd_tree_mutex);

        return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_close_xrcd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
                           enum rdma_remove_reason why,
                           struct uverbs_attr_bundle *attrs)
{
        struct vnode *vnode;
        int ret;
        struct ib_uverbs_device *dev = attrs->ufile->device;

        vnode = xrcd->vnode;
        if (vnode && !atomic_dec_and_test(&xrcd->usecnt))
                return 0;

        ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);

        if (ib_is_destroy_retryable(ret, why, uobject)) {
                atomic_inc(&xrcd->usecnt);
                return ret;
        }

        if (vnode)
                xrcd_table_delete(dev, vnode);

        return ret;
}
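
/*
 * Descriptive note: a vnode-backed XRCD is shared. Each opener takes a
 * usecnt reference (see ib_uverbs_open_xrcd() above), and teardown here
 * only proceeds once the last reference is dropped; until then the table
 * entry keeps the vnode referenced.
 */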

static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        /* The start VA and the HCA VA must share the same in-page offset. */
        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                if (!(pd->device->attrs.device_cap_flags &
                      IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags,
                                     &attrs->driver_udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->type    = IB_MR_TYPE_USER;
        mr->dm      = NULL;
        mr->sig_attrs = NULL;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mr;

        memset(&resp, 0, sizeof resp);
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;
        resp.mr_handle = uobj->id;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_copy;

        uobj_put_obj_read(pd);

        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_copy:
        ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));

err_put:
        uobj_put_obj_read(pd);

err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_pd                *pd = NULL;
        struct ib_mr                *mr;
        struct ib_pd                *old_pd;
        int                          ret;
        struct ib_uobject           *uobj;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                return -EINVAL;

        uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        mr = uobj->object;

        if (mr->dm) {
                ret = -EINVAL;
                goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
                                       attrs);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        old_pd = mr->pd;
        ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                        cmd.length, cmd.hca_va,
                                        cmd.access_flags, pd,
                                        &attrs->driver_udata);
        if (ret)
                goto put_uobj_pd;

        if (cmd.flags & IB_MR_REREG_PD) {
                atomic_inc(&pd->usecnt);
                mr->pd = pd;
                atomic_dec(&old_pd->usecnt);
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;

        ret = uverbs_response(attrs, &resp, sizeof(resp));

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                uobj_put_obj_read(pd);

put_uobjs:
        uobj_put_write(uobj);

        return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dereg_mr cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
                ret = -EINVAL;
                goto err_put;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device  = pd->device;
        mw->pd      = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;

        memset(&resp, 0, sizeof(resp));
        resp.rkey      = mw->rkey;
        resp.mw_handle = uobj->id;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_copy;

        uobj_put_obj_read(pd);
        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_copy:
        uverbs_dealloc_mw(mw);
err_put:
        uobj_put_obj_read(pd);
err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_mw cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct ib_uobject                         *uobj;
        struct ib_uverbs_completion_event_file    *ev_file;
        struct ib_device *ib_dev;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        resp.fd = uobj->id;

        ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
                               uobj);
        ib_uverbs_init_event_queue(&ev_file->ev_queue);

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret) {
                uobj_alloc_abort(uobj, attrs);
                return ret;
        }

        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;
}
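
/*
 * Descriptive note: the 'fd' returned above is the completion channel's
 * uobject id; create_cq() below resolves its comp_channel field back to
 * this file via ib_uverbs_lookup_comp_file().
 */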

static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
                                       struct ib_uverbs_ex_create_cq *cmd)
{
        struct ib_ucq_object           *obj;
        struct ib_uverbs_completion_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_cq_init_attr attr = {};
        struct ib_device *ib_dev;

        if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
                return ERR_PTR(-EINVAL);

        obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
                                                 &ib_dev);
        if (IS_ERR(obj))
                return obj;

        if (cmd->comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
                if (IS_ERR(ev_file)) {
                        ret = PTR_ERR(ev_file);
                        goto err;
                }
        }

        obj->uevent.uobject.user_handle = cmd->user_handle;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->uevent.event_list);

        attr.cqe = cmd->cqe;
        attr.comp_vector = cmd->comp_vector;
        attr.flags = cmd->flags;

        cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
        if (!cq) {
                ret = -ENOMEM;
                goto err_file;
        }
        cq->device        = ib_dev;
        cq->uobject       = obj;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
        atomic_set(&cq->usecnt, 0);

        ret = ib_dev->create_cq(cq, &attr, &attrs->driver_udata);
        if (ret)
                goto err_free;

        obj->uevent.uobject.object = cq;
        memset(&resp, 0, sizeof resp);
        resp.base.cq_handle = obj->uevent.uobject.id;
        resp.base.cqe       = cq->cqe;
        resp.response_length = uverbs_response_length(attrs, sizeof(resp));

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_cb;

        rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
        return obj;

err_cb:
        ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
        cq = NULL;
err_free:
        kfree(cq);
err_file:
        if (ev_file)
                ib_uverbs_release_ucq(ev_file, obj);

err:
        uobj_alloc_abort(&obj->uevent.uobject, attrs);

        return ERR_PTR(ret);
}
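
/*
 * Descriptive note: when a completion channel was supplied, cq->cq_context
 * set above points at that channel's event queue, which is presumably
 * where ib_uverbs_comp_handler() (the comp_handler installed above) queues
 * completion events for the channel's fd.
 */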

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_ex_create_cq   cmd_ex;
        struct ib_ucq_object           *obj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
        cmd_ex.cqe = cmd.cqe;
        cmd_ex.comp_vector = cmd.comp_vector;
        cmd_ex.comp_channel = cmd.comp_channel;

        obj = create_cq(attrs, &cmd_ex);
        return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_ex_create_cq  cmd;
        struct ib_ucq_object           *obj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        obj = create_cq(attrs, &cmd);
        return PTR_ERR_OR_ZERO(obj);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp = {};
        struct ib_cq                    *cq;
        int                             ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);

        return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
                           struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id               = wc->wr_id;
        tmp.status              = wc->status;
        tmp.opcode              = wc->opcode;
        tmp.vendor_err          = wc->vendor_err;
        tmp.byte_len            = wc->byte_len;
        tmp.ex.imm_data         = wc->ex.imm_data;
        tmp.qp_num              = wc->qp->qp_num;
        tmp.src_qp              = wc->src_qp;
        tmp.wc_flags            = wc->wc_flags;
        tmp.pkey_index          = wc->pkey_index;
        tmp.slid                = wc->slid;
        tmp.sl                  = wc->sl;
        tmp.dlid_path_bits      = wc->dlid_path_bits;
        tmp.port_num            = wc->port_num;
        tmp.reserved            = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_poll_cq       cmd;
        struct ib_uverbs_poll_cq_resp  resp;
        u8 __user                     *header_ptr;
        u8 __user                     *data_ptr;
        struct ib_cq                  *cq;
        struct ib_wc                   wc;
        int                            ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        /*
         * The output is a struct ib_uverbs_poll_cq_resp header followed by
         * the completion entries; the header is copied last, once the
         * final count is known.
         */
        header_ptr = attrs->ucore.outbuf;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(cq->device, data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }
        ret = 0;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);

out_put:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return ret;
}
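
/*
 * Resulting user-buffer layout (sketch):
 *
 *      struct ib_uverbs_poll_cq_resp resp;     header, carries resp.count
 *      struct ib_uverbs_wc wc[resp.count];     completions, in poll order
 */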

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq                  *cq;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject               *uobj;
        struct ib_ucq_object            *obj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
        memset(&resp, 0, sizeof(resp));
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->uevent.events_reported;

        uobj_put_destroy(uobj);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int create_qp(struct uverbs_attr_bundle *attrs,
                     struct ib_uverbs_ex_create_qp *cmd)
{
        struct ib_uqp_object            *obj;
        struct ib_device                *device;
        struct ib_pd                    *pd = NULL;
        struct ib_xrcd                  *xrcd = NULL;
        struct ib_uobject               *xrcd_uobj = ERR_PTR(-ENOENT);
        struct ib_cq                    *scq = NULL, *rcq = NULL;
        struct ib_srq                   *srq = NULL;
        struct ib_qp                    *qp;
        struct ib_qp_init_attr          attr = {};
        struct ib_uverbs_ex_create_qp_resp resp;
        int                             ret;
        struct ib_rwq_ind_table *ind_tbl = NULL;
        bool has_sq = true;
        struct ib_device *ib_dev;

        if (cmd->qp_type == IB_QPT_RAW_PACKET && priv_check(curthread, PRIV_NET_RAW) != 0)
                return -EPERM;

        obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
                                                 &ib_dev);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
        obj->uxrcd = NULL;
        obj->uevent.uobject.user_handle = cmd->user_handle;
        mutex_init(&obj->mcast_lock);

        if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
                ind_tbl = uobj_get_obj_read(rwq_ind_table,
                                            UVERBS_OBJECT_RWQ_IND_TBL,
                                            cmd->rwq_ind_tbl_handle, attrs);
                if (!ind_tbl) {
                        ret = -EINVAL;
                        goto err_put;
                }

                attr.rwq_ind_tbl = ind_tbl;
        }

        if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
                ret = -EINVAL;
                goto err_put;
        }

        if (ind_tbl && !cmd->max_send_wr)
                has_sq = false;

        if (cmd->qp_type == IB_QPT_XRC_TGT) {
                /*
                 * For XRC_TGT QPs the ABI reuses the pd_handle field to
                 * carry the XRCD handle.
                 */
                xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
                                          attrs);

                if (IS_ERR(xrcd_uobj)) {
                        ret = -EINVAL;
                        goto err_put;
                }

                xrcd = (struct ib_xrcd *)xrcd_uobj->object;
                if (!xrcd) {
                        ret = -EINVAL;
                        goto err_put;
                }
                device = xrcd->device;
        } else {
                if (cmd->qp_type == IB_QPT_XRC_INI) {
                        cmd->max_recv_wr = 0;
                        cmd->max_recv_sge = 0;
                } else {
                        if (cmd->is_srq) {
                                srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
                                                        cmd->srq_handle, attrs);
                                if (!srq || srq->srq_type == IB_SRQT_XRC) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }

                        if (!ind_tbl) {
                                if (cmd->recv_cq_handle != cmd->send_cq_handle) {
                                        rcq = uobj_get_obj_read(
                                                cq, UVERBS_OBJECT_CQ,
                                                cmd->recv_cq_handle, attrs);
                                        if (!rcq) {
                                                ret = -EINVAL;
                                                goto err_put;
                                        }
                                }
                        }
                }

                if (has_sq)
                        scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
                                                cmd->send_cq_handle, attrs);
                if (!ind_tbl)
                        rcq = rcq ?: scq;
                pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
                                       attrs);
                if (!pd || (!scq && has_sq)) {
                        ret = -EINVAL;
                        goto err_put;
                }

                device = pd->device;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.xrcd          = xrcd;
        attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
                                              IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd->qp_type;
        attr.create_flags  = 0;

        attr.cap.max_send_wr     = cmd->max_send_wr;
        attr.cap.max_recv_wr     = cmd->max_recv_wr;
        attr.cap.max_send_sge    = cmd->max_send_sge;
        attr.cap.max_recv_sge    = cmd->max_recv_sge;
        attr.cap.max_inline_data = cmd->max_inline_data;

        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        attr.create_flags = cmd->create_flags;
        if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
                                IB_QP_CREATE_CROSS_CHANNEL |
                                IB_QP_CREATE_MANAGED_SEND |
                                IB_QP_CREATE_MANAGED_RECV |
                                IB_QP_CREATE_SCATTER_FCS |
                                IB_QP_CREATE_CVLAN_STRIPPING |
                                IB_QP_CREATE_SOURCE_QPN |
                                IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
                ret = -EINVAL;
                goto err_put;
        }

        if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
                if (priv_check(curthread, PRIV_NET_RAW)) {
                        ret = -EPERM;
                        goto err_put;
                }

                attr.source_qpn = cmd->source_qpn;
        }

        if (cmd->qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
                qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
                                   obj);

        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        if (cmd->qp_type != IB_QPT_XRC_TGT) {
                atomic_inc(&pd->usecnt);
                if (attr.send_cq)
                        atomic_inc(&attr.send_cq->usecnt);
                if (attr.recv_cq)
                        atomic_inc(&attr.recv_cq->usecnt);
                if (attr.srq)
                        atomic_inc(&attr.srq->usecnt);
                if (ind_tbl)
                        atomic_inc(&ind_tbl->usecnt);
        } else {
                /* It is done in _ib_create_qp for other QP types */
                qp->uobject = obj;
        }

        obj->uevent.uobject.object = qp;

        memset(&resp, 0, sizeof resp);
        resp.base.qpn             = qp->qp_num;
        resp.base.qp_handle       = obj->uevent.uobject.id;
        resp.base.max_recv_sge    = attr.cap.max_recv_sge;
        resp.base.max_send_sge    = attr.cap.max_send_sge;
        resp.base.max_recv_wr     = attr.cap.max_recv_wr;
        resp.base.max_send_wr     = attr.cap.max_send_wr;
        resp.base.max_inline_data = attr.cap.max_inline_data;
        resp.response_length = uverbs_response_length(attrs, sizeof(resp));

        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_cb;

        if (xrcd) {
                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
                                          uobject);
                atomic_inc(&obj->uxrcd->refcnt);
                uobj_put_read(xrcd_uobj);
        }

        if (pd)
                uobj_put_obj_read(pd);
        if (scq)
                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (rcq && rcq != scq)
                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (srq)
                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (ind_tbl)
                uobj_put_obj_read(ind_tbl);

        rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
        return 0;
err_cb:
        ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));

err_put:
        if (!IS_ERR(xrcd_uobj))
                uobj_put_read(xrcd_uobj);
        if (pd)
                uobj_put_obj_read(pd);
        if (scq)
                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (rcq && rcq != scq)
                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (srq)
                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (ind_tbl)
                uobj_put_obj_read(ind_tbl);

        uobj_alloc_abort(&obj->uevent.uobject, attrs);
        return ret;
}
 1507 
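/*
 * Legacy ABI entry point.  The original create-QP command is widened into
 * the extended form: the common fields are copied one-for-one, and the
 * extended-only fields (comp_mask, create_flags, and the indirection-table
 * handle) stay zeroed from the memset, so the shared create_qp() path
 * above can serve both ABIs.
 */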
 1508 static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
 1509 {
 1510         struct ib_uverbs_create_qp      cmd;
 1511         struct ib_uverbs_ex_create_qp   cmd_ex;
 1512         int ret;
 1513 
 1514         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1515         if (ret)
 1516                 return ret;
 1517 
 1518         memset(&cmd_ex, 0, sizeof(cmd_ex));
 1519         cmd_ex.user_handle = cmd.user_handle;
 1520         cmd_ex.pd_handle = cmd.pd_handle;
 1521         cmd_ex.send_cq_handle = cmd.send_cq_handle;
 1522         cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
 1523         cmd_ex.srq_handle = cmd.srq_handle;
 1524         cmd_ex.max_send_wr = cmd.max_send_wr;
 1525         cmd_ex.max_recv_wr = cmd.max_recv_wr;
 1526         cmd_ex.max_send_sge = cmd.max_send_sge;
 1527         cmd_ex.max_recv_sge = cmd.max_recv_sge;
 1528         cmd_ex.max_inline_data = cmd.max_inline_data;
 1529         cmd_ex.sq_sig_all = cmd.sq_sig_all;
 1530         cmd_ex.qp_type = cmd.qp_type;
 1531         cmd_ex.is_srq = cmd.is_srq;
 1532 
 1533         return create_qp(attrs, &cmd_ex);
 1534 }
 1535 
 1536 static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
 1537 {
 1538         struct ib_uverbs_ex_create_qp cmd;
 1539         int ret;
 1540 
 1541         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1542         if (ret)
 1543                 return ret;
 1544 
 1545         if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
 1546                 return -EINVAL;
 1547 
 1548         if (cmd.reserved)
 1549                 return -EINVAL;
 1550 
 1551         return create_qp(attrs, &cmd);
 1552 }
 1553 
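/*
 * Open a handle to an existing XRC TGT QP through its XRCD instead of
 * creating a new one, so that a QP created elsewhere can be shared.  Note
 * the ABI quirk: the command's pd_handle field actually carries the XRCD
 * handle, which is why it is looked up as UVERBS_OBJECT_XRCD below.
 */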
 1554 static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 1555 {
 1556         struct ib_uverbs_open_qp        cmd;
 1557         struct ib_uverbs_create_qp_resp resp;
 1558         struct ib_uqp_object           *obj;
 1559         struct ib_xrcd                 *xrcd;
 1560         struct ib_uobject              *uninitialized_var(xrcd_uobj);
 1561         struct ib_qp                   *qp;
 1562         struct ib_qp_open_attr          attr = {};
 1563         int ret;
 1564         struct ib_device *ib_dev;
 1565 
 1566         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1567         if (ret)
 1568                 return ret;
 1569 
 1570         obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
 1571                                                  &ib_dev);
 1572         if (IS_ERR(obj))
 1573                 return PTR_ERR(obj);
 1574 
 1575         xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
 1576         if (IS_ERR(xrcd_uobj)) {
 1577                 ret = -EINVAL;
 1578                 goto err_put;
 1579         }
 1580 
 1581         xrcd = (struct ib_xrcd *)xrcd_uobj->object;
 1582         if (!xrcd) {
 1583                 ret = -EINVAL;
 1584                 goto err_xrcd;
 1585         }
 1586 
 1587         attr.event_handler = ib_uverbs_qp_event_handler;
 1588         attr.qp_num        = cmd.qpn;
 1589         attr.qp_type       = cmd.qp_type;
 1590 
 1591         INIT_LIST_HEAD(&obj->uevent.event_list);
 1592         INIT_LIST_HEAD(&obj->mcast_list);
 1593 
 1594         qp = ib_open_qp(xrcd, &attr);
 1595         if (IS_ERR(qp)) {
 1596                 ret = PTR_ERR(qp);
 1597                 goto err_xrcd;
 1598         }
 1599 
 1600         obj->uevent.uobject.object = qp;
 1601         obj->uevent.uobject.user_handle = cmd.user_handle;
 1602 
 1603         memset(&resp, 0, sizeof resp);
 1604         resp.qpn       = qp->qp_num;
 1605         resp.qp_handle = obj->uevent.uobject.id;
 1606 
 1607         ret = uverbs_response(attrs, &resp, sizeof(resp));
 1608         if (ret)
 1609                 goto err_destroy;
 1610 
 1611         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 1612         atomic_inc(&obj->uxrcd->refcnt);
 1613         qp->uobject = obj;
 1614         uobj_put_read(xrcd_uobj);
 1615 
 1616         rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
 1617         return 0;
 1618 
 1619 err_destroy:
 1620         ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
 1621 err_xrcd:
 1622         uobj_put_read(xrcd_uobj);
 1623 err_put:
 1624         uobj_alloc_abort(&obj->uevent.uobject, attrs);
 1625         return ret;
 1626 }
 1627 
 1628 static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
 1629                                    struct ib_ah_attr *rdma_attr)
 1630 {
 1631         uverb_attr->dlid              = rdma_attr->dlid;
 1632         uverb_attr->sl                = rdma_attr->sl;
 1633         uverb_attr->src_path_bits     = rdma_attr->src_path_bits;
 1634         uverb_attr->static_rate       = rdma_attr->static_rate;
 1635         uverb_attr->is_global         = !!(rdma_attr->ah_flags & IB_AH_GRH);
 1636         if (uverb_attr->is_global) {
 1637                 const struct ib_global_route *grh = &rdma_attr->grh;
 1638 
 1639                 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
 1640                 uverb_attr->flow_label        = grh->flow_label;
 1641                 uverb_attr->sgid_index        = grh->sgid_index;
 1642                 uverb_attr->hop_limit         = grh->hop_limit;
 1643                 uverb_attr->traffic_class     = grh->traffic_class;
 1644         }
 1645         uverb_attr->port_num          = rdma_attr->port_num;
 1646 }
 1647 
 1648 static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
 1649 {
 1650         struct ib_uverbs_query_qp      cmd;
 1651         struct ib_uverbs_query_qp_resp resp;
 1652         struct ib_qp                   *qp;
 1653         struct ib_qp_attr              *attr;
 1654         struct ib_qp_init_attr         *init_attr;
 1655         int                            ret;
 1656 
 1657         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1658         if (ret)
 1659                 return ret;
 1660 
 1661         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
 1662         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
 1663         if (!attr || !init_attr) {
 1664                 ret = -ENOMEM;
 1665                 goto out;
 1666         }
 1667 
 1668         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 1669         if (!qp) {
 1670                 ret = -EINVAL;
 1671                 goto out;
 1672         }
 1673 
 1674         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
 1675 
 1676         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 1677                                 UVERBS_LOOKUP_READ);
 1678 
 1679         if (ret)
 1680                 goto out;
 1681 
 1682         memset(&resp, 0, sizeof resp);
 1683 
 1684         resp.qp_state               = attr->qp_state;
 1685         resp.cur_qp_state           = attr->cur_qp_state;
 1686         resp.path_mtu               = attr->path_mtu;
 1687         resp.path_mig_state         = attr->path_mig_state;
 1688         resp.qkey                   = attr->qkey;
 1689         resp.rq_psn                 = attr->rq_psn;
 1690         resp.sq_psn                 = attr->sq_psn;
 1691         resp.dest_qp_num            = attr->dest_qp_num;
 1692         resp.qp_access_flags        = attr->qp_access_flags;
 1693         resp.pkey_index             = attr->pkey_index;
 1694         resp.alt_pkey_index         = attr->alt_pkey_index;
 1695         resp.sq_draining            = attr->sq_draining;
 1696         resp.max_rd_atomic          = attr->max_rd_atomic;
 1697         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
 1698         resp.min_rnr_timer          = attr->min_rnr_timer;
 1699         resp.port_num               = attr->port_num;
 1700         resp.timeout                = attr->timeout;
 1701         resp.retry_cnt              = attr->retry_cnt;
 1702         resp.rnr_retry              = attr->rnr_retry;
 1703         resp.alt_port_num           = attr->alt_port_num;
 1704         resp.alt_timeout            = attr->alt_timeout;
 1705 
 1706         copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
 1707         copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
 1708 
 1709         resp.max_send_wr            = init_attr->cap.max_send_wr;
 1710         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
 1711         resp.max_send_sge           = init_attr->cap.max_send_sge;
 1712         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
 1713         resp.max_inline_data        = init_attr->cap.max_inline_data;
 1714         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 1715 
 1716         ret = uverbs_response(attrs, &resp, sizeof(resp));
 1717 
 1718 out:
 1719         kfree(attr);
 1720         kfree(init_attr);
 1721 
 1722         return ret;
 1723 }
 1724 
  1725 /*
        * Remove attribute-mask bits that do not apply to the given XRC QP
        * type: an XRC initiator has no receive side, so responder-only
        * attributes are dropped; an XRC target has no send side, so
        * requester-only attributes are dropped.
        */
 1726 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 1727 {
 1728         switch (qp_type) {
 1729         case IB_QPT_XRC_INI:
 1730                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
 1731         case IB_QPT_XRC_TGT:
 1732                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
 1733                                 IB_QP_RNR_RETRY);
 1734         default:
 1735                 return mask;
 1736         }
 1737 }
 1738 
 1739 static void copy_ah_attr_from_uverbs(struct ib_device *dev,
 1740                                      struct ib_ah_attr *rdma_attr,
 1741                                      struct ib_uverbs_qp_dest *uverb_attr)
 1742 {
 1743         if (uverb_attr->is_global) {
 1744                 struct ib_global_route *grh = &rdma_attr->grh;
 1745 
 1746                 grh->flow_label = uverb_attr->flow_label;
 1747                 grh->sgid_index = uverb_attr->sgid_index;
 1748                 grh->hop_limit = uverb_attr->hop_limit;
 1749                 grh->traffic_class = uverb_attr->traffic_class;
 1750                 memcpy(grh->dgid.raw, uverb_attr->dgid, sizeof(grh->dgid));
 1751                 rdma_attr->ah_flags = IB_AH_GRH;
 1752         } else {
 1753                 rdma_attr->ah_flags = 0;
 1754         }
 1755         rdma_attr->dlid = uverb_attr->dlid;
 1756         rdma_attr->sl = uverb_attr->sl;
 1757         rdma_attr->src_path_bits = uverb_attr->src_path_bits;
 1758         rdma_attr->static_rate = uverb_attr->static_rate;
 1759         rdma_attr->port_num = uverb_attr->port_num;
 1760 }
 1761 
 1762 static int modify_qp(struct uverbs_attr_bundle *attrs,
 1763                      struct ib_uverbs_ex_modify_qp *cmd)
 1764 {
 1765         struct ib_qp_attr *attr;
 1766         struct ib_qp *qp;
 1767         int ret;
 1768 
 1769         attr = kzalloc(sizeof(*attr), GFP_KERNEL);
 1770         if (!attr)
 1771                 return -ENOMEM;
 1772 
 1773         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
 1774                                attrs);
 1775         if (!qp) {
 1776                 ret = -EINVAL;
 1777                 goto out;
 1778         }
 1779 
 1780         if ((cmd->base.attr_mask & IB_QP_PORT) &&
 1781             !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
 1782                 ret = -EINVAL;
 1783                 goto release_qp;
 1784         }
 1785 
 1786         if ((cmd->base.attr_mask & IB_QP_AV)) {
 1787                 if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
 1788                         ret = -EINVAL;
 1789                         goto release_qp;
 1790                 }
 1791 
 1792                 if (cmd->base.attr_mask & IB_QP_STATE &&
 1793                     cmd->base.qp_state == IB_QPS_RTR) {
 1794                 /* We are in INIT->RTR TRANSITION (if we are not,
 1795                  * this transition will be rejected in subsequent checks).
 1796                  * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
 1797                  * but the IB_QP_STATE flag is required.
 1798                  *
 1799                  * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
 1800                  * when IB_QP_AV is set, has required inclusion of a valid
 1801                  * port number in the primary AV. (AVs are created and handled
 1802                  * differently for infiniband and ethernet (RoCE) ports).
 1803                  *
 1804                  * Check the port number included in the primary AV against
 1805                  * the port number in the qp struct, which was set (and saved)
 1806                  * in the RST->INIT transition.
 1807                  */
 1808                         if (cmd->base.dest.port_num != qp->real_qp->port) {
 1809                                 ret = -EINVAL;
 1810                                 goto release_qp;
 1811                         }
 1812                 } else {
 1813                 /* We are in SQD->SQD. (If we are not, this transition will
 1814                  * be rejected later in the verbs layer checks).
 1815                  * Check for both IB_QP_PORT and IB_QP_AV, these can be set
 1816                  * together in the SQD->SQD transition.
 1817                  *
  1818                  * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
 1819                  * verbs layer driver does not track primary port changes
 1820                  * resulting from path migration. Thus, in SQD, if the primary
 1821                  * AV is modified, the primary port should also be modified).
 1822                  *
 1823                  * Note that in this transition, the IB_QP_STATE flag
 1824                  * is not allowed.
 1825                  */
 1826                         if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
 1827                              == (IB_QP_AV | IB_QP_PORT)) &&
 1828                             cmd->base.port_num != cmd->base.dest.port_num) {
 1829                                 ret = -EINVAL;
 1830                                 goto release_qp;
 1831                         }
 1832                         if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
 1833                             == IB_QP_AV) {
 1834                                 cmd->base.attr_mask |= IB_QP_PORT;
 1835                                 cmd->base.port_num = cmd->base.dest.port_num;
 1836                         }
 1837                 }
 1838         }
 1839 
 1840         if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
 1841             (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
 1842             !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
 1843             cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
 1844                 ret = -EINVAL;
 1845                 goto release_qp;
 1846         }
 1847 
 1848         if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
 1849             cmd->base.cur_qp_state > IB_QPS_ERR) ||
 1850             (cmd->base.attr_mask & IB_QP_STATE &&
 1851             cmd->base.qp_state > IB_QPS_ERR)) {
 1852                 ret = -EINVAL;
 1853                 goto release_qp;
 1854         }
 1855 
 1856         if (cmd->base.attr_mask & IB_QP_STATE)
 1857                 attr->qp_state = cmd->base.qp_state;
 1858         if (cmd->base.attr_mask & IB_QP_CUR_STATE)
 1859                 attr->cur_qp_state = cmd->base.cur_qp_state;
 1860         if (cmd->base.attr_mask & IB_QP_PATH_MTU)
 1861                 attr->path_mtu = cmd->base.path_mtu;
 1862         if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
 1863                 attr->path_mig_state = cmd->base.path_mig_state;
 1864         if (cmd->base.attr_mask & IB_QP_QKEY)
 1865                 attr->qkey = cmd->base.qkey;
 1866         if (cmd->base.attr_mask & IB_QP_RQ_PSN)
 1867                 attr->rq_psn = cmd->base.rq_psn;
 1868         if (cmd->base.attr_mask & IB_QP_SQ_PSN)
 1869                 attr->sq_psn = cmd->base.sq_psn;
 1870         if (cmd->base.attr_mask & IB_QP_DEST_QPN)
 1871                 attr->dest_qp_num = cmd->base.dest_qp_num;
 1872         if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
 1873                 attr->qp_access_flags = cmd->base.qp_access_flags;
 1874         if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
 1875                 attr->pkey_index = cmd->base.pkey_index;
 1876         if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
 1877                 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
 1878         if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
 1879                 attr->max_rd_atomic = cmd->base.max_rd_atomic;
 1880         if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 1881                 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
 1882         if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
 1883                 attr->min_rnr_timer = cmd->base.min_rnr_timer;
 1884         if (cmd->base.attr_mask & IB_QP_PORT)
 1885                 attr->port_num = cmd->base.port_num;
 1886         if (cmd->base.attr_mask & IB_QP_TIMEOUT)
 1887                 attr->timeout = cmd->base.timeout;
 1888         if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
 1889                 attr->retry_cnt = cmd->base.retry_cnt;
 1890         if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
 1891                 attr->rnr_retry = cmd->base.rnr_retry;
 1892         if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
 1893                 attr->alt_port_num = cmd->base.alt_port_num;
 1894                 attr->alt_timeout = cmd->base.alt_timeout;
 1895                 attr->alt_pkey_index = cmd->base.alt_pkey_index;
 1896         }
 1897         if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
 1898                 attr->rate_limit = cmd->rate_limit;
 1899 
 1900         if (cmd->base.attr_mask & IB_QP_AV)
 1901                 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
 1902                                          &cmd->base.dest);
 1903 
 1904         if (cmd->base.attr_mask & IB_QP_ALT_PATH)
 1905                 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
 1906                                          &cmd->base.alt_dest);
 1907 
 1908         ret = ib_modify_qp_with_udata(qp, attr,
 1909                                       modify_qp_mask(qp->qp_type,
 1910                                                      cmd->base.attr_mask),
 1911                                       &attrs->driver_udata);
 1912 
 1913 release_qp:
 1914         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 1915                                 UVERBS_LOOKUP_READ);
 1916 out:
 1917         kfree(attr);
 1918 
 1919         return ret;
 1920 }
 1921 
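/*
 * Legacy modify-QP entry point.  Only attribute bits up to
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK are accepted here; newer bits must
 * arrive via ib_uverbs_ex_modify_qp() below, which permits the full
 * IB_USER_LAST_QP_ATTR_MASK range and also returns a response.  The
 * "(MASK << 1) - 1" idiom builds an all-ones mask covering every bit up
 * to and including the named last bit.
 */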
 1922 static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
 1923 {
 1924         struct ib_uverbs_ex_modify_qp cmd;
 1925         int ret;
 1926 
 1927         ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
 1928         if (ret)
 1929                 return ret;
 1930 
 1931         if (cmd.base.attr_mask &
 1932             ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
 1933                 return -EOPNOTSUPP;
 1934 
 1935         return modify_qp(attrs, &cmd);
 1936 }
 1937 
 1938 static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
 1939 {
 1940         struct ib_uverbs_ex_modify_qp cmd;
 1941         struct ib_uverbs_ex_modify_qp_resp resp = {
 1942                 .response_length = uverbs_response_length(attrs, sizeof(resp))
 1943         };
 1944         int ret;
 1945 
 1946         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1947         if (ret)
 1948                 return ret;
 1949 
 1950         /*
 1951          * Last bit is reserved for extending the attr_mask by
 1952          * using another field.
 1953          */
  1954         BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));
 1955 
 1956         if (cmd.base.attr_mask &
 1957             ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
 1958                 return -EOPNOTSUPP;
 1959 
 1960         ret = modify_qp(attrs, &cmd);
 1961         if (ret)
 1962                 return ret;
 1963 
 1964         return uverbs_response(attrs, &resp, sizeof(resp));
 1965 }
 1966 
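/*
 * QP destruction goes through the generic uobject destroy machinery; the
 * response reports events_reported so user space can account for the
 * async events delivered on this QP before it went away.
 */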
 1967 static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
 1968 {
 1969         struct ib_uverbs_destroy_qp      cmd;
 1970         struct ib_uverbs_destroy_qp_resp resp;
 1971         struct ib_uobject               *uobj;
 1972         struct ib_uqp_object            *obj;
 1973         int ret;
 1974 
 1975         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 1976         if (ret)
 1977                 return ret;
 1978 
 1979         uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 1980         if (IS_ERR(uobj))
 1981                 return PTR_ERR(uobj);
 1982 
 1983         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
 1984         memset(&resp, 0, sizeof(resp));
 1985         resp.events_reported = obj->uevent.events_reported;
 1986 
 1987         uobj_put_destroy(uobj);
 1988 
 1989         return uverbs_response(attrs, &resp, sizeof(resp));
 1990 }
 1991 
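/*
 * Allocate a driver work request with its scatter/gather array stored
 * inline: the SGEs begin right after the WR structure, rounded up to an
 * ib_sge boundary.  The guard rejects num_sge values that would make the
 * size computation below overflow.
 */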
 1992 static void *alloc_wr(size_t wr_size, __u32 num_sge)
 1993 {
 1994         if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
 1995                        sizeof (struct ib_sge))
 1996                 return NULL;
 1997 
 1998         return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
 1999                          num_sge * sizeof (struct ib_sge), GFP_KERNEL);
 2000 }
 2001 
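/*
 * Post send work requests on behalf of user space.  The request is a
 * fixed header followed by two variable-length arrays, consumed through
 * the request iterator:
 *
 *     struct ib_uverbs_post_send cmd;          -- header
 *     u8 wqes[wr_count][wqe_size];             -- user work requests
 *     struct ib_uverbs_sge sgls[sge_count];    -- shared SGE pool
 *
 * Each WR takes its num_sge entries from the SGE pool in order.  If the
 * driver rejects a WR, resp.bad_wr carries the 1-based index of the
 * first failed WR so user space can tell how far the post progressed.
 */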
 2002 static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 2003 {
 2004         struct ib_uverbs_post_send      cmd;
 2005         struct ib_uverbs_post_send_resp resp;
 2006         struct ib_uverbs_send_wr       *user_wr;
 2007         struct ib_send_wr              *wr = NULL, *last, *next;
 2008         const struct ib_send_wr        *bad_wr;
 2009         struct ib_qp                   *qp;
 2010         int                             i, sg_ind;
 2011         int                             is_ud;
 2012         int ret, ret2;
 2013         size_t                          next_size;
 2014         const struct ib_sge __user *sgls;
 2015         const void __user *wqes;
 2016         struct uverbs_req_iter iter;
 2017 
 2018         ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 2019         if (ret)
 2020                 return ret;
 2021         wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
 2022         if (IS_ERR(wqes))
 2023                 return PTR_ERR(wqes);
 2024         sgls = uverbs_request_next_ptr(
 2025                 &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
 2026         if (IS_ERR(sgls))
 2027                 return PTR_ERR(sgls);
 2028         ret = uverbs_request_finish(&iter);
 2029         if (ret)
 2030                 return ret;
 2031 
 2032         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
 2033         if (!user_wr)
 2034                 return -ENOMEM;
 2035 
 2036         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 2037         if (!qp) {
 2038                 ret = -EINVAL;
 2039                 goto out;
 2040         }
 2041 
 2042         is_ud = qp->qp_type == IB_QPT_UD;
 2043         sg_ind = 0;
 2044         last = NULL;
 2045         for (i = 0; i < cmd.wr_count; ++i) {
 2046                 if (copy_from_user(user_wr, (const u8 *)wqes + i * cmd.wqe_size,
 2047                                    cmd.wqe_size)) {
 2048                         ret = -EFAULT;
 2049                         goto out_put;
 2050                 }
 2051 
 2052                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
 2053                         ret = -EINVAL;
 2054                         goto out_put;
 2055                 }
 2056 
 2057                 if (is_ud) {
 2058                         struct ib_ud_wr *ud;
 2059 
 2060                         if (user_wr->opcode != IB_WR_SEND &&
 2061                             user_wr->opcode != IB_WR_SEND_WITH_IMM) {
 2062                                 ret = -EINVAL;
 2063                                 goto out_put;
 2064                         }
 2065 
 2066                         next_size = sizeof(*ud);
 2067                         ud = alloc_wr(next_size, user_wr->num_sge);
 2068                         if (!ud) {
 2069                                 ret = -ENOMEM;
 2070                                 goto out_put;
 2071                         }
 2072 
 2073                         ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
 2074                                                    user_wr->wr.ud.ah, attrs);
 2075                         if (!ud->ah) {
 2076                                 kfree(ud);
 2077                                 ret = -EINVAL;
 2078                                 goto out_put;
 2079                         }
 2080                         ud->remote_qpn = user_wr->wr.ud.remote_qpn;
 2081                         ud->remote_qkey = user_wr->wr.ud.remote_qkey;
 2082 
 2083                         next = &ud->wr;
 2084                 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
 2085                            user_wr->opcode == IB_WR_RDMA_WRITE ||
 2086                            user_wr->opcode == IB_WR_RDMA_READ) {
 2087                         struct ib_rdma_wr *rdma;
 2088 
 2089                         next_size = sizeof(*rdma);
 2090                         rdma = alloc_wr(next_size, user_wr->num_sge);
 2091                         if (!rdma) {
 2092                                 ret = -ENOMEM;
 2093                                 goto out_put;
 2094                         }
 2095 
 2096                         rdma->remote_addr = user_wr->wr.rdma.remote_addr;
 2097                         rdma->rkey = user_wr->wr.rdma.rkey;
 2098 
 2099                         next = &rdma->wr;
 2100                 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 2101                            user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
 2102                         struct ib_atomic_wr *atomic;
 2103 
 2104                         next_size = sizeof(*atomic);
 2105                         atomic = alloc_wr(next_size, user_wr->num_sge);
 2106                         if (!atomic) {
 2107                                 ret = -ENOMEM;
 2108                                 goto out_put;
 2109                         }
 2110 
 2111                         atomic->remote_addr = user_wr->wr.atomic.remote_addr;
 2112                         atomic->compare_add = user_wr->wr.atomic.compare_add;
 2113                         atomic->swap = user_wr->wr.atomic.swap;
 2114                         atomic->rkey = user_wr->wr.atomic.rkey;
 2115 
 2116                         next = &atomic->wr;
 2117                 } else if (user_wr->opcode == IB_WR_SEND ||
 2118                            user_wr->opcode == IB_WR_SEND_WITH_IMM ||
 2119                            user_wr->opcode == IB_WR_SEND_WITH_INV) {
 2120                         next_size = sizeof(*next);
 2121                         next = alloc_wr(next_size, user_wr->num_sge);
 2122                         if (!next) {
 2123                                 ret = -ENOMEM;
 2124                                 goto out_put;
 2125                         }
 2126                 } else {
 2127                         ret = -EINVAL;
 2128                         goto out_put;
 2129                 }
 2130 
 2131                 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
 2132                     user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 2133                         next->ex.imm_data =
 2134                                         (__be32 __force) user_wr->ex.imm_data;
 2135                 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
 2136                         next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
 2137                 }
 2138 
 2139                 if (!last)
 2140                         wr = next;
 2141                 else
 2142                         last->next = next;
 2143                 last = next;
 2144 
 2145                 next->next       = NULL;
 2146                 next->wr_id      = user_wr->wr_id;
 2147                 next->num_sge    = user_wr->num_sge;
 2148                 next->opcode     = user_wr->opcode;
 2149                 next->send_flags = user_wr->send_flags;
 2150 
 2151                 if (next->num_sge) {
 2152                         next->sg_list = (void *)((char *)next +
 2153                                 ALIGN(next_size, sizeof(struct ib_sge)));
 2154                         if (copy_from_user(next->sg_list, sgls + sg_ind,
 2155                                            next->num_sge *
 2156                                                    sizeof(struct ib_sge))) {
 2157                                 ret = -EFAULT;
 2158                                 goto out_put;
 2159                         }
 2160                         sg_ind += next->num_sge;
 2161                 } else
 2162                         next->sg_list = NULL;
 2163         }
 2164 
 2165         resp.bad_wr = 0;
 2166         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
 2167         if (ret)
 2168                 for (next = wr; next; next = next->next) {
 2169                         ++resp.bad_wr;
 2170                         if (next == bad_wr)
 2171                                 break;
 2172                 }
 2173 
 2174         ret2 = uverbs_response(attrs, &resp, sizeof(resp));
 2175         if (ret2)
 2176                 ret = ret2;
 2177 
 2178 out_put:
 2179         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 2180                                 UVERBS_LOOKUP_READ);
 2181 
 2182         while (wr) {
 2183                 if (is_ud && ud_wr(wr)->ah)
 2184                         uobj_put_obj_read(ud_wr(wr)->ah);
 2185                 next = wr->next;
 2186                 kfree(wr);
 2187                 wr = next;
 2188         }
 2189 
 2190 out:
 2191         kfree(user_wr);
 2192 
 2193         return ret;
 2194 }
 2195 
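/*
 * Shared unmarshalling helper for post_recv and post_srq_recv.  It builds
 * a kernel-side linked list of ib_recv_wr, each allocated with its
 * sg_list inline (the same layout trick as alloc_wr() above), and returns
 * the head.  Ownership passes to the caller, which frees the list by
 * walking ->next in its "out" path.
 */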
 2196 static struct ib_recv_wr *
 2197 ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
 2198                           u32 wqe_size, u32 sge_count)
 2199 {
 2200         struct ib_uverbs_recv_wr *user_wr;
 2201         struct ib_recv_wr        *wr = NULL, *last, *next;
 2202         int                       sg_ind;
 2203         int                       i;
 2204         int                       ret;
 2205         const struct ib_sge __user *sgls;
 2206         const void __user *wqes;
 2207 
 2208         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
 2209                 return ERR_PTR(-EINVAL);
 2210 
 2211         wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
 2212         if (IS_ERR(wqes))
 2213                 return ERR_CAST(wqes);
 2214         sgls = uverbs_request_next_ptr(
 2215                 iter, sge_count * sizeof(struct ib_uverbs_sge));
 2216         if (IS_ERR(sgls))
 2217                 return ERR_CAST(sgls);
 2218         ret = uverbs_request_finish(iter);
 2219         if (ret)
 2220                 return ERR_PTR(ret);
 2221 
 2222         user_wr = kmalloc(wqe_size, GFP_KERNEL);
 2223         if (!user_wr)
 2224                 return ERR_PTR(-ENOMEM);
 2225 
 2226         sg_ind = 0;
 2227         last = NULL;
 2228         for (i = 0; i < wr_count; ++i) {
 2229                 if (copy_from_user(user_wr, (const char *)wqes + i * wqe_size,
 2230                                    wqe_size)) {
 2231                         ret = -EFAULT;
 2232                         goto err;
 2233                 }
 2234 
 2235                 if (user_wr->num_sge + sg_ind > sge_count) {
 2236                         ret = -EINVAL;
 2237                         goto err;
 2238                 }
 2239 
 2240                 if (user_wr->num_sge >=
 2241                     (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
 2242                     sizeof (struct ib_sge)) {
 2243                         ret = -EINVAL;
 2244                         goto err;
 2245                 }
 2246 
 2247                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
 2248                                user_wr->num_sge * sizeof (struct ib_sge),
 2249                                GFP_KERNEL);
 2250                 if (!next) {
 2251                         ret = -ENOMEM;
 2252                         goto err;
 2253                 }
 2254 
 2255                 if (!last)
 2256                         wr = next;
 2257                 else
 2258                         last->next = next;
 2259                 last = next;
 2260 
 2261                 next->next       = NULL;
 2262                 next->wr_id      = user_wr->wr_id;
 2263                 next->num_sge    = user_wr->num_sge;
 2264 
 2265                 if (next->num_sge) {
 2266                         next->sg_list = (void *)((char *)next +
 2267                                 ALIGN(sizeof *next, sizeof (struct ib_sge)));
 2268                         if (copy_from_user(next->sg_list, sgls + sg_ind,
 2269                                            next->num_sge *
 2270                                                    sizeof(struct ib_sge))) {
 2271                                 ret = -EFAULT;
 2272                                 goto err;
 2273                         }
 2274                         sg_ind += next->num_sge;
 2275                 } else
 2276                         next->sg_list = NULL;
 2277         }
 2278 
 2279         kfree(user_wr);
 2280         return wr;
 2281 
 2282 err:
 2283         kfree(user_wr);
 2284 
 2285         while (wr) {
 2286                 next = wr->next;
 2287                 kfree(wr);
 2288                 wr = next;
 2289         }
 2290 
 2291         return ERR_PTR(ret);
 2292 }
 2293 
 2294 static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
 2295 {
 2296         struct ib_uverbs_post_recv      cmd;
 2297         struct ib_uverbs_post_recv_resp resp;
 2298         struct ib_recv_wr              *wr, *next;
 2299         const struct ib_recv_wr        *bad_wr;
 2300         struct ib_qp                   *qp;
 2301         int ret, ret2;
 2302         struct uverbs_req_iter iter;
 2303 
 2304         ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 2305         if (ret)
 2306                 return ret;
 2307 
 2308         wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
 2309                                        cmd.sge_count);
 2310         if (IS_ERR(wr))
 2311                 return PTR_ERR(wr);
 2312 
 2313         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 2314         if (!qp) {
 2315                 ret = -EINVAL;
 2316                 goto out;
 2317         }
 2318 
 2319         resp.bad_wr = 0;
 2320         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
 2321 
 2322         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 2323                                 UVERBS_LOOKUP_READ);
 2324         if (ret) {
 2325                 for (next = wr; next; next = next->next) {
 2326                         ++resp.bad_wr;
 2327                         if (next == bad_wr)
 2328                                 break;
 2329                 }
 2330         }
 2331 
 2332         ret2 = uverbs_response(attrs, &resp, sizeof(resp));
 2333         if (ret2)
 2334                 ret = ret2;
 2335 out:
 2336         while (wr) {
 2337                 next = wr->next;
 2338                 kfree(wr);
 2339                 wr = next;
 2340         }
 2341 
 2342         return ret;
 2343 }
 2344 
 2345 static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
 2346 {
 2347         struct ib_uverbs_post_srq_recv      cmd;
 2348         struct ib_uverbs_post_srq_recv_resp resp;
 2349         struct ib_recv_wr                  *wr, *next;
 2350         const struct ib_recv_wr            *bad_wr;
 2351         struct ib_srq                      *srq;
 2352         int ret, ret2;
 2353         struct uverbs_req_iter iter;
 2354 
 2355         ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 2356         if (ret)
 2357                 return ret;
 2358 
 2359         wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
 2360                                        cmd.sge_count);
 2361         if (IS_ERR(wr))
 2362                 return PTR_ERR(wr);
 2363 
 2364         srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
 2365         if (!srq) {
 2366                 ret = -EINVAL;
 2367                 goto out;
 2368         }
 2369 
 2370         resp.bad_wr = 0;
 2371         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
 2372 
 2373         rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
 2374                                 UVERBS_LOOKUP_READ);
 2375 
 2376         if (ret)
 2377                 for (next = wr; next; next = next->next) {
 2378                         ++resp.bad_wr;
 2379                         if (next == bad_wr)
 2380                                 break;
 2381                 }
 2382 
 2383         ret2 = uverbs_response(attrs, &resp, sizeof(resp));
 2384         if (ret2)
 2385                 ret = ret2;
 2386 
 2387 out:
 2388         while (wr) {
 2389                 next = wr->next;
 2390                 kfree(wr);
 2391                 wr = next;
 2392         }
 2393 
 2394         return ret;
 2395 }
 2396 
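/*
 * Create an address handle from the user's description.  is_global
 * selects whether a GRH is attached (IB_AH_GRH): the global routing
 * header carries the dgid and related fields needed to reach destinations
 * outside the local subnet, and is always present on RoCE ports.
 */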
 2397 static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
 2398 {
 2399         struct ib_uverbs_create_ah       cmd;
 2400         struct ib_uverbs_create_ah_resp  resp;
 2401         struct ib_uobject               *uobj;
 2402         struct ib_pd                    *pd;
 2403         struct ib_ah                    *ah;
 2404         struct ib_ah_attr               attr = {};
 2405         int ret;
 2406         struct ib_device *ib_dev;
 2407 
 2408         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 2409         if (ret)
 2410                 return ret;
 2411 
 2412         uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
 2413         if (IS_ERR(uobj))
 2414                 return PTR_ERR(uobj);
 2415 
 2416         if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
 2417                 ret = -EINVAL;
 2418                 goto err;
 2419         }
 2420 
 2421         pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
 2422         if (!pd) {
 2423                 ret = -EINVAL;
 2424                 goto err;
 2425         }
 2426 
 2427         attr.dlid = cmd.attr.dlid;
 2428         attr.sl = cmd.attr.sl;
 2429         attr.src_path_bits = cmd.attr.src_path_bits;
 2430         attr.static_rate = cmd.attr.static_rate;
 2431         attr.port_num = cmd.attr.port_num;
 2432 
 2433         if (cmd.attr.is_global) {
 2434                 struct ib_global_route *grh = &attr.grh;
 2435 
 2436                 grh->flow_label = cmd.attr.grh.flow_label;
 2437                 grh->sgid_index = cmd.attr.grh.sgid_index;
 2438                 grh->hop_limit = cmd.attr.grh.hop_limit;
 2439                 grh->traffic_class = cmd.attr.grh.traffic_class;
 2440                 memcpy(grh->dgid.raw, cmd.attr.grh.dgid, sizeof(grh->dgid));
 2441                 attr.ah_flags = IB_AH_GRH;
 2442         } else {
 2443                 attr.ah_flags = 0;
 2444         }
 2445 
 2446         ah = ib_create_user_ah(pd, &attr, &attrs->driver_udata);
 2447         if (IS_ERR(ah)) {
 2448                 ret = PTR_ERR(ah);
 2449                 goto err_put;
 2450         }
 2451 
 2452         ah->uobject  = uobj;
 2453         uobj->user_handle = cmd.user_handle;
 2454         uobj->object = ah;
 2455 
 2456         resp.ah_handle = uobj->id;
 2457 
 2458         ret = uverbs_response(attrs, &resp, sizeof(resp));
 2459         if (ret)
 2460                 goto err_copy;
 2461 
 2462         uobj_put_obj_read(pd);
 2463         rdma_alloc_commit_uobject(uobj, attrs);
 2464         return 0;
 2465 
 2466 err_copy:
 2467         ib_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
 2468                            uverbs_get_cleared_udata(attrs));
 2469 
 2470 err_put:
 2471         uobj_put_obj_read(pd);
 2472 
 2473 err:
 2474         uobj_alloc_abort(uobj, attrs);
 2475         return ret;
 2476 }
 2477 
 2478 static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
 2479 {
 2480         struct ib_uverbs_destroy_ah cmd;
 2481         int ret;
 2482 
 2483         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 2484         if (ret)
 2485                 return ret;
 2486 
 2487         return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
 2488 }
 2489 
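/*
 * Multicast attach/detach maintain a per-QP shadow list (obj->mcast_list,
 * under mcast_lock) mirroring the hardware attachments.  Attach is
 * idempotent: a (gid, mlid) pair already on the list returns 0 without
 * calling the driver again, and detach refuses to remove a pair it never
 * recorded.
 */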
 2490 static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
 2491 {
 2492         struct ib_uverbs_attach_mcast cmd;
 2493         struct ib_qp                 *qp;
 2494         struct ib_uqp_object         *obj;
 2495         struct ib_uverbs_mcast_entry *mcast;
 2496         int                           ret;
 2497 
 2498         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 2499         if (ret)
 2500                 return ret;
 2501 
 2502         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 2503         if (!qp)
 2504                 return -EINVAL;
 2505 
 2506         obj = qp->uobject;
 2507 
 2508         mutex_lock(&obj->mcast_lock);
 2509         list_for_each_entry(mcast, &obj->mcast_list, list)
 2510                 if (cmd.mlid == mcast->lid &&
 2511                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
 2512                         ret = 0;
 2513                         goto out_put;
 2514                 }
 2515 
 2516         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
 2517         if (!mcast) {
 2518                 ret = -ENOMEM;
 2519                 goto out_put;
 2520         }
 2521 
 2522         mcast->lid = cmd.mlid;
 2523         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
 2524 
 2525         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
 2526         if (!ret)
 2527                 list_add_tail(&mcast->list, &obj->mcast_list);
 2528         else
 2529                 kfree(mcast);
 2530 
 2531 out_put:
 2532         mutex_unlock(&obj->mcast_lock);
 2533         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 2534                                 UVERBS_LOOKUP_READ);
 2535 
 2536         return ret;
 2537 }
 2538 
 2539 static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
 2540 {
 2541         struct ib_uverbs_detach_mcast cmd;
 2542         struct ib_uqp_object         *obj;
 2543         struct ib_qp                 *qp;
 2544         struct ib_uverbs_mcast_entry *mcast;
 2545         int                           ret;
 2546         bool                          found = false;
 2547 
 2548         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 2549         if (ret)
 2550                 return ret;
 2551 
 2552         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 2553         if (!qp)
 2554                 return -EINVAL;
 2555 
 2556         obj = qp->uobject;
 2557         mutex_lock(&obj->mcast_lock);
 2558 
 2559         list_for_each_entry(mcast, &obj->mcast_list, list)
 2560                 if (cmd.mlid == mcast->lid &&
 2561                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
 2562                         list_del(&mcast->list);
 2563                         kfree(mcast);
 2564                         found = true;
 2565                         break;
 2566                 }
 2567 
 2568         if (!found) {
 2569                 ret = -EINVAL;
 2570                 goto out_put;
 2571         }
 2572 
 2573         ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
 2574 
 2575 out_put:
 2576         mutex_unlock(&obj->mcast_lock);
 2577         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 2578                                 UVERBS_LOOKUP_READ);
 2579         return ret;
 2580 }
 2581 
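/*
 * Flow-steering rules may reference other uobjects (counters and flow
 * actions).  flow_resources_alloc() sizes two arrays for at most
 * num_specs such references, flow_resources_add() records each one and
 * takes a usecnt reference, and ib_uverbs_flow_resources_free() drops the
 * references and the arrays when the flow goes away.
 */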
 2582 struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
 2583 {
 2584         struct ib_uflow_resources *resources;
 2585 
 2586         resources = kzalloc(sizeof(*resources), GFP_KERNEL);
 2587 
 2588         if (!resources)
 2589                 return NULL;
 2590 
 2591         if (!num_specs)
 2592                 goto out;
 2593 
 2594         resources->counters =
 2595                 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
 2596         resources->collection =
 2597                 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
 2598 
 2599         if (!resources->counters || !resources->collection)
 2600                 goto err;
 2601 
 2602 out:
 2603         resources->max = num_specs;
 2604         return resources;
 2605 
 2606 err:
  2607         kfree(resources->counters);
               /* collection may have been allocated even when counters failed */
               kfree(resources->collection);
  2608         kfree(resources);
 2609 
 2610         return NULL;
 2611 }
 2612 EXPORT_SYMBOL(flow_resources_alloc);
 2613 
 2614 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
 2615 {
 2616         unsigned int i;
 2617 
 2618         if (!uflow_res)
 2619                 return;
 2620 
 2621         for (i = 0; i < uflow_res->collection_num; i++)
 2622                 atomic_dec(&uflow_res->collection[i]->usecnt);
 2623 
 2624         for (i = 0; i < uflow_res->counters_num; i++)
 2625                 atomic_dec(&uflow_res->counters[i]->usecnt);
 2626 
 2627         kfree(uflow_res->collection);
 2628         kfree(uflow_res->counters);
 2629         kfree(uflow_res);
 2630 }
 2631 EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
 2632 
 2633 void flow_resources_add(struct ib_uflow_resources *uflow_res,
 2634                         enum ib_flow_spec_type type,
 2635                         void *ibobj)
 2636 {
 2637         WARN_ON(uflow_res->num >= uflow_res->max);
 2638 
 2639         switch (type) {
 2640         case IB_FLOW_SPEC_ACTION_HANDLE:
 2641                 atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
 2642                 uflow_res->collection[uflow_res->collection_num++] =
 2643                         (struct ib_flow_action *)ibobj;
 2644                 break;
 2645         case IB_FLOW_SPEC_ACTION_COUNT:
 2646                 atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
 2647                 uflow_res->counters[uflow_res->counters_num++] =
 2648                         (struct ib_counters *)ibobj;
 2649                 break;
 2650         default:
 2651                 WARN_ON(1);
 2652         }
 2653 
 2654         uflow_res->num++;
 2655 }
 2656 EXPORT_SYMBOL(flow_resources_add);
 2657 
 2658 static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
 2659                                        struct ib_uverbs_flow_spec *kern_spec,
 2660                                        union ib_flow_spec *ib_spec,
 2661                                        struct ib_uflow_resources *uflow_res)
 2662 {
 2663         ib_spec->type = kern_spec->type;
 2664         switch (ib_spec->type) {
 2665         case IB_FLOW_SPEC_ACTION_TAG:
 2666                 if (kern_spec->flow_tag.size !=
 2667                     sizeof(struct ib_uverbs_flow_spec_action_tag))
 2668                         return -EINVAL;
 2669 
 2670                 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
 2671                 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
 2672                 break;
 2673         case IB_FLOW_SPEC_ACTION_DROP:
 2674                 if (kern_spec->drop.size !=
 2675                     sizeof(struct ib_uverbs_flow_spec_action_drop))
 2676                         return -EINVAL;
 2677 
 2678                 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
 2679                 break;
 2680         case IB_FLOW_SPEC_ACTION_HANDLE:
 2681                 if (kern_spec->action.size !=
 2682                     sizeof(struct ib_uverbs_flow_spec_action_handle))
 2683                         return -EOPNOTSUPP;
 2684                 ib_spec->action.act = uobj_get_obj_read(flow_action,
 2685                                                         UVERBS_OBJECT_FLOW_ACTION,
 2686                                                         kern_spec->action.handle,
 2687                                                         attrs);
 2688                 if (!ib_spec->action.act)
 2689                         return -EINVAL;
 2690                 ib_spec->action.size =
 2691                         sizeof(struct ib_flow_spec_action_handle);
 2692                 flow_resources_add(uflow_res,
 2693                                    IB_FLOW_SPEC_ACTION_HANDLE,
 2694                                    ib_spec->action.act);
 2695                 uobj_put_obj_read(ib_spec->action.act);
 2696                 break;
 2697         case IB_FLOW_SPEC_ACTION_COUNT:
 2698                 if (kern_spec->flow_count.size !=
 2699                         sizeof(struct ib_uverbs_flow_spec_action_count))
 2700                         return -EINVAL;
 2701                 ib_spec->flow_count.counters =
 2702                         uobj_get_obj_read(counters,
 2703                                           UVERBS_OBJECT_COUNTERS,
 2704                                           kern_spec->flow_count.handle,
 2705                                           attrs);
 2706                 if (!ib_spec->flow_count.counters)
 2707                         return -EINVAL;
 2708                 ib_spec->flow_count.size =
 2709                                 sizeof(struct ib_flow_spec_action_count);
 2710                 flow_resources_add(uflow_res,
 2711                                    IB_FLOW_SPEC_ACTION_COUNT,
 2712                                    ib_spec->flow_count.counters);
 2713                 uobj_put_obj_read(ib_spec->flow_count.counters);
 2714                 break;
 2715         default:
 2716                 return -EINVAL;
 2717         }
 2718         return 0;
 2719 }
 2720 
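/*
 * Trim a user-supplied filter to the size this kernel understands.  A
 * newer, larger user structure is accepted only if every byte beyond
 * ib_real_filter_sz is zero, i.e. the user is not actually relying on
 * attributes this kernel cannot honour.
 */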
 2721 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
 2722                                 u16 ib_real_filter_sz)
 2723 {
  2724         /*
  2725          * User-space filter structures must be 64-bit aligned; otherwise a
  2726          * larger filter could pass this check even though the kernel would
  2727          * not handle the additional, newer attributes it carries.
                */
 2728 
 2729         if (kern_filter_size > ib_real_filter_sz) {
 2730                 if (memchr_inv((const char *)kern_spec_filter +
 2731                                ib_real_filter_sz, 0,
 2732                                kern_filter_size - ib_real_filter_sz))
 2733                         return -EINVAL;
 2734                 return ib_real_filter_sz;
 2735         }
 2736         return kern_filter_size;
 2737 }
 2738 
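/*
 * Each ib_flow_*_filter ends in a real_sz marker, so
 * offsetof(..., real_sz) gives the number of meaningful filter bytes;
 * spec_filter_size() then bounds how much of the user's val/mask is
 * copied.  Some cases also range-check fields narrower than their
 * storage: the IPv6 flow label is 20 bits and the VXLAN tunnel id is
 * 24 bits, hence the BIT(20)/BIT(24) tests.
 */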
 2739 int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
 2740                                           const void *kern_spec_mask,
 2741                                           const void *kern_spec_val,
 2742                                           size_t kern_filter_sz,
 2743                                           union ib_flow_spec *ib_spec)
 2744 {
 2745         ssize_t actual_filter_sz;
 2746         ssize_t ib_filter_sz;
 2747 
 2748         /* User flow spec size must be aligned to 4 bytes */
 2749         if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
 2750                 return -EINVAL;
 2751 
 2752         ib_spec->type = type;
 2753 
 2754         if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
 2755                 return -EINVAL;
 2756 
 2757         switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 2758         case IB_FLOW_SPEC_ETH:
 2759                 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
 2760                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2761                                                     kern_filter_sz,
 2762                                                     ib_filter_sz);
 2763                 if (actual_filter_sz <= 0)
 2764                         return -EINVAL;
 2765                 ib_spec->size = sizeof(struct ib_flow_spec_eth);
 2766                 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
 2767                 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
 2768                 break;
 2769         case IB_FLOW_SPEC_IPV4:
 2770                 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
 2771                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2772                                                     kern_filter_sz,
 2773                                                     ib_filter_sz);
 2774                 if (actual_filter_sz <= 0)
 2775                         return -EINVAL;
 2776                 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
 2777                 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
 2778                 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
 2779                 break;
 2780         case IB_FLOW_SPEC_IPV6:
 2781                 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
 2782                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2783                                                     kern_filter_sz,
 2784                                                     ib_filter_sz);
 2785                 if (actual_filter_sz <= 0)
 2786                         return -EINVAL;
 2787                 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
 2788                 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
 2789                 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
 2790 
 2791                 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
 2792                     (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
 2793                         return -EINVAL;
 2794                 break;
 2795         case IB_FLOW_SPEC_TCP:
 2796         case IB_FLOW_SPEC_UDP:
 2797                 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
 2798                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2799                                                     kern_filter_sz,
 2800                                                     ib_filter_sz);
 2801                 if (actual_filter_sz <= 0)
 2802                         return -EINVAL;
 2803                 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
 2804                 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
 2805                 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 2806                 break;
 2807         case IB_FLOW_SPEC_VXLAN_TUNNEL:
 2808                 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
 2809                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2810                                                     kern_filter_sz,
 2811                                                     ib_filter_sz);
 2812                 if (actual_filter_sz <= 0)
 2813                         return -EINVAL;
 2814                 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
 2815                 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
 2816                 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
 2817 
 2818                 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
 2819                     (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
 2820                         return -EINVAL;
 2821                 break;
 2822         case IB_FLOW_SPEC_ESP:
 2823                 ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
 2824                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2825                                                     kern_filter_sz,
 2826                                                     ib_filter_sz);
 2827                 if (actual_filter_sz <= 0)
 2828                         return -EINVAL;
 2829                 ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
 2830                 memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
 2831                 memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
 2832                 break;
 2833         case IB_FLOW_SPEC_GRE:
 2834                 ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
 2835                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2836                                                     kern_filter_sz,
 2837                                                     ib_filter_sz);
 2838                 if (actual_filter_sz <= 0)
 2839                         return -EINVAL;
 2840                 ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
 2841                 memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
 2842                 memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
 2843                 break;
 2844         case IB_FLOW_SPEC_MPLS:
 2845                 ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
 2846                 actual_filter_sz = spec_filter_size(kern_spec_mask,
 2847                                                     kern_filter_sz,
 2848                                                     ib_filter_sz);
 2849                 if (actual_filter_sz <= 0)
 2850                         return -EINVAL;
 2851                 ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
 2852                 memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
 2853                 memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
 2854                 break;
 2855         default:
 2856                 return -EINVAL;
 2857         }
 2858         return 0;
 2859 }
 2860 
 2861 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
 2862                                        union ib_flow_spec *ib_spec)
 2863 {
 2864         size_t kern_filter_sz;
 2865         void *kern_spec_mask;
 2866         void *kern_spec_val;
 2867 
 2868         if (kern_spec->hdr.size < sizeof(struct ib_uverbs_flow_spec_hdr))
 2869                 return -EINVAL;
 2870         kern_filter_sz = kern_spec->hdr.size - sizeof(struct ib_uverbs_flow_spec_hdr);
 2871         kern_filter_sz /= 2;
 2872 
 2873         kern_spec_val = (u8 *)kern_spec +
 2874                 sizeof(struct ib_uverbs_flow_spec_hdr);
 2875         kern_spec_mask = (u8 *)kern_spec_val + kern_filter_sz;
 2876 
 2877         return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
 2878                                                      kern_spec_mask,
 2879                                                      kern_spec_val,
 2880                                                      kern_filter_sz, ib_spec);
 2881 }
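
      /*
       * The kernel ABI packs each filter as two equally sized halves right
       * after the spec header: first the value bytes to match, then the
       * mask selecting which of those bits are significant.  Illustrative
       * layout of what the pointer math above computes:
       *
       *	+--------------------------------+  (u8 *)kern_spec
       *	| struct ib_uverbs_flow_spec_hdr |
       *	+--------------------------------+  kern_spec_val
       *	| filter value (kern_filter_sz)  |
       *	+--------------------------------+  kern_spec_mask
       *	| filter mask  (kern_filter_sz)  |
       *	+--------------------------------+
       */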
 2882 
 2883 static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
 2884                                 struct ib_uverbs_flow_spec *kern_spec,
 2885                                 union ib_flow_spec *ib_spec,
 2886                                 struct ib_uflow_resources *uflow_res)
 2887 {
 2888         if (kern_spec->reserved)
 2889                 return -EINVAL;
 2890 
 2891         if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
 2892                 return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
 2893                                                    uflow_res);
 2894         else
 2895                 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
 2896 }
 2897 
 2898 static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 2899 {
 2900         struct ib_uverbs_ex_create_wq cmd;
 2901         struct ib_uverbs_ex_create_wq_resp resp = {};
 2902         struct ib_uwq_object           *obj;
 2903         int err = 0;
 2904         struct ib_cq *cq;
 2905         struct ib_pd *pd;
 2906         struct ib_wq *wq;
 2907         struct ib_wq_init_attr wq_init_attr = {};
 2908         struct ib_device *ib_dev;
 2909 
 2910         err = uverbs_request(attrs, &cmd, sizeof(cmd));
 2911         if (err)
 2912                 return err;
 2913 
 2914         if (cmd.comp_mask)
 2915                 return -EOPNOTSUPP;
 2916 
 2917         obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
 2918                                                  &ib_dev);
 2919         if (IS_ERR(obj))
 2920                 return PTR_ERR(obj);
 2921 
 2922         pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
 2923         if (!pd) {
 2924                 err = -EINVAL;
 2925                 goto err_uobj;
 2926         }
 2927 
 2928         cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
 2929         if (!cq) {
 2930                 err = -EINVAL;
 2931                 goto err_put_pd;
 2932         }
 2933 
 2934         wq_init_attr.cq = cq;
 2935         wq_init_attr.max_sge = cmd.max_sge;
 2936         wq_init_attr.max_wr = cmd.max_wr;
 2937         wq_init_attr.wq_context = attrs->ufile;
 2938         wq_init_attr.wq_type = cmd.wq_type;
 2939         wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
 2940         wq_init_attr.create_flags = cmd.create_flags;
 2941         INIT_LIST_HEAD(&obj->uevent.event_list);
 2942 
 2943         wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
 2944         if (IS_ERR(wq)) {
 2945                 err = PTR_ERR(wq);
 2946                 goto err_put_cq;
 2947         }
 2948 
 2949         wq->uobject = obj;
 2950         obj->uevent.uobject.object = wq;
 2951         wq->wq_type = wq_init_attr.wq_type;
 2952         wq->cq = cq;
 2953         wq->pd = pd;
 2954         wq->device = pd->device;
 2955         wq->wq_context = wq_init_attr.wq_context;
 2956         atomic_set(&wq->usecnt, 0);
 2957         atomic_inc(&pd->usecnt);
 2958         atomic_inc(&cq->usecnt);
 2961 
 2962         memset(&resp, 0, sizeof(resp));
 2963         resp.wq_handle = obj->uevent.uobject.id;
 2964         resp.max_sge = wq_init_attr.max_sge;
 2965         resp.max_wr = wq_init_attr.max_wr;
 2966         resp.wqn = wq->wq_num;
 2967         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 2968         err = uverbs_response(attrs, &resp, sizeof(resp));
 2969         if (err)
 2970                 goto err_copy;
 2971 
 2972         uobj_put_obj_read(pd);
 2973         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
 2974                                 UVERBS_LOOKUP_READ);
 2975         rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
 2976         return 0;
 2977 
 2978 err_copy:
 2979         ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
 2980 err_put_cq:
 2981         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
 2982                                 UVERBS_LOOKUP_READ);
 2983 err_put_pd:
 2984         uobj_put_obj_read(pd);
 2985 err_uobj:
 2986         uobj_alloc_abort(&obj->uevent.uobject, attrs);
 2987 
 2988         return err;
 2989 }
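
      /*
       * Userspace counterpart, for reference: libibverbs issues this
       * command from ibv_create_wq().  A minimal, illustrative consumer
       * (ctx, pd and cq are assumed to exist; error handling elided):
       *
       *	struct ibv_wq_init_attr wq_attr = {
       *		.wq_type = IBV_WQT_RQ,
       *		.max_wr  = 64,
       *		.max_sge = 1,
       *		.pd      = pd,
       *		.cq      = cq,
       *	};
       *	struct ibv_wq *wq = ibv_create_wq(ctx, &wq_attr);
       */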
 2990 
 2991 static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
 2992 {
 2993         struct ib_uverbs_ex_destroy_wq  cmd;
 2994         struct ib_uverbs_ex_destroy_wq_resp     resp = {};
 2995         struct ib_uobject               *uobj;
 2996         struct ib_uwq_object            *obj;
 2997         int                             ret;
 2998 
 2999         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3000         if (ret)
 3001                 return ret;
 3002 
 3003         if (cmd.comp_mask)
 3004                 return -EOPNOTSUPP;
 3005 
 3006         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 3007         uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
 3008         if (IS_ERR(uobj))
 3009                 return PTR_ERR(uobj);
 3010 
 3011         obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
 3012         resp.events_reported = obj->uevent.events_reported;
 3013 
 3014         uobj_put_destroy(uobj);
 3015 
 3016         return uverbs_response(attrs, &resp, sizeof(resp));
 3017 }
 3018 
 3019 static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
 3020 {
 3021         struct ib_uverbs_ex_modify_wq cmd;
 3022         struct ib_wq *wq;
 3023         struct ib_wq_attr wq_attr = {};
 3024         int ret;
 3025 
 3026         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3027         if (ret)
 3028                 return ret;
 3029 
 3030         if (!cmd.attr_mask)
 3031                 return -EINVAL;
 3032 
 3033         if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
 3034                 return -EINVAL;
 3035 
 3036         wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
 3037         if (!wq)
 3038                 return -EINVAL;
 3039 
 3040         wq_attr.curr_wq_state = cmd.curr_wq_state;
 3041         wq_attr.wq_state = cmd.wq_state;
 3042         if (cmd.attr_mask & IB_WQ_FLAGS) {
 3043                 wq_attr.flags = cmd.flags;
 3044                 wq_attr.flags_mask = cmd.flags_mask;
 3045         }
 3046         ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
 3047                                         &attrs->driver_udata);
 3048         rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
 3049                                 UVERBS_LOOKUP_READ);
 3050         return ret;
 3051 }
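
      /*
       * Userspace counterpart, for reference: ibv_modify_wq() issues this
       * command.  Illustrative sketch moving a WQ to the ready state (wq
       * is assumed to come from a prior ibv_create_wq()):
       *
       *	struct ibv_wq_attr attr = {
       *		.attr_mask = IBV_WQ_ATTR_STATE,
       *		.wq_state  = IBV_WQS_RDY,
       *	};
       *	ibv_modify_wq(wq, &attr);
       */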
 3052 
 3053 static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 3054 {
 3055         struct ib_uverbs_ex_create_rwq_ind_table cmd;
 3056         struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
 3057         struct ib_uobject                 *uobj;
 3058         int err;
 3059         struct ib_rwq_ind_table_init_attr init_attr = {};
 3060         struct ib_rwq_ind_table *rwq_ind_tbl;
 3061         struct ib_wq    **wqs = NULL;
 3062         u32 *wqs_handles = NULL;
 3063         struct ib_wq    *wq = NULL;
 3064         int i, j, num_read_wqs;
 3065         u32 num_wq_handles;
 3066         struct uverbs_req_iter iter;
 3067         struct ib_device *ib_dev;
 3068 
 3069         err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 3070         if (err)
 3071                 return err;
 3072 
 3073         if (cmd.comp_mask)
 3074                 return -EOPNOTSUPP;
 3075 
 3076         if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
 3077                 return -EINVAL;
 3078 
 3079         num_wq_handles = 1 << cmd.log_ind_tbl_size;
 3080         wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
 3081                               GFP_KERNEL);
 3082         if (!wqs_handles)
 3083                 return -ENOMEM;
 3084 
 3085         err = uverbs_request_next(&iter, wqs_handles,
 3086                                   num_wq_handles * sizeof(__u32));
 3087         if (err)
 3088                 goto err_free;
 3089 
 3090         err = uverbs_request_finish(&iter);
 3091         if (err)
 3092                 goto err_free;
 3093 
 3094         wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
 3095         if (!wqs) {
 3096                 err = -ENOMEM;
 3097                 goto err_free;
 3098         }
 3099 
 3100         for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
 3101                         num_read_wqs++) {
 3102                 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
 3103                                        wqs_handles[num_read_wqs], attrs);
 3104                 if (!wq) {
 3105                         err = -EINVAL;
 3106                         goto put_wqs;
 3107                 }
 3108 
 3109                 wqs[num_read_wqs] = wq;
 3110         }
 3111 
 3112         uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
 3113         if (IS_ERR(uobj)) {
 3114                 err = PTR_ERR(uobj);
 3115                 goto put_wqs;
 3116         }
 3117 
 3118         init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
 3119         init_attr.ind_tbl = wqs;
 3120 
 3121         rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
 3122                                                        &attrs->driver_udata);
 3123 
 3124         if (IS_ERR(rwq_ind_tbl)) {
 3125                 err = PTR_ERR(rwq_ind_tbl);
 3126                 goto err_uobj;
 3127         }
 3128 
 3129         rwq_ind_tbl->ind_tbl = wqs;
 3130         rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
 3131         rwq_ind_tbl->uobject = uobj;
 3132         uobj->object = rwq_ind_tbl;
 3133         rwq_ind_tbl->device = ib_dev;
 3134         atomic_set(&rwq_ind_tbl->usecnt, 0);
 3135 
 3136         for (i = 0; i < num_wq_handles; i++)
 3137                 atomic_inc(&wqs[i]->usecnt);
 3138 
 3139         resp.ind_tbl_handle = uobj->id;
 3140         resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
 3141         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 3142 
 3143         err = uverbs_response(attrs, &resp, sizeof(resp));
 3144         if (err)
 3145                 goto err_copy;
 3146 
 3147         kfree(wqs_handles);
 3148 
 3149         for (j = 0; j < num_read_wqs; j++)
 3150                 rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
 3151                                         UVERBS_LOOKUP_READ);
 3152 
 3153         rdma_alloc_commit_uobject(uobj, attrs);
 3154         return 0;
 3155 
 3156 err_copy:
 3157         ib_destroy_rwq_ind_table(rwq_ind_tbl);
 3158 err_uobj:
 3159         uobj_alloc_abort(uobj, attrs);
 3160 put_wqs:
 3161         for (j = 0; j < num_read_wqs; j++)
 3162                 rdma_lookup_put_uobject(&wqs[j]->uobject->uevent.uobject,
 3163                                         UVERBS_LOOKUP_READ);
 3164 err_free:
 3165         kfree(wqs_handles);
 3166         kfree(wqs);
 3167         return err;
 3168 }
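
      /*
       * Userspace counterpart, for reference: ibv_create_rwq_ind_table().
       * Illustrative sketch building a four-entry RSS indirection table,
       * where wqs is an assumed array of four previously created
       * struct ibv_wq pointers:
       *
       *	struct ibv_rwq_ind_table_init_attr init = {
       *		.log_ind_tbl_size = 2,
       *		.ind_tbl = wqs,
       *	};
       *	struct ibv_rwq_ind_table *tbl =
       *		ibv_create_rwq_ind_table(ctx, &init);
       */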
 3169 
 3170 static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 3171 {
 3172         struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
 3173         int ret;
 3174 
 3175         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3176         if (ret)
 3177                 return ret;
 3178 
 3179         if (cmd.comp_mask)
 3180                 return -EOPNOTSUPP;
 3181 
 3182         return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
 3183                                     cmd.ind_tbl_handle, attrs);
 3184 }
 3185 
 3186 static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 3187 {
 3188         struct ib_uverbs_create_flow      cmd;
 3189         struct ib_uverbs_create_flow_resp resp;
 3190         struct ib_uobject                 *uobj;
 3191         struct ib_flow                    *flow_id;
 3192         struct ib_uverbs_flow_attr        *kern_flow_attr;
 3193         struct ib_flow_attr               *flow_attr;
 3194         struct ib_qp                      *qp;
 3195         struct ib_uflow_resources         *uflow_res;
 3196         struct ib_uverbs_flow_spec_hdr    *kern_spec;
 3197         struct uverbs_req_iter iter;
 3198         int err;
 3199         void *ib_spec;
 3200         int i;
 3201         struct ib_device *ib_dev;
 3202 
 3203         err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 3204         if (err)
 3205                 return err;
 3206 
 3207         if (cmd.comp_mask)
 3208                 return -EINVAL;
 3209 
 3210         if (priv_check(curthread, PRIV_NET_RAW) != 0)
 3211                 return -EPERM;
 3212 
 3213         if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
 3214                 return -EINVAL;
 3215 
 3216         if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
 3217             ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
 3218              (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
 3219                 return -EINVAL;
 3220 
 3221         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
 3222                 return -EINVAL;
 3223 
 3224         if (cmd.flow_attr.size >
 3225             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 3226                 return -EINVAL;
 3227 
 3228         if (cmd.flow_attr.reserved[0] ||
 3229             cmd.flow_attr.reserved[1])
 3230                 return -EINVAL;
 3231 
 3232         if (cmd.flow_attr.num_of_specs) {
 3233                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
 3234                                          GFP_KERNEL);
 3235                 if (!kern_flow_attr)
 3236                         return -ENOMEM;
 3237 
 3238                 *kern_flow_attr = cmd.flow_attr;
 3239                 err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
 3240                                           cmd.flow_attr.size);
 3241                 if (err)
 3242                         goto err_free_attr;
 3243         } else {
 3244                 kern_flow_attr = &cmd.flow_attr;
 3245         }
 3246 
 3247         err = uverbs_request_finish(&iter);
 3248         if (err)
 3249                 goto err_free_attr;
 3250 
 3251         uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
 3252         if (IS_ERR(uobj)) {
 3253                 err = PTR_ERR(uobj);
 3254                 goto err_free_attr;
 3255         }
 3256 
 3257         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
 3258         if (!qp) {
 3259                 err = -EINVAL;
 3260                 goto err_uobj;
 3261         }
 3262 
 3263         if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
 3264                 err = -EINVAL;
 3265                 goto err_put;
 3266         }
 3267 
 3268         flow_attr = kzalloc(struct_size(flow_attr, flows,
 3269                                 cmd.flow_attr.num_of_specs), GFP_KERNEL);
 3270         if (!flow_attr) {
 3271                 err = -ENOMEM;
 3272                 goto err_put;
 3273         }
 3274         uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
 3275         if (!uflow_res) {
 3276                 err = -ENOMEM;
 3277                 goto err_free_flow_attr;
 3278         }
 3279 
 3280         flow_attr->type = kern_flow_attr->type;
 3281         flow_attr->priority = kern_flow_attr->priority;
 3282         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
 3283         flow_attr->port = kern_flow_attr->port;
 3284         flow_attr->flags = kern_flow_attr->flags;
 3285         flow_attr->size = sizeof(*flow_attr);
 3286 
 3287         kern_spec = kern_flow_attr->flow_specs;
 3288         ib_spec = flow_attr + 1;
 3289         for (i = 0; i < flow_attr->num_of_specs &&
 3290                         cmd.flow_attr.size >= sizeof(*kern_spec) &&
 3291                         cmd.flow_attr.size >= kern_spec->size;
 3292              i++) {
 3293                 err = kern_spec_to_ib_spec(
 3294                                 attrs, (struct ib_uverbs_flow_spec *)kern_spec,
 3295                                 ib_spec, uflow_res);
 3296                 if (err)
 3297                         goto err_free;
 3298 
 3299                 flow_attr->size +=
 3300                         ((union ib_flow_spec *) ib_spec)->size;
 3301                 cmd.flow_attr.size -= kern_spec->size;
 3302                 kern_spec = (struct ib_uverbs_flow_spec_hdr *)((u8 *)kern_spec + kern_spec->size);
 3303                 ib_spec = (u8 *)ib_spec + ((union ib_flow_spec *) ib_spec)->size;
 3304         }
 3305         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
 3306                 pr_warn("create flow failed, flow %d: %d bytes left from uverbs cmd\n",
 3307                         i, cmd.flow_attr.size);
 3308                 err = -EINVAL;
 3309                 goto err_free;
 3310         }
 3311 
 3312         flow_id = qp->device->create_flow(
 3313                 qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
 3314 
 3315         if (IS_ERR(flow_id)) {
 3316                 err = PTR_ERR(flow_id);
 3317                 goto err_free;
 3318         }
 3319 
 3320         ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
 3321 
 3322         memset(&resp, 0, sizeof(resp));
 3323         resp.flow_handle = uobj->id;
 3324 
 3325         err = uverbs_response(attrs, &resp, sizeof(resp));
 3326         if (err)
 3327                 goto err_copy;
 3328 
 3329         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 3330                                 UVERBS_LOOKUP_READ);
 3331         kfree(flow_attr);
 3332         if (cmd.flow_attr.num_of_specs)
 3333                 kfree(kern_flow_attr);
 3334         rdma_alloc_commit_uobject(uobj, attrs);
 3335         return 0;
 3336 err_copy:
 3337         if (!qp->device->destroy_flow(flow_id))
 3338                 atomic_dec(&qp->usecnt);
 3339 err_free:
 3340         ib_uverbs_flow_resources_free(uflow_res);
 3341 err_free_flow_attr:
 3342         kfree(flow_attr);
 3343 err_put:
 3344         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
 3345                                 UVERBS_LOOKUP_READ);
 3346 err_uobj:
 3347         uobj_alloc_abort(uobj, attrs);
 3348 err_free_attr:
 3349         if (cmd.flow_attr.num_of_specs)
 3350                 kfree(kern_flow_attr);
 3351         return err;
 3352 }
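
      /*
       * Userspace counterpart, for reference: ibv_create_flow().
       * Illustrative sketch steering one destination MAC to a raw-packet
       * QP (qp assumed; error handling elided).  The val/mask halves are
       * exactly what ib_uverbs_kern_spec_to_ib_spec_filter() unpacks:
       *
       *	struct {
       *		struct ibv_flow_attr	 attr;
       *		struct ibv_flow_spec_eth eth;
       *	} flow = {
       *		.attr = {
       *			.type	      = IBV_FLOW_ATTR_NORMAL,
       *			.size	      = sizeof(flow),
       *			.num_of_specs = 1,
       *			.port	      = 1,
       *		},
       *		.eth = {
       *			.type = IBV_FLOW_SPEC_ETH,
       *			.size = sizeof(struct ibv_flow_spec_eth),
       *			.val.dst_mac  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
       *			.mask.dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
       *		},
       *	};
       *	struct ibv_flow *f = ibv_create_flow(qp, &flow.attr);
       */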
 3353 
 3354 static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
 3355 {
 3356         struct ib_uverbs_destroy_flow   cmd;
 3357         int                             ret;
 3358 
 3359         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3360         if (ret)
 3361                 return ret;
 3362 
 3363         if (cmd.comp_mask)
 3364                 return -EINVAL;
 3365 
 3366         return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
 3367 }
 3368 
 3369 static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 3370                                 struct ib_uverbs_create_xsrq *cmd,
 3371                                 struct ib_udata *udata)
 3372 {
 3373         struct ib_uverbs_create_srq_resp resp;
 3374         struct ib_usrq_object           *obj;
 3375         struct ib_pd                    *pd;
 3376         struct ib_srq                   *srq;
 3377         struct ib_uobject               *uninitialized_var(xrcd_uobj);
 3378         struct ib_srq_init_attr          attr;
 3379         int ret;
 3380         struct ib_device *ib_dev;
 3381 
 3382         obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
 3383                                                   &ib_dev);
 3384         if (IS_ERR(obj))
 3385                 return PTR_ERR(obj);
 3386 
 3387         if (cmd->srq_type == IB_SRQT_TM)
 3388                 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
 3389 
 3390         if (cmd->srq_type == IB_SRQT_XRC) {
 3391                 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
 3392                                           attrs);
 3393                 if (IS_ERR(xrcd_uobj)) {
 3394                         ret = -EINVAL;
 3395                         goto err;
 3396                 }
 3397 
 3398                 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
 3399                 if (!attr.ext.xrc.xrcd) {
 3400                         ret = -EINVAL;
 3401                         goto err_put_xrcd;
 3402                 }
 3403 
 3404                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 3405                 atomic_inc(&obj->uxrcd->refcnt);
 3406         }
 3407 
 3408         if (ib_srq_has_cq(cmd->srq_type)) {
 3409                 attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
 3410                                                 cmd->cq_handle, attrs);
 3411                 if (!attr.ext.cq) {
 3412                         ret = -EINVAL;
 3413                         goto err_put_xrcd;
 3414                 }
 3415         }
 3416 
 3417         pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
 3418         if (!pd) {
 3419                 ret = -EINVAL;
 3420                 goto err_put_cq;
 3421         }
 3422 
 3423         attr.event_handler  = ib_uverbs_srq_event_handler;
 3424         attr.srq_context    = attrs->ufile;
 3425         attr.srq_type       = cmd->srq_type;
 3426         attr.attr.max_wr    = cmd->max_wr;
 3427         attr.attr.max_sge   = cmd->max_sge;
 3428         attr.attr.srq_limit = cmd->srq_limit;
 3429 
 3430         INIT_LIST_HEAD(&obj->uevent.event_list);
 3431 
 3432         srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
 3433         if (!srq) {
 3434                 ret = -ENOMEM;
 3435                 goto err_put;
 3436         }
 3437 
 3438         srq->device        = pd->device;
 3439         srq->pd            = pd;
 3440         srq->srq_type      = cmd->srq_type;
 3441         srq->uobject       = obj;
 3442         srq->event_handler = attr.event_handler;
 3443         srq->srq_context   = attr.srq_context;
 3444 
 3445         ret = pd->device->create_srq(srq, &attr, udata);
 3446         if (ret)
 3447                 goto err_free;
 3448 
 3449         if (ib_srq_has_cq(cmd->srq_type)) {
 3450                 srq->ext.cq       = attr.ext.cq;
 3451                 atomic_inc(&attr.ext.cq->usecnt);
 3452         }
 3453 
 3454         if (cmd->srq_type == IB_SRQT_XRC) {
 3455                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
 3456                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
 3457         }
 3458 
 3459         atomic_inc(&pd->usecnt);
 3460         atomic_set(&srq->usecnt, 0);
 3461 
 3462         obj->uevent.uobject.object = srq;
 3463         obj->uevent.uobject.user_handle = cmd->user_handle;
 3464 
 3465         memset(&resp, 0, sizeof(resp));
 3466         resp.srq_handle = obj->uevent.uobject.id;
 3467         resp.max_wr     = attr.attr.max_wr;
 3468         resp.max_sge    = attr.attr.max_sge;
 3469         if (cmd->srq_type == IB_SRQT_XRC)
 3470                 resp.srqn = srq->ext.xrc.srq_num;
 3471 
 3472         ret = uverbs_response(attrs, &resp, sizeof(resp));
 3473         if (ret)
 3474                 goto err_copy;
 3475 
 3476         if (cmd->srq_type == IB_SRQT_XRC)
 3477                 uobj_put_read(xrcd_uobj);
 3478 
 3479         if (ib_srq_has_cq(cmd->srq_type))
 3480                 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
 3481                                         UVERBS_LOOKUP_READ);
 3482 
 3483         uobj_put_obj_read(pd);
 3484         rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
 3485         return 0;
 3486 
 3487 err_copy:
 3488         ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
 3489         /* srq was already freed inside ib_destroy_srq_user() */
 3490         srq = NULL;
 3491 err_free:
 3492         kfree(srq);
 3493 err_put:
 3494         uobj_put_obj_read(pd);
 3495 
 3496 err_put_cq:
 3497         if (ib_srq_has_cq(cmd->srq_type))
 3498                 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
 3499                                         UVERBS_LOOKUP_READ);
 3500 
 3501 err_put_xrcd:
 3502         if (cmd->srq_type == IB_SRQT_XRC) {
 3503                 atomic_dec(&obj->uxrcd->refcnt);
 3504                 uobj_put_read(xrcd_uobj);
 3505         }
 3506 
 3507 err:
 3508         uobj_alloc_abort(&obj->uevent.uobject, attrs);
 3509         return ret;
 3510 }
 3511 
 3512 static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
 3513 {
 3514         struct ib_uverbs_create_srq      cmd;
 3515         struct ib_uverbs_create_xsrq     xcmd;
 3516         int ret;
 3517 
 3518         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3519         if (ret)
 3520                 return ret;
 3521 
 3522         memset(&xcmd, 0, sizeof(xcmd));
 3523         xcmd.response    = cmd.response;
 3524         xcmd.user_handle = cmd.user_handle;
 3525         xcmd.srq_type    = IB_SRQT_BASIC;
 3526         xcmd.pd_handle   = cmd.pd_handle;
 3527         xcmd.max_wr      = cmd.max_wr;
 3528         xcmd.max_sge     = cmd.max_sge;
 3529         xcmd.srq_limit   = cmd.srq_limit;
 3530 
 3531         return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
 3532 }
 3533 
 3534 static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
 3535 {
 3536         struct ib_uverbs_create_xsrq     cmd;
 3537         int ret;
 3538 
 3539         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3540         if (ret)
 3541                 return ret;
 3542 
 3543         return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
 3544 }
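
      /*
       * Userspace counterpart, for reference: ibv_create_srq() maps to
       * the basic-SRQ path above (IB_SRQT_BASIC).  Illustrative sketch
       * (pd assumed; error handling elided):
       *
       *	struct ibv_srq_init_attr init = {
       *		.attr = { .max_wr = 128, .max_sge = 1 },
       *	};
       *	struct ibv_srq *srq = ibv_create_srq(pd, &init);
       */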
 3545 
 3546 static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
 3547 {
 3548         struct ib_uverbs_modify_srq cmd;
 3549         struct ib_srq              *srq;
 3550         struct ib_srq_attr          attr;
 3551         int                         ret;
 3552 
 3553         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3554         if (ret)
 3555                 return ret;
 3556 
 3557         srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
 3558         if (!srq)
 3559                 return -EINVAL;
 3560 
 3561         attr.max_wr    = cmd.max_wr;
 3562         attr.srq_limit = cmd.srq_limit;
 3563 
 3564         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
 3565                                           &attrs->driver_udata);
 3566 
 3567         rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
 3568                                 UVERBS_LOOKUP_READ);
 3569 
 3570         return ret;
 3571 }
 3572 
 3573 static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
 3574 {
 3575         struct ib_uverbs_query_srq      cmd;
 3576         struct ib_uverbs_query_srq_resp resp;
 3577         struct ib_srq_attr              attr;
 3578         struct ib_srq                   *srq;
 3579         int                             ret;
 3580 
 3581         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3582         if (ret)
 3583                 return ret;
 3584 
 3585         srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
 3586         if (!srq)
 3587                 return -EINVAL;
 3588 
 3589         ret = ib_query_srq(srq, &attr);
 3590 
 3591         rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
 3592                                 UVERBS_LOOKUP_READ);
 3593 
 3594         if (ret)
 3595                 return ret;
 3596 
 3597         memset(&resp, 0, sizeof(resp));
 3598 
 3599         resp.max_wr    = attr.max_wr;
 3600         resp.max_sge   = attr.max_sge;
 3601         resp.srq_limit = attr.srq_limit;
 3602 
 3603         return uverbs_response(attrs, &resp, sizeof(resp));
 3604 }
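
      /*
       * Userspace counterparts, for reference: ibv_modify_srq() and
       * ibv_query_srq().  Illustrative sketch arming the SRQ limit event
       * and reading back the current attributes (srq assumed):
       *
       *	struct ibv_srq_attr attr = { .srq_limit = 16 };
       *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
       *	ibv_query_srq(srq, &attr);
       */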
 3605 
 3606 static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
 3607 {
 3608         struct ib_uverbs_destroy_srq      cmd;
 3609         struct ib_uverbs_destroy_srq_resp resp;
 3610         struct ib_uobject                *uobj;
 3611         struct ib_uevent_object          *obj;
 3612         int ret;
 3613 
 3614         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3615         if (ret)
 3616                 return ret;
 3617 
 3618         uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
 3619         if (IS_ERR(uobj))
 3620                 return PTR_ERR(uobj);
 3621 
 3622         obj = container_of(uobj, struct ib_uevent_object, uobject);
 3623         memset(&resp, 0, sizeof(resp));
 3624         resp.events_reported = obj->events_reported;
 3625 
 3626         uobj_put_destroy(uobj);
 3627 
 3628         return uverbs_response(attrs, &resp, sizeof(resp));
 3629 }
 3630 
 3631 static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
 3632 {
 3633         struct ib_uverbs_ex_query_device_resp resp = {};
 3634         struct ib_uverbs_ex_query_device  cmd;
 3635         struct ib_device_attr attr = {0};
 3636         struct ib_ucontext *ucontext;
 3637         struct ib_device *ib_dev;
 3638         int err;
 3639 
 3640         ucontext = ib_uverbs_get_ucontext(attrs);
 3641         if (IS_ERR(ucontext))
 3642                 return PTR_ERR(ucontext);
 3643         ib_dev = ucontext->device;
 3644 
 3645         err = uverbs_request(attrs, &cmd, sizeof(cmd));
 3646         if (err)
 3647                 return err;
 3648 
 3649         if (cmd.comp_mask)
 3650                 return -EINVAL;
 3651 
 3652         if (cmd.reserved)
 3653                 return -EINVAL;
 3654 
 3655         err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
 3656         if (err)
 3657                 return err;
 3658 
 3659         copy_query_dev_fields(ucontext, &resp.base, &attr);
 3660 
 3661         resp.odp_caps.general_caps = attr.odp_caps.general_caps;
 3662         resp.odp_caps.per_transport_caps.rc_odp_caps =
 3663                 attr.odp_caps.per_transport_caps.rc_odp_caps;
 3664         resp.odp_caps.per_transport_caps.uc_odp_caps =
 3665                 attr.odp_caps.per_transport_caps.uc_odp_caps;
 3666         resp.odp_caps.per_transport_caps.ud_odp_caps =
 3667                 attr.odp_caps.per_transport_caps.ud_odp_caps;
 3668         resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
 3669 
 3670         resp.timestamp_mask = attr.timestamp_mask;
 3671         resp.hca_core_clock = attr.hca_core_clock;
 3672         resp.device_cap_flags_ex = attr.device_cap_flags;
 3673         resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
 3674         resp.rss_caps.max_rwq_indirection_tables =
 3675                 attr.rss_caps.max_rwq_indirection_tables;
 3676         resp.rss_caps.max_rwq_indirection_table_size =
 3677                 attr.rss_caps.max_rwq_indirection_table_size;
 3678         resp.max_wq_type_rq = attr.max_wq_type_rq;
 3679         resp.raw_packet_caps = attr.raw_packet_caps;
 3680         resp.tm_caps.max_rndv_hdr_size  = attr.tm_caps.max_rndv_hdr_size;
 3681         resp.tm_caps.max_num_tags       = attr.tm_caps.max_num_tags;
 3682         resp.tm_caps.max_ops            = attr.tm_caps.max_ops;
 3683         resp.tm_caps.max_sge            = attr.tm_caps.max_sge;
 3684         resp.tm_caps.flags              = attr.tm_caps.flags;
 3685         resp.cq_moderation_caps.max_cq_moderation_count  =
 3686                 attr.cq_caps.max_cq_moderation_count;
 3687         resp.cq_moderation_caps.max_cq_moderation_period =
 3688                 attr.cq_caps.max_cq_moderation_period;
 3689         resp.max_dm_size = attr.max_dm_size;
 3690         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 3691 
 3692         return uverbs_response(attrs, &resp, sizeof(resp));
 3693 }
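
      /*
       * Userspace counterpart, for reference: ibv_query_device_ex()
       * drives this extended query.  Illustrative sketch (ctx assumed):
       *
       *	struct ibv_device_attr_ex attr;
       *	if (ibv_query_device_ex(ctx, NULL, &attr) == 0)
       *		printf("max WQ-type-RQ: %u\n", attr.max_wq_type_rq);
       */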
 3694 
 3695 static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
 3696 {
 3697         struct ib_uverbs_ex_modify_cq cmd;
 3698         struct ib_cq *cq;
 3699         int ret;
 3700 
 3701         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 3702         if (ret)
 3703                 return ret;
 3704 
 3705         if (!cmd.attr_mask || cmd.reserved)
 3706                 return -EINVAL;
 3707 
 3708         if (cmd.attr_mask > IB_CQ_MODERATE)
 3709                 return -EOPNOTSUPP;
 3710 
 3711         cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
 3712         if (!cq)
 3713                 return -EINVAL;
 3714 
 3715         ret = ib_modify_cq(cq, cmd.attr.cq_count, cmd.attr.cq_period);
 3716 
 3717         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
 3718                                 UVERBS_LOOKUP_READ);
 3719         return ret;
 3720 }
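
      /*
       * Userspace counterpart, for reference: ibv_modify_cq().
       * Illustrative sketch setting CQ moderation to 16 completions or
       * 100 usec, whichever comes first (cq assumed):
       *
       *	struct ibv_modify_cq_attr cq_attr = {
       *		.attr_mask = IBV_CQ_ATTR_MODERATE,
       *		.moderate  = { .cq_count = 16, .cq_period = 100 },
       *	};
       *	ibv_modify_cq(cq, &cq_attr);
       */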
 3721 
 3722 /*
 3723  * Describe the input structs for write(). Some write methods have an input-
 3724  * only struct; most have both an input and an output. If the struct has an output then
 3725  * the 'response' u64 must be the first field in the request structure.
 3726  *
 3727  * If udata is present then both the request and response structs have a
 3728  * trailing driver_data flex array. In this case the size of the base struct
 3729  * cannot be changed.
 3730  */
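
      /*
       * Illustrative (hypothetical) conforming pair for the rules above;
       * the response pointer comes first in the request and driver_data
       * flex arrays come last when udata is present:
       *
       *	struct ib_uverbs_foo {
       *		__aligned_u64	response;	(must be first)
       *		__u32		handle;
       *		__u32		reserved;
       *		__u64		driver_data[];	(trailing flex array)
       *	};
       *	struct ib_uverbs_foo_resp {
       *		__u32		result;
       *		__u32		reserved;
       *		__u64		driver_data[];
       *	};
       */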
 3731 #define UAPI_DEF_WRITE_IO(req, resp)                                           \
 3732         .write.has_resp = 1 +                                                  \
 3733                           BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
 3734                           BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) !=    \
 3735                                             sizeof(u64)),                      \
 3736         .write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
 3737 
 3738 #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
 3739 
 3740 #define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
 3741         UAPI_DEF_WRITE_IO(req, resp),                                          \
 3742                 .write.has_udata =                                             \
 3743                         1 +                                                    \
 3744                         BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
 3745                                           sizeof(req)) +                       \
 3746                         BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
 3747                                           sizeof(resp))
 3748 
 3749 #define UAPI_DEF_WRITE_UDATA_I(req)                                            \
 3750         UAPI_DEF_WRITE_I(req),                                                 \
 3751                 .write.has_udata =                                             \
 3752                         1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
 3753                                               sizeof(req))
 3754 
 3755 /*
 3756  * The _EX versions are for use with WRITE_EX and allow the last struct member
 3757  * to be specified. Buffers that do not include that member will be rejected.
 3758  */
 3759 #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
 3760         .write.has_resp = 1,                                                   \
 3761         .write.req_size = offsetofend(req, req_last_member),                   \
 3762         .write.resp_size = offsetofend(resp, resp_last_member)
 3763 
 3764 #define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
 3765         .write.req_size = offsetofend(req, req_last_member)
 3766 
 3767 const struct uapi_definition uverbs_def_write_intf[] = {
 3768         DECLARE_UVERBS_OBJECT(
 3769                 UVERBS_OBJECT_AH,
 3770                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
 3771                                      ib_uverbs_create_ah,
 3772                                      UAPI_DEF_WRITE_UDATA_IO(
 3773                                              struct ib_uverbs_create_ah,
 3774                                              struct ib_uverbs_create_ah_resp),
 3775                                      UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
 3776                 DECLARE_UVERBS_WRITE(
 3777                         IB_USER_VERBS_CMD_DESTROY_AH,
 3778                         ib_uverbs_destroy_ah,
 3779                         UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
 3780                         UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),
 3781 
 3782         DECLARE_UVERBS_OBJECT(
 3783                 UVERBS_OBJECT_COMP_CHANNEL,
 3784                 DECLARE_UVERBS_WRITE(
 3785                         IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
 3786                         ib_uverbs_create_comp_channel,
 3787                         UAPI_DEF_WRITE_IO(
 3788                                 struct ib_uverbs_create_comp_channel,
 3789                                 struct ib_uverbs_create_comp_channel_resp))),
 3790 
 3791         DECLARE_UVERBS_OBJECT(
 3792                 UVERBS_OBJECT_CQ,
 3793                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
 3794                                      ib_uverbs_create_cq,
 3795                                      UAPI_DEF_WRITE_UDATA_IO(
 3796                                              struct ib_uverbs_create_cq,
 3797                                              struct ib_uverbs_create_cq_resp),
 3798                                      UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
 3799                 DECLARE_UVERBS_WRITE(
 3800                         IB_USER_VERBS_CMD_DESTROY_CQ,
 3801                         ib_uverbs_destroy_cq,
 3802                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
 3803                                           struct ib_uverbs_destroy_cq_resp),
 3804                         UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
 3805                 DECLARE_UVERBS_WRITE(
 3806                         IB_USER_VERBS_CMD_POLL_CQ,
 3807                         ib_uverbs_poll_cq,
 3808                         UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
 3809                                           struct ib_uverbs_poll_cq_resp),
 3810                         UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
 3811                 DECLARE_UVERBS_WRITE(
 3812                         IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
 3813                         ib_uverbs_req_notify_cq,
 3814                         UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
 3815                         UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
 3816                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
 3817                                      ib_uverbs_resize_cq,
 3818                                      UAPI_DEF_WRITE_UDATA_IO(
 3819                                              struct ib_uverbs_resize_cq,
 3820                                              struct ib_uverbs_resize_cq_resp),
 3821                                      UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
 3822                 DECLARE_UVERBS_WRITE_EX(
 3823                         IB_USER_VERBS_EX_CMD_CREATE_CQ,
 3824                         ib_uverbs_ex_create_cq,
 3825                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
 3826                                              reserved,
 3827                                              struct ib_uverbs_ex_create_cq_resp,
 3828                                              response_length),
 3829                         UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
 3830                 DECLARE_UVERBS_WRITE_EX(
 3831                         IB_USER_VERBS_EX_CMD_MODIFY_CQ,
 3832                         ib_uverbs_ex_modify_cq,
 3833                         UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
 3834                         UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
 3835 
 3836         DECLARE_UVERBS_OBJECT(
 3837                 UVERBS_OBJECT_DEVICE,
 3838                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
 3839                                      ib_uverbs_get_context,
 3840                                      UAPI_DEF_WRITE_UDATA_IO(
 3841                                              struct ib_uverbs_get_context,
 3842                                              struct ib_uverbs_get_context_resp)),
 3843                 DECLARE_UVERBS_WRITE(
 3844                         IB_USER_VERBS_CMD_QUERY_DEVICE,
 3845                         ib_uverbs_query_device,
 3846                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
 3847                                           struct ib_uverbs_query_device_resp)),
 3848                 DECLARE_UVERBS_WRITE(
 3849                         IB_USER_VERBS_CMD_QUERY_PORT,
 3850                         ib_uverbs_query_port,
 3851                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
 3852                                           struct ib_uverbs_query_port_resp),
 3853                         UAPI_DEF_METHOD_NEEDS_FN(query_port)),
 3854                 DECLARE_UVERBS_WRITE_EX(
 3855                         IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
 3856                         ib_uverbs_ex_query_device,
 3857                         UAPI_DEF_WRITE_IO_EX(
 3858                                 struct ib_uverbs_ex_query_device,
 3859                                 reserved,
 3860                                 struct ib_uverbs_ex_query_device_resp,
 3861                                 response_length),
 3862                         UAPI_DEF_METHOD_NEEDS_FN(query_device)),
 3863                 UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
 3864                 UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
 3865 
 3866         DECLARE_UVERBS_OBJECT(
 3867                 UVERBS_OBJECT_FLOW,
 3868                 DECLARE_UVERBS_WRITE_EX(
 3869                         IB_USER_VERBS_EX_CMD_CREATE_FLOW,
 3870                         ib_uverbs_ex_create_flow,
 3871                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
 3872                                              flow_attr,
 3873                                              struct ib_uverbs_create_flow_resp,
 3874                                              flow_handle),
 3875                         UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
 3876                 DECLARE_UVERBS_WRITE_EX(
 3877                         IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 3878                         ib_uverbs_ex_destroy_flow,
 3879                         UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
 3880                         UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
 3881 
 3882         DECLARE_UVERBS_OBJECT(
 3883                 UVERBS_OBJECT_MR,
 3884                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
 3885                                      ib_uverbs_dereg_mr,
 3886                                      UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
 3887                                      UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
 3888                 DECLARE_UVERBS_WRITE(
 3889                         IB_USER_VERBS_CMD_REG_MR,
 3890                         ib_uverbs_reg_mr,
 3891                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
 3892                                                 struct ib_uverbs_reg_mr_resp),
 3893                         UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
 3894                 DECLARE_UVERBS_WRITE(
 3895                         IB_USER_VERBS_CMD_REREG_MR,
 3896                         ib_uverbs_rereg_mr,
 3897                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
 3898                                                 struct ib_uverbs_rereg_mr_resp),
 3899                         UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
 3900 
 3901         DECLARE_UVERBS_OBJECT(
 3902                 UVERBS_OBJECT_MW,
 3903                 DECLARE_UVERBS_WRITE(
 3904                         IB_USER_VERBS_CMD_ALLOC_MW,
 3905                         ib_uverbs_alloc_mw,
 3906                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
 3907                                                 struct ib_uverbs_alloc_mw_resp),
 3908                         UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
 3909                 DECLARE_UVERBS_WRITE(
 3910                         IB_USER_VERBS_CMD_DEALLOC_MW,
 3911                         ib_uverbs_dealloc_mw,
 3912                         UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
 3913                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
 3914 
 3915         DECLARE_UVERBS_OBJECT(
 3916                 UVERBS_OBJECT_PD,
 3917                 DECLARE_UVERBS_WRITE(
 3918                         IB_USER_VERBS_CMD_ALLOC_PD,
 3919                         ib_uverbs_alloc_pd,
 3920                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
 3921                                                 struct ib_uverbs_alloc_pd_resp),
 3922                         UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
 3923                 DECLARE_UVERBS_WRITE(
 3924                         IB_USER_VERBS_CMD_DEALLOC_PD,
 3925                         ib_uverbs_dealloc_pd,
 3926                         UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
 3927                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
 3928 
 3929         DECLARE_UVERBS_OBJECT(
 3930                 UVERBS_OBJECT_QP,
 3931                 DECLARE_UVERBS_WRITE(
 3932                         IB_USER_VERBS_CMD_ATTACH_MCAST,
 3933                         ib_uverbs_attach_mcast,
 3934                         UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
 3935                         UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
 3936                         UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
 3937                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
 3938                                      ib_uverbs_create_qp,
 3939                                      UAPI_DEF_WRITE_UDATA_IO(
 3940                                              struct ib_uverbs_create_qp,
 3941                                              struct ib_uverbs_create_qp_resp),
 3942                                      UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
 3943                 DECLARE_UVERBS_WRITE(
 3944                         IB_USER_VERBS_CMD_DESTROY_QP,
 3945                         ib_uverbs_destroy_qp,
 3946                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
 3947                                           struct ib_uverbs_destroy_qp_resp),
 3948                         UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
 3949                 DECLARE_UVERBS_WRITE(
 3950                         IB_USER_VERBS_CMD_DETACH_MCAST,
 3951                         ib_uverbs_detach_mcast,
 3952                         UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
 3953                         UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
 3954                 DECLARE_UVERBS_WRITE(
 3955                         IB_USER_VERBS_CMD_MODIFY_QP,
 3956                         ib_uverbs_modify_qp,
 3957                         UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
 3958                         UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
 3959                 DECLARE_UVERBS_WRITE(
 3960                         IB_USER_VERBS_CMD_POST_RECV,
 3961                         ib_uverbs_post_recv,
 3962                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
 3963                                           struct ib_uverbs_post_recv_resp),
 3964                         UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
 3965                 DECLARE_UVERBS_WRITE(
 3966                         IB_USER_VERBS_CMD_POST_SEND,
 3967                         ib_uverbs_post_send,
 3968                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
 3969                                           struct ib_uverbs_post_send_resp),
 3970                         UAPI_DEF_METHOD_NEEDS_FN(post_send)),
 3971                 DECLARE_UVERBS_WRITE(
 3972                         IB_USER_VERBS_CMD_QUERY_QP,
 3973                         ib_uverbs_query_qp,
 3974                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
 3975                                           struct ib_uverbs_query_qp_resp),
 3976                         UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
 3977                 DECLARE_UVERBS_WRITE_EX(
 3978                         IB_USER_VERBS_EX_CMD_CREATE_QP,
 3979                         ib_uverbs_ex_create_qp,
 3980                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
 3981                                              comp_mask,
 3982                                              struct ib_uverbs_ex_create_qp_resp,
 3983                                              response_length),
 3984                         UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
 3985                 DECLARE_UVERBS_WRITE_EX(
 3986                         IB_USER_VERBS_EX_CMD_MODIFY_QP,
 3987                         ib_uverbs_ex_modify_qp,
 3988                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
 3989                                              base,
 3990                                              struct ib_uverbs_ex_modify_qp_resp,
 3991                                              response_length),
 3992                         UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
 3993 
 3994         DECLARE_UVERBS_OBJECT(
 3995                 UVERBS_OBJECT_RWQ_IND_TBL,
 3996                 DECLARE_UVERBS_WRITE_EX(
 3997                         IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
 3998                         ib_uverbs_ex_create_rwq_ind_table,
 3999                         UAPI_DEF_WRITE_IO_EX(
 4000                                 struct ib_uverbs_ex_create_rwq_ind_table,
 4001                                 log_ind_tbl_size,
 4002                                 struct ib_uverbs_ex_create_rwq_ind_table_resp,
 4003                                 ind_tbl_num),
 4004                         UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
 4005                 DECLARE_UVERBS_WRITE_EX(
 4006                         IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
 4007                         ib_uverbs_ex_destroy_rwq_ind_table,
 4008                         UAPI_DEF_WRITE_I(
 4009                                 struct ib_uverbs_ex_destroy_rwq_ind_table),
 4010                         UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
 4011 
 4012         DECLARE_UVERBS_OBJECT(
 4013                 UVERBS_OBJECT_WQ,
 4014                 DECLARE_UVERBS_WRITE_EX(
 4015                         IB_USER_VERBS_EX_CMD_CREATE_WQ,
 4016                         ib_uverbs_ex_create_wq,
 4017                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
 4018                                              max_sge,
 4019                                              struct ib_uverbs_ex_create_wq_resp,
 4020                                              wqn),
 4021                         UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
 4022                 DECLARE_UVERBS_WRITE_EX(
 4023                         IB_USER_VERBS_EX_CMD_DESTROY_WQ,
 4024                         ib_uverbs_ex_destroy_wq,
 4025                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
 4026                                              wq_handle,
 4027                                              struct ib_uverbs_ex_destroy_wq_resp,
 4028                                              reserved),
 4029                         UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
 4030                 DECLARE_UVERBS_WRITE_EX(
 4031                         IB_USER_VERBS_EX_CMD_MODIFY_WQ,
 4032                         ib_uverbs_ex_modify_wq,
 4033                         UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
 4034                                             curr_wq_state),
 4035                         UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
 4036 
 4037         DECLARE_UVERBS_OBJECT(
 4038                 UVERBS_OBJECT_SRQ,
 4039                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
 4040                                      ib_uverbs_create_srq,
 4041                                      UAPI_DEF_WRITE_UDATA_IO(
 4042                                              struct ib_uverbs_create_srq,
 4043                                              struct ib_uverbs_create_srq_resp),
 4044                                      UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
 4045                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
 4046                                      ib_uverbs_create_xsrq,
 4047                                      UAPI_DEF_WRITE_UDATA_IO(
 4048                                              struct ib_uverbs_create_xsrq,
 4049                                              struct ib_uverbs_create_srq_resp),
 4050                                      UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
 4051                 DECLARE_UVERBS_WRITE(
 4052                         IB_USER_VERBS_CMD_DESTROY_SRQ,
 4053                         ib_uverbs_destroy_srq,
 4054                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
 4055                                           struct ib_uverbs_destroy_srq_resp),
 4056                         UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
 4057                 DECLARE_UVERBS_WRITE(
 4058                         IB_USER_VERBS_CMD_MODIFY_SRQ,
 4059                         ib_uverbs_modify_srq,
 4060                         UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
 4061                         UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
 4062                 DECLARE_UVERBS_WRITE(
 4063                         IB_USER_VERBS_CMD_POST_SRQ_RECV,
 4064                         ib_uverbs_post_srq_recv,
 4065                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
 4066                                           struct ib_uverbs_post_srq_recv_resp),
 4067                         UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
 4068                 DECLARE_UVERBS_WRITE(
 4069                         IB_USER_VERBS_CMD_QUERY_SRQ,
 4070                         ib_uverbs_query_srq,
 4071                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
 4072                                           struct ib_uverbs_query_srq_resp),
 4073                         UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
 4074 
 4075         DECLARE_UVERBS_OBJECT(
 4076                 UVERBS_OBJECT_XRCD,
 4077                 DECLARE_UVERBS_WRITE(
 4078                         IB_USER_VERBS_CMD_CLOSE_XRCD,
 4079                         ib_uverbs_close_xrcd,
 4080                         UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
 4081                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
 4082                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
 4083                                      ib_uverbs_open_qp,
 4084                                      UAPI_DEF_WRITE_UDATA_IO(
 4085                                              struct ib_uverbs_open_qp,
 4086                                              struct ib_uverbs_create_qp_resp)),
 4087                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
 4088                                      ib_uverbs_open_xrcd,
 4089                                      UAPI_DEF_WRITE_UDATA_IO(
 4090                                              struct ib_uverbs_open_xrcd,
 4091                                              struct ib_uverbs_open_xrcd_resp),
 4092                                      UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),
 4093 
 4094         {},
 4095 };
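A minimal, self-contained sketch (userland C, not kernel code) of the idea behind the field-name arguments to UAPI_DEF_WRITE_IO_EX() in the table above: the named "last mandatory member" of each request and response struct fixes the smallest buffer the write-command parser will accept, computed with the kernel's offsetofend() idiom. The struct below is a hypothetical stand-in, not the real ABI.

	#include <stdio.h>
	#include <stddef.h>

	/* One byte past MEMBER: the kernel's offsetofend() idiom. */
	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	struct toy_ex_req {
		unsigned int qp_handle;	/* always present */
		unsigned int comp_mask;	/* last member userspace must supply */
		unsigned int later_ext;	/* newer extension; may be absent */
	};

	int main(void)
	{
		/* An entry declared with (struct toy_ex_req, comp_mask)
		 * would reject any request shorter than this. */
		printf("minimum request size: %zu bytes\n",
		       offsetofend(struct toy_ex_req, comp_mask));
		return 0;
	}

Declaring the minimum as "offset just past a named member" rather than sizeof() is what lets the extended (EX) commands grow: newer, longer request structs still pass the same check, and shorter legacy requests remain valid as long as they cover the mandatory prefix.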
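In the same spirit, a hedged sketch of the gating that UAPI_DEF_METHOD_NEEDS_FN() names: each table entry can tie a command to one driver callback, and the command is only exposed when the bound device actually implements that callback. struct toy_ops and method_supported() are illustrative names, not the kernel's; the real check is keyed on the device's verb function table.

	#include <stdio.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	struct toy_ops {
		int (*create_srq)(void);	/* e.g. the create_srq verb */
		int (*destroy_srq)(void);
	};

	/* True when the callback stored fn_offset bytes into *ops is set. */
	static bool method_supported(const struct toy_ops *ops, size_t fn_offset)
	{
		int (*fn)(void);

		memcpy(&fn, (const char *)ops + fn_offset, sizeof(fn));
		return fn != NULL;
	}

	int main(void)
	{
		/* A driver that leaves create_srq NULL would have the
		 * matching write command rejected at dispatch time. */
		struct toy_ops ops = { 0 };

		printf("create_srq supported: %d\n",
		       (int)method_supported(&ops,
					     offsetof(struct toy_ops,
						      create_srq)));
		return 0;
	}

Recording a function-pointer offset at table-definition time, instead of an ad hoc capability flag per command, keeps the declarations above purely declarative: the terminating {} sentinel closes the table, and the dispatcher can walk it and derive supported commands for each device without any per-driver registration code.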
