
FreeBSD/Linux Kernel Cross Reference
sys/ofed/include/rdma/ib_verbs.h


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
    3  *
    4  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
    5  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
    6  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
    7  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
    8  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
    9  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   10  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   11  *
   12  * This software is available to you under a choice of one of two
   13  * licenses.  You may choose to be licensed under the terms of the GNU
   14  * General Public License (GPL) Version 2, available from the file
   15  * COPYING in the main directory of this source tree, or the
   16  * OpenIB.org BSD license below:
   17  *
   18  *     Redistribution and use in source and binary forms, with or
   19  *     without modification, are permitted provided that the following
   20  *     conditions are met:
   21  *
   22  *      - Redistributions of source code must retain the above
   23  *        copyright notice, this list of conditions and the following
   24  *        disclaimer.
   25  *
   26  *      - Redistributions in binary form must reproduce the above
   27  *        copyright notice, this list of conditions and the following
   28  *        disclaimer in the documentation and/or other materials
   29  *        provided with the distribution.
   30  *
   31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
   34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
   35  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
   36  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   37  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   38  * SOFTWARE.
   39  *
   40  * $FreeBSD$
   41  */
   42 
   43 #if !defined(IB_VERBS_H)
   44 #define IB_VERBS_H
   45 
   46 #include <linux/types.h>
   47 #include <linux/device.h>
   48 #include <linux/mm.h>
   49 #include <linux/dma-mapping.h>
   50 #include <linux/kref.h>
   51 #include <linux/list.h>
   52 #include <linux/rwsem.h>
   53 #include <linux/scatterlist.h>
   54 #include <linux/workqueue.h>
   55 #include <linux/socket.h>
   56 #include <linux/if_ether.h>
   57 #include <net/ipv6.h>
   58 #include <net/ip.h>
   59 #include <linux/string.h>
   60 #include <linux/slab.h>
   61 #include <linux/rcupdate.h>
   62 #include <linux/netdevice.h>
   63 #include <linux/xarray.h>
   64 #include <netinet/ip.h>
   65 #include <uapi/rdma/ib_user_verbs.h>
   66 #include <rdma/signature.h>
   67 #include <uapi/rdma/rdma_user_ioctl.h>
   68 #include <uapi/rdma/ib_user_ioctl_verbs.h>
   69 
   70 #include <asm/atomic.h>
   71 #include <asm/uaccess.h>
   72 
   73 struct ib_uqp_object;
   74 struct ib_usrq_object;
   75 struct ib_uwq_object;
   76 struct ifla_vf_info;
   77 struct ifla_vf_stats;
   78 struct ib_uverbs_file;
   79 struct uverbs_attr_bundle;
   80 
   81 enum ib_uverbs_advise_mr_advice;
   82 
   83 extern struct workqueue_struct *ib_wq;
   84 extern struct workqueue_struct *ib_comp_wq;
   85 
   86 struct ib_ucq_object;
   87 
   88 union ib_gid {
   89         u8      raw[16];
   90         struct {
   91                 __be64  subnet_prefix;
   92                 __be64  interface_id;
   93         } global;
   94 };
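
/*
 * Illustrative sketch, not part of the header: using the two views of a
 * GID.  A GID is a 128-bit value, readable either as raw bytes or as a
 * subnet prefix plus interface ID; 0xfe80::/64 is the IBTA link-local
 * prefix.  The helper name is hypothetical.
 */
static inline bool example_gid_is_link_local(const union ib_gid *gid)
{
        return gid->global.subnet_prefix ==
            cpu_to_be64(0xfe80000000000000ULL);
}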
   95 
   96 extern union ib_gid zgid;
   97 
   98 enum ib_gid_type {
   99         /* If link layer is Ethernet, this is RoCE V1 */
  100         IB_GID_TYPE_IB        = 0,
  101         IB_GID_TYPE_ROCE      = 0,
  102         IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
  103         IB_GID_TYPE_SIZE
  104 };
  105 
  106 #define ROCE_V2_UDP_DPORT      4791
  107 struct ib_gid_attr {
  108         enum ib_gid_type        gid_type;
  109         struct ifnet    *ndev;
  110 };
  111 
  112 enum rdma_node_type {
  113         /* IB values map to NodeInfo:NodeType. */
  114         RDMA_NODE_IB_CA         = 1,
  115         RDMA_NODE_IB_SWITCH,
  116         RDMA_NODE_IB_ROUTER,
  117         RDMA_NODE_RNIC,
  118         RDMA_NODE_USNIC,
  119         RDMA_NODE_USNIC_UDP,
  120 };
  121 
  122 enum {
  123         /* set the local administered indication */
  124         IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
  125 };
  126 
  127 enum rdma_transport_type {
  128         RDMA_TRANSPORT_IB,
  129         RDMA_TRANSPORT_IWARP,
  130         RDMA_TRANSPORT_USNIC,
  131         RDMA_TRANSPORT_USNIC_UDP
  132 };
  133 
  134 enum rdma_protocol_type {
  135         RDMA_PROTOCOL_IB,
  136         RDMA_PROTOCOL_IBOE,
  137         RDMA_PROTOCOL_IWARP,
  138         RDMA_PROTOCOL_USNIC_UDP
  139 };
  140 
  141 __attribute_const__ enum rdma_transport_type
  142 rdma_node_get_transport(enum rdma_node_type node_type);
  143 
  144 enum rdma_network_type {
  145         RDMA_NETWORK_IB,
  146         RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
  147         RDMA_NETWORK_IPV4,
  148         RDMA_NETWORK_IPV6
  149 };
  150 
  151 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
  152 {
  153         if (network_type == RDMA_NETWORK_IPV4 ||
  154             network_type == RDMA_NETWORK_IPV6)
  155                 return IB_GID_TYPE_ROCE_UDP_ENCAP;
  156 
  157         /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
  158         return IB_GID_TYPE_IB;
  159 }
  160 
  161 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
  162                                                             union ib_gid *gid)
  163 {
  164         if (gid_type == IB_GID_TYPE_IB)
  165                 return RDMA_NETWORK_IB;
  166 
  167         if (ipv6_addr_v4mapped((struct in6_addr *)gid))
  168                 return RDMA_NETWORK_IPV4;
  169         else
  170                 return RDMA_NETWORK_IPV6;
  171 }
  172 
  173 enum rdma_link_layer {
  174         IB_LINK_LAYER_UNSPECIFIED,
  175         IB_LINK_LAYER_INFINIBAND,
  176         IB_LINK_LAYER_ETHERNET,
  177 };
  178 
  179 enum ib_device_cap_flags {
  180         IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
  181         IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
  182         IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
  183         IB_DEVICE_RAW_MULTI                     = (1 << 3),
  184         IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
  185         IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
  186         IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
  187         IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
  188         IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
  189         IB_DEVICE_INIT_TYPE                     = (1 << 9),
  190         IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
  191         IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
  192         IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
  193         IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
  194         IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),
  195 
  196         /*
  197          * This device supports a per-device lkey or stag that can be
  198          * used without performing a memory registration for the local
  199          * memory.  Note that ULPs should never check this flag, but
  200          * instead use the local_dma_lkey field in the ib_pd structure,
  201          * which will always contain a usable lkey.
  202          */
  203         IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
  204         IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
  205         IB_DEVICE_MEM_WINDOW                    = (1 << 17),
  206         /*
  207          * Devices should set IB_DEVICE_UD_IP_CSUM if they support
  208          * insertion of UDP and TCP checksum on outgoing UD IPoIB
  209          * messages and can verify the validity of checksums for
  210          * incoming messages.  Setting this flag implies that the
  211          * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
  212          */
  213         IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
  214         IB_DEVICE_UD_TSO                        = (1 << 19),
  215         IB_DEVICE_XRC                           = (1 << 20),
  216 
  217         /*
  218          * This device supports the IB "base memory management extension",
  219          * which includes support for fast registrations (IB_WR_REG_MR,
  220          * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
  221          * also be set by any iWARP device which must support FRs to comply
  222          * with the iWARP verbs spec.  iWARP devices also support the
  223          * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
  224          * stag.
  225          */
  226         IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
  227         IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
  228         IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
  229         IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
  230         IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
  231         IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
  232         /*
  233          * Devices should set IB_DEVICE_CROSS_CHANNEL if they
  234          * support execution of WQEs that involve synchronization
  235          * of I/O operations with single completion queue managed
  236          * by hardware.
  237          */
  238         IB_DEVICE_CROSS_CHANNEL         = (1 << 27),
  239         IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
  240         IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
  241         IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
  242         IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
  243         IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
  244         IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
  245 };
  246 
  247 enum ib_atomic_cap {
  248         IB_ATOMIC_NONE,
  249         IB_ATOMIC_HCA,
  250         IB_ATOMIC_GLOB
  251 };
  252 
  253 enum ib_odp_general_cap_bits {
  254         IB_ODP_SUPPORT = 1 << 0,
  255 };
  256 
  257 enum ib_odp_transport_cap_bits {
  258         IB_ODP_SUPPORT_SEND     = 1 << 0,
  259         IB_ODP_SUPPORT_RECV     = 1 << 1,
  260         IB_ODP_SUPPORT_WRITE    = 1 << 2,
  261         IB_ODP_SUPPORT_READ     = 1 << 3,
  262         IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
  263 };
  264 
  265 struct ib_odp_caps {
  266         uint64_t general_caps;
  267         struct {
  268                 uint32_t  rc_odp_caps;
  269                 uint32_t  uc_odp_caps;
  270                 uint32_t  ud_odp_caps;
  271                 uint32_t  xrc_odp_caps;
  272         } per_transport_caps;
  273 };
  274 
  275 struct ib_rss_caps {
  276         /* Corresponding bit will be set if qp type from
  277          * 'enum ib_qp_type' is supported, e.g.
  278          * supported_qpts |= 1 << IB_QPT_UD
  279          */
  280         u32 supported_qpts;
  281         u32 max_rwq_indirection_tables;
  282         u32 max_rwq_indirection_table_size;
  283 };
  284 
  285 enum ib_tm_cap_flags {
  286         /*  Support tag matching with rendezvous offload for RC transport */
  287         IB_TM_CAP_RNDV_RC = 1 << 0,
  288 };
  289 
  290 struct ib_tm_caps {
  291         /* Max size of RNDV header */
  292         u32 max_rndv_hdr_size;
  293         /* Max number of entries in tag matching list */
  294         u32 max_num_tags;
  295         /* From enum ib_tm_cap_flags */
  296         u32 flags;
  297         /* Max number of outstanding list operations */
  298         u32 max_ops;
  299         /* Max number of SGE in tag matching entry */
  300         u32 max_sge;
  301 };
  302 
  303 enum ib_cq_creation_flags {
  304         IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
  305         IB_CQ_FLAGS_IGNORE_OVERRUN         = 1 << 1,
  306 };
  307 
  308 struct ib_cq_init_attr {
  309         unsigned int    cqe;
  310         u32             comp_vector;
  311         u32             flags;
  312 };
  313 
  314 enum ib_cq_attr_mask {
  315         IB_CQ_MODERATE = 1 << 0,
  316 };
  317 
  318 struct ib_cq_caps {
  319         u16     max_cq_moderation_count;
  320         u16     max_cq_moderation_period;
  321 };
  322 
  323 struct ib_dm_mr_attr {
  324         u64             length;
  325         u64             offset;
  326         u32             access_flags;
  327 };
  328 
  329 struct ib_dm_alloc_attr {
  330         u64     length;
  331         u32     alignment;
  332         u32     flags;
  333 };
  334 
  335 struct ib_device_attr {
  336         u64                     fw_ver;
  337         __be64                  sys_image_guid;
  338         u64                     max_mr_size;
  339         u64                     page_size_cap;
  340         u32                     vendor_id;
  341         u32                     vendor_part_id;
  342         u32                     hw_ver;
  343         int                     max_qp;
  344         int                     max_qp_wr;
  345         u64                     device_cap_flags;
  346         int                     max_sge;
  347         int                     max_sge_rd;
  348         int                     max_cq;
  349         int                     max_cqe;
  350         int                     max_mr;
  351         int                     max_pd;
  352         int                     max_qp_rd_atom;
  353         int                     max_ee_rd_atom;
  354         int                     max_res_rd_atom;
  355         int                     max_qp_init_rd_atom;
  356         int                     max_ee_init_rd_atom;
  357         enum ib_atomic_cap      atomic_cap;
  358         enum ib_atomic_cap      masked_atomic_cap;
  359         int                     max_ee;
  360         int                     max_rdd;
  361         int                     max_mw;
  362         int                     max_raw_ipv6_qp;
  363         int                     max_raw_ethy_qp;
  364         int                     max_mcast_grp;
  365         int                     max_mcast_qp_attach;
  366         int                     max_total_mcast_qp_attach;
  367         int                     max_ah;
  368         int                     max_fmr;
  369         int                     max_map_per_fmr;
  370         int                     max_srq;
  371         int                     max_srq_wr;
  372         union {
  373                 int             max_srq_sge;
  374                 int             max_send_sge;
  375                 int             max_recv_sge;
  376         };
  377         unsigned int            max_fast_reg_page_list_len;
  378         u16                     max_pkeys;
  379         u8                      local_ca_ack_delay;
  380         int                     sig_prot_cap;
  381         int                     sig_guard_cap;
  382         struct ib_odp_caps      odp_caps;
  383         uint64_t                timestamp_mask;
  384         uint64_t                hca_core_clock; /* in kHz */
  385         struct ib_rss_caps      rss_caps;
  386         u32                     max_wq_type_rq;
  387         u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
  388         struct ib_tm_caps       tm_caps;
  389         struct ib_cq_caps       cq_caps;
  390         u64                     max_dm_size;
  391         /* Max number of sgl entries per READ for optimized performance */
  392         u32                     max_sgl_rd;
  393 };
  394 
  395 enum ib_mtu {
  396         IB_MTU_256  = 1,
  397         IB_MTU_512  = 2,
  398         IB_MTU_1024 = 3,
  399         IB_MTU_2048 = 4,
  400         IB_MTU_4096 = 5
  401 };
  402 
  403 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
  404 {
  405         switch (mtu) {
  406         case IB_MTU_256:  return  256;
  407         case IB_MTU_512:  return  512;
  408         case IB_MTU_1024: return 1024;
  409         case IB_MTU_2048: return 2048;
  410         case IB_MTU_4096: return 4096;
  411         default:          return -1;
  412         }
  413 }
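
/*
 * Illustrative sketch, not part of the header: the reverse mapping from a
 * byte count to the largest enum ib_mtu that fits.  The helper name is
 * hypothetical.
 */
static inline enum ib_mtu example_int_to_ib_mtu(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        if (mtu >= 2048)
                return IB_MTU_2048;
        if (mtu >= 1024)
                return IB_MTU_1024;
        if (mtu >= 512)
                return IB_MTU_512;
        return IB_MTU_256;
}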
  414 
  415 enum ib_port_state {
  416         IB_PORT_NOP             = 0,
  417         IB_PORT_DOWN            = 1,
  418         IB_PORT_INIT            = 2,
  419         IB_PORT_ARMED           = 3,
  420         IB_PORT_ACTIVE          = 4,
  421         IB_PORT_ACTIVE_DEFER    = 5,
  422         IB_PORT_DUMMY           = -1,   /* force enum signed */
  423 };
  424 
  425 enum ib_port_cap_flags {
  426         IB_PORT_SM                              = 1 <<  1,
  427         IB_PORT_NOTICE_SUP                      = 1 <<  2,
  428         IB_PORT_TRAP_SUP                        = 1 <<  3,
  429         IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
  430         IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
  431         IB_PORT_SL_MAP_SUP                      = 1 <<  6,
  432         IB_PORT_MKEY_NVRAM                      = 1 <<  7,
  433         IB_PORT_PKEY_NVRAM                      = 1 <<  8,
  434         IB_PORT_LED_INFO_SUP                    = 1 <<  9,
  435         IB_PORT_SM_DISABLED                     = 1 << 10,
  436         IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
  437         IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
  438         IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
  439         IB_PORT_CM_SUP                          = 1 << 16,
  440         IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
  441         IB_PORT_REINIT_SUP                      = 1 << 18,
  442         IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
  443         IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
  444         IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
  445         IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
  446         IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
  447         IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
  448         IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
  449         IB_PORT_IP_BASED_GIDS                   = 1 << 26,
  450 };
  451 
  452 enum ib_port_phys_state {
  453         IB_PORT_PHYS_STATE_SLEEP = 1,
  454         IB_PORT_PHYS_STATE_POLLING = 2,
  455         IB_PORT_PHYS_STATE_DISABLED = 3,
  456         IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
  457         IB_PORT_PHYS_STATE_LINK_UP = 5,
  458         IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
  459         IB_PORT_PHYS_STATE_PHY_TEST = 7,
  460 };
  461 
  462 enum ib_port_width {
  463         IB_WIDTH_1X     = 1,
  464         IB_WIDTH_2X     = 16,
  465         IB_WIDTH_4X     = 2,
  466         IB_WIDTH_8X     = 4,
  467         IB_WIDTH_12X    = 8
  468 };
  469 
  470 static inline int ib_width_enum_to_int(enum ib_port_width width)
  471 {
  472         switch (width) {
  473         case IB_WIDTH_1X:  return  1;
  474         case IB_WIDTH_2X:  return  2;
  475         case IB_WIDTH_4X:  return  4;
  476         case IB_WIDTH_8X:  return  8;
  477         case IB_WIDTH_12X: return 12;
  478         default:          return -1;
  479         }
  480 }
  481 
  482 enum ib_port_speed {
  483         IB_SPEED_SDR    = 1,
  484         IB_SPEED_DDR    = 2,
  485         IB_SPEED_QDR    = 4,
  486         IB_SPEED_FDR10  = 8,
  487         IB_SPEED_FDR    = 16,
  488         IB_SPEED_EDR    = 32,
  489         IB_SPEED_HDR    = 64,
  490         IB_SPEED_NDR    = 128
  491 };
  492 
  493 /**
  494  * struct rdma_hw_stats
  495  * @lock - Mutex to protect parallel write access to lifespan and values
  496  *    of counters, which are 64 bits and not guaranteed to be written
  497  *    atomically on 32-bit systems.
  498  * @timestamp - Used by the core code to track when the last update was made
  499  * @lifespan - Used by the core code to determine how old the counters
  500  *   should be before being updated again.  Stored in jiffies, defaults
  501  *   to 10 milliseconds, drivers can override the default by specifying
  502  *   their own value during their allocation routine.
  503  * @names - Array of pointers to static names used for the counters in
  504  *   the sysfs directory.
  505  * @num_counters - How many hardware counters there are.  If names is
  506  *   shorter than this number, a kernel oops will result.  Driver authors
  507  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
  508  *   in their code to prevent this.
  509  * @value - Array of u64 counters that are accessed by the sysfs code and
  510  *   filled in by the driver's get_stats routine
  511  */
  512 struct rdma_hw_stats {
  513         struct mutex    lock; /* Protect lifespan and values[] */
  514         unsigned long   timestamp;
  515         unsigned long   lifespan;
  516         const char * const *names;
  517         int             num_counters;
  518         u64             value[];
  519 };
  520 
  521 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
  522 /**
  523  * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
  524  *   for drivers.
  525  * @names - Array of static const char *
  526  * @num_counters - How many elements in array
  527  * @lifespan - How many milliseconds between updates
  528  */
  529 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
  530                 const char * const *names, int num_counters,
  531                 unsigned long lifespan)
  532 {
  533         struct rdma_hw_stats *stats;
  534 
  535         stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
  536                         GFP_KERNEL);
  537         if (!stats)
  538                 return NULL;
  539         stats->names = names;
  540         stats->num_counters = num_counters;
  541         stats->lifespan = msecs_to_jiffies(lifespan);
  542 
  543         return stats;
  544 }
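
/*
 * Illustrative sketch, not part of the header: how a driver might call the
 * helper above from its stats-allocation path.  The counter names and the
 * wrapper are hypothetical; the names array must stay valid for the
 * lifetime of the returned struct (see struct rdma_hw_stats above).
 */
static const char * const example_counter_names[] = {
        "rx_packets",
        "tx_packets",
};

static inline struct rdma_hw_stats *example_alloc_hw_stats(void)
{
        return rdma_alloc_hw_stats_struct(example_counter_names,
            ARRAY_SIZE(example_counter_names),
            RDMA_HW_STATS_DEFAULT_LIFESPAN);
}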
  545 
  546 
  547 /* Define bits for the various kinds of functionality this port needs the
  548  * core to support.
  549  */
  550 /* Management                           0x00000FFF */
  551 #define RDMA_CORE_CAP_IB_MAD            0x00000001
  552 #define RDMA_CORE_CAP_IB_SMI            0x00000002
  553 #define RDMA_CORE_CAP_IB_CM             0x00000004
  554 #define RDMA_CORE_CAP_IW_CM             0x00000008
  555 #define RDMA_CORE_CAP_IB_SA             0x00000010
  556 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
  557 
  558 /* Address format                       0x000FF000 */
  559 #define RDMA_CORE_CAP_AF_IB             0x00001000
  560 #define RDMA_CORE_CAP_ETH_AH            0x00002000
  561 
  562 /* Protocol                             0xFFF00000 */
  563 #define RDMA_CORE_CAP_PROT_IB           0x00100000
  564 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
  565 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
  566 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
  567 
  568 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
  569                                         | RDMA_CORE_CAP_IB_MAD \
  570                                         | RDMA_CORE_CAP_IB_SMI \
  571                                         | RDMA_CORE_CAP_IB_CM  \
  572                                         | RDMA_CORE_CAP_IB_SA  \
  573                                         | RDMA_CORE_CAP_AF_IB)
  574 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
  575                                         | RDMA_CORE_CAP_IB_MAD  \
  576                                         | RDMA_CORE_CAP_IB_CM   \
  577                                         | RDMA_CORE_CAP_AF_IB   \
  578                                         | RDMA_CORE_CAP_ETH_AH)
  579 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
  580                                         (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
  581                                         | RDMA_CORE_CAP_IB_MAD  \
  582                                         | RDMA_CORE_CAP_IB_CM   \
  583                                         | RDMA_CORE_CAP_AF_IB   \
  584                                         | RDMA_CORE_CAP_ETH_AH)
  585 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
  586                                         | RDMA_CORE_CAP_IW_CM)
  587 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
  588                                         | RDMA_CORE_CAP_OPA_MAD)
  589 
  590 struct ib_port_attr {
  591         u64                     subnet_prefix;
  592         enum ib_port_state      state;
  593         enum ib_mtu             max_mtu;
  594         enum ib_mtu             active_mtu;
  595         int                     gid_tbl_len;
  596         unsigned int            ip_gids:1;
  597         /* This is the value from PortInfo CapabilityMask, defined by IBA */
  598         u32                     port_cap_flags;
  599         u32                     max_msg_sz;
  600         u32                     bad_pkey_cntr;
  601         u32                     qkey_viol_cntr;
  602         u16                     pkey_tbl_len;
  603         u16                     lid;
  604         u16                     sm_lid;
  605         u8                      lmc;
  606         u8                      max_vl_num;
  607         u8                      sm_sl;
  608         u8                      subnet_timeout;
  609         u8                      init_type_reply;
  610         u8                      active_width;
  611         u8                      active_speed;
  612         u8                      phys_state;
  613         bool                    grh_required;
  614 };
  615 
  616 enum ib_device_modify_flags {
  617         IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
  618         IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
  619 };
  620 
  621 #define IB_DEVICE_NODE_DESC_MAX 64
  622 
  623 struct ib_device_modify {
  624         u64     sys_image_guid;
  625         char    node_desc[IB_DEVICE_NODE_DESC_MAX];
  626 };
  627 
  628 enum ib_port_modify_flags {
  629         IB_PORT_SHUTDOWN                = 1,
  630         IB_PORT_INIT_TYPE               = (1<<2),
  631         IB_PORT_RESET_QKEY_CNTR         = (1<<3)
  632 };
  633 
  634 struct ib_port_modify {
  635         u32     set_port_cap_mask;
  636         u32     clr_port_cap_mask;
  637         u8      init_type;
  638 };
  639 
  640 enum ib_event_type {
  641         IB_EVENT_CQ_ERR,
  642         IB_EVENT_QP_FATAL,
  643         IB_EVENT_QP_REQ_ERR,
  644         IB_EVENT_QP_ACCESS_ERR,
  645         IB_EVENT_COMM_EST,
  646         IB_EVENT_SQ_DRAINED,
  647         IB_EVENT_PATH_MIG,
  648         IB_EVENT_PATH_MIG_ERR,
  649         IB_EVENT_DEVICE_FATAL,
  650         IB_EVENT_PORT_ACTIVE,
  651         IB_EVENT_PORT_ERR,
  652         IB_EVENT_LID_CHANGE,
  653         IB_EVENT_PKEY_CHANGE,
  654         IB_EVENT_SM_CHANGE,
  655         IB_EVENT_SRQ_ERR,
  656         IB_EVENT_SRQ_LIMIT_REACHED,
  657         IB_EVENT_QP_LAST_WQE_REACHED,
  658         IB_EVENT_CLIENT_REREGISTER,
  659         IB_EVENT_GID_CHANGE,
  660         IB_EVENT_WQ_FATAL,
  661 };
  662 
  663 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
  664 
  665 struct ib_event {
  666         struct ib_device        *device;
  667         union {
  668                 struct ib_cq    *cq;
  669                 struct ib_qp    *qp;
  670                 struct ib_srq   *srq;
  671                 struct ib_wq    *wq;
  672                 u8              port_num;
  673         } element;
  674         enum ib_event_type      event;
  675 };
  676 
  677 struct ib_event_handler {
  678         struct ib_device *device;
  679         void            (*handler)(struct ib_event_handler *, struct ib_event *);
  680         struct list_head  list;
  681 };
  682 
  683 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
  684         do {                                                    \
  685                 (_ptr)->device  = _device;                      \
  686                 (_ptr)->handler = _handler;                     \
  687                 INIT_LIST_HEAD(&(_ptr)->list);                  \
  688         } while (0)
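
/*
 * Illustrative sketch, not part of the header: registering an asynchronous
 * event handler with the macro above.  The handler and setup function are
 * hypothetical; ib_register_event_handler() is part of the verbs API but
 * is declared elsewhere in this header.
 */
static void example_event_handler(struct ib_event_handler *handler,
    struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ACTIVE)
                pr_info("port %u is active\n", event->element.port_num);
}

static struct ib_event_handler example_handler;

static void example_watch_device(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_handler);
        ib_register_event_handler(&example_handler);
}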
  689 
  690 struct ib_global_route {
  691         union ib_gid    dgid;
  692         u32             flow_label;
  693         u8              sgid_index;
  694         u8              hop_limit;
  695         u8              traffic_class;
  696 };
  697 
  698 struct ib_grh {
  699         __be32          version_tclass_flow;
  700         __be16          paylen;
  701         u8              next_hdr;
  702         u8              hop_limit;
  703         union ib_gid    sgid;
  704         union ib_gid    dgid;
  705 };
  706 
  707 union rdma_network_hdr {
  708         struct ib_grh ibgrh;
  709         struct {
  710                 /* The IB spec states that if the network header is IPv4,
  711                  * it is located in the last 20 bytes of the GRH space.
  712                  */
  713                 u8              reserved[20];
  714                 struct ip       roce4grh;
  715         };
  716 };
  717 
  718 enum {
  719         IB_MULTICAST_QPN = 0xffffff
  720 };
  721 
  722 #define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
  723 #define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)
  724 
  725 enum ib_ah_flags {
  726         IB_AH_GRH       = 1
  727 };
  728 
  729 enum ib_rate {
  730         IB_RATE_PORT_CURRENT = 0,
  731         IB_RATE_2_5_GBPS = 2,
  732         IB_RATE_5_GBPS   = 5,
  733         IB_RATE_10_GBPS  = 3,
  734         IB_RATE_20_GBPS  = 6,
  735         IB_RATE_30_GBPS  = 4,
  736         IB_RATE_40_GBPS  = 7,
  737         IB_RATE_60_GBPS  = 8,
  738         IB_RATE_80_GBPS  = 9,
  739         IB_RATE_120_GBPS = 10,
  740         IB_RATE_14_GBPS  = 11,
  741         IB_RATE_56_GBPS  = 12,
  742         IB_RATE_112_GBPS = 13,
  743         IB_RATE_168_GBPS = 14,
  744         IB_RATE_25_GBPS  = 15,
  745         IB_RATE_100_GBPS = 16,
  746         IB_RATE_200_GBPS = 17,
  747         IB_RATE_300_GBPS = 18,
  748         IB_RATE_28_GBPS  = 19,
  749         IB_RATE_50_GBPS  = 20,
  750         IB_RATE_400_GBPS = 21,
  751         IB_RATE_600_GBPS = 22,
  752 };
  753 
  754 /**
  755  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
  756  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
  757  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
  758  * @rate: rate to convert.
  759  */
  760 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
  761 
  762 /**
  763  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
  764  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
  765  * @rate: rate to convert.
  766  */
  767 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
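
/*
 * Illustrative sketch, not part of the header: using the converters above.
 * IB_RATE_5_GBPS maps to a multiplier of 2 (2 * 2.5 Gbit/sec) and to
 * 5000 Mbit/sec; the helper name is hypothetical.
 */
static inline int example_rate_to_gbps(enum ib_rate rate)
{
        int mbps = ib_rate_to_mbps(rate);       /* e.g. 5000 for IB_RATE_5_GBPS */

        return mbps > 0 ? mbps / 1000 : -1;
}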
  768 
  769 
  770 /**
  771  * enum ib_mr_type - memory region type
  772  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
  773  *                            normal registration
  774  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
  775  *                            register any arbitrary sg lists (without
  776  *                            the normal mr constraints - see
  777  *                            ib_map_mr_sg)
  778  * @IB_MR_TYPE_DM:            memory region that is used for device
  779  *                            memory registration
  780  * @IB_MR_TYPE_USER:          memory region that is used for the user-space
  781  *                            application
  782  * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
  783  *                            without address translations (VA=PA)
  784  * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
  785  *                            data integrity operations
  786  */
  787 enum ib_mr_type {
  788         IB_MR_TYPE_MEM_REG,
  789         IB_MR_TYPE_SG_GAPS,
  790         IB_MR_TYPE_DM,
  791         IB_MR_TYPE_USER,
  792         IB_MR_TYPE_DMA,
  793         IB_MR_TYPE_INTEGRITY,
  794 };
  795 
  796 enum ib_mr_status_check {
  797         IB_MR_CHECK_SIG_STATUS = 1,
  798 };
  799 
  800 /**
  801  * struct ib_mr_status - Memory region status container
  802  *
  803  * @fail_status: Bitmask of MR checks status. For each
  804  *     failed check a corresponding status bit is set.
  805  * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
  806  *     failure.
  807  */
  808 struct ib_mr_status {
  809         u32                 fail_status;
  810         struct ib_sig_err   sig_err;
  811 };
  812 
  813 /**
  814  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
  815  * enum.
  816  * @mult: multiple to convert.
  817  */
  818 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
  819 
  820 struct ib_ah_attr {
  821         struct ib_global_route  grh;
  822         u16                     dlid;
  823         u8                      sl;
  824         u8                      src_path_bits;
  825         u8                      static_rate;
  826         u8                      ah_flags;
  827         u8                      port_num;
  828         u8                      dmac[ETH_ALEN];
  829 };
  830 
  831 enum ib_wc_status {
  832         IB_WC_SUCCESS,
  833         IB_WC_LOC_LEN_ERR,
  834         IB_WC_LOC_QP_OP_ERR,
  835         IB_WC_LOC_EEC_OP_ERR,
  836         IB_WC_LOC_PROT_ERR,
  837         IB_WC_WR_FLUSH_ERR,
  838         IB_WC_MW_BIND_ERR,
  839         IB_WC_BAD_RESP_ERR,
  840         IB_WC_LOC_ACCESS_ERR,
  841         IB_WC_REM_INV_REQ_ERR,
  842         IB_WC_REM_ACCESS_ERR,
  843         IB_WC_REM_OP_ERR,
  844         IB_WC_RETRY_EXC_ERR,
  845         IB_WC_RNR_RETRY_EXC_ERR,
  846         IB_WC_LOC_RDD_VIOL_ERR,
  847         IB_WC_REM_INV_RD_REQ_ERR,
  848         IB_WC_REM_ABORT_ERR,
  849         IB_WC_INV_EECN_ERR,
  850         IB_WC_INV_EEC_STATE_ERR,
  851         IB_WC_FATAL_ERR,
  852         IB_WC_RESP_TIMEOUT_ERR,
  853         IB_WC_GENERAL_ERR
  854 };
  855 
  856 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
  857 
  858 enum ib_wc_opcode {
  859         IB_WC_SEND,
  860         IB_WC_RDMA_WRITE,
  861         IB_WC_RDMA_READ,
  862         IB_WC_COMP_SWAP,
  863         IB_WC_FETCH_ADD,
  864         IB_WC_LSO,
  865         IB_WC_LOCAL_INV,
  866         IB_WC_REG_MR,
  867         IB_WC_MASKED_COMP_SWAP,
  868         IB_WC_MASKED_FETCH_ADD,
  869 /*
  870  * Set value of IB_WC_RECV so consumers can test if a completion is a
  871  * receive by testing (opcode & IB_WC_RECV).
  872  */
  873         IB_WC_RECV                      = 1 << 7,
  874         IB_WC_RECV_RDMA_WITH_IMM,
  875         IB_WC_DUMMY = -1,       /* force enum signed */
  876 };
  877 
  878 enum ib_wc_flags {
  879         IB_WC_GRH               = 1,
  880         IB_WC_WITH_IMM          = (1<<1),
  881         IB_WC_WITH_INVALIDATE   = (1<<2),
  882         IB_WC_IP_CSUM_OK        = (1<<3),
  883         IB_WC_WITH_SMAC         = (1<<4),
  884         IB_WC_WITH_VLAN         = (1<<5),
  885         IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
  886 };
  887 
  888 struct ib_wc {
  889         union {
  890                 u64             wr_id;
  891                 struct ib_cqe   *wr_cqe;
  892         };
  893         enum ib_wc_status       status;
  894         enum ib_wc_opcode       opcode;
  895         u32                     vendor_err;
  896         u32                     byte_len;
  897         struct ib_qp           *qp;
  898         union {
  899                 __be32          imm_data;
  900                 u32             invalidate_rkey;
  901         } ex;
  902         u32                     src_qp;
  903         int                     wc_flags;
  904         u16                     pkey_index;
  905         u16                     slid;
  906         u8                      sl;
  907         u8                      dlid_path_bits;
  908         u8                      port_num;       /* valid only for DR SMPs on switches */
  909         u8                      smac[ETH_ALEN];
  910         u16                     vlan_id;
  911         u8                      network_hdr_type;
  912 };
  913 
  914 enum ib_cq_notify_flags {
  915         IB_CQ_SOLICITED                 = 1 << 0,
  916         IB_CQ_NEXT_COMP                 = 1 << 1,
  917         IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
  918         IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
  919 };
  920 
  921 enum ib_srq_type {
  922         IB_SRQT_BASIC,
  923         IB_SRQT_XRC,
  924         IB_SRQT_TM,
  925 };
  926 
  927 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
  928 {
  929         return srq_type == IB_SRQT_XRC ||
  930                srq_type == IB_SRQT_TM;
  931 }
  932 
  933 enum ib_srq_attr_mask {
  934         IB_SRQ_MAX_WR   = 1 << 0,
  935         IB_SRQ_LIMIT    = 1 << 1,
  936 };
  937 
  938 struct ib_srq_attr {
  939         u32     max_wr;
  940         u32     max_sge;
  941         u32     srq_limit;
  942 };
  943 
  944 struct ib_srq_init_attr {
  945         void                  (*event_handler)(struct ib_event *, void *);
  946         void                   *srq_context;
  947         struct ib_srq_attr      attr;
  948         enum ib_srq_type        srq_type;
  949 
  950         struct {
  951                 struct ib_cq   *cq;
  952                 union {
  953                         struct {
  954                                 struct ib_xrcd *xrcd;
  955                         } xrc;
  956 
  957                         struct {
  958                                 u32             max_num_tags;
  959                         } tag_matching;
  960                 };
  961         } ext;
  962 };
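
/*
 * Illustrative sketch, not part of the header: filling ib_srq_init_attr for
 * a basic SRQ.  The limits are placeholders; ib_create_srq() is part of the
 * verbs API but is declared elsewhere in this header.
 */
static inline void example_init_basic_srq_attr(struct ib_srq_init_attr *init)
{
        memset(init, 0, sizeof(*init));
        init->srq_type = IB_SRQT_BASIC; /* no ext.cq/xrcd, see ib_srq_has_cq() */
        init->attr.max_wr = 64;
        init->attr.max_sge = 1;
}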
  963 
  964 struct ib_qp_cap {
  965         u32     max_send_wr;
  966         u32     max_recv_wr;
  967         u32     max_send_sge;
  968         u32     max_recv_sge;
  969         u32     max_inline_data;
  970 
  971         /*
  972          * Maximum number of rdma_rw_ctx structures in flight at a time.
  973          * ib_create_qp() will calculate the required number of WRs
  974          * and MRs based on this.
  975          */
  976         u32     max_rdma_ctxs;
  977 };
  978 
  979 enum ib_sig_type {
  980         IB_SIGNAL_ALL_WR,
  981         IB_SIGNAL_REQ_WR
  982 };
  983 
  984 enum ib_qp_type {
  985         /*
  986          * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
  987          * here (and in that order) since the MAD layer uses them as
  988          * indices into a 2-entry table.
  989          */
  990         IB_QPT_SMI,
  991         IB_QPT_GSI,
  992 
  993         IB_QPT_RC,
  994         IB_QPT_UC,
  995         IB_QPT_UD,
  996         IB_QPT_RAW_IPV6,
  997         IB_QPT_RAW_ETHERTYPE,
  998         IB_QPT_RAW_PACKET = 8,
  999         IB_QPT_XRC_INI = 9,
 1000         IB_QPT_XRC_TGT,
 1001         IB_QPT_MAX,
 1002         IB_QPT_DRIVER = 0xFF,
 1003         /* Reserve a range for qp types internal to the low level driver.
 1004          * These qp types will not be visible at the IB core layer, so the
 1005          * These qp types will not be visible at the IB core layer, so uses
 1006          * of IB_QPT_MAX in the core layer are unaffected.
 1007         IB_QPT_RESERVED1 = 0x1000,
 1008         IB_QPT_RESERVED2,
 1009         IB_QPT_RESERVED3,
 1010         IB_QPT_RESERVED4,
 1011         IB_QPT_RESERVED5,
 1012         IB_QPT_RESERVED6,
 1013         IB_QPT_RESERVED7,
 1014         IB_QPT_RESERVED8,
 1015         IB_QPT_RESERVED9,
 1016         IB_QPT_RESERVED10,
 1017 };
 1018 
 1019 enum ib_qp_create_flags {
 1020         IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
 1021         IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
 1022         IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
 1023         IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
 1024         IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
 1025         IB_QP_CREATE_NETIF_QP                   = 1 << 5,
 1026         IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
 1027         IB_QP_CREATE_USE_GFP_NOIO               = 1 << 7,
 1028         IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
 1029         IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
 1030         IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
 1031         IB_QP_CREATE_PCI_WRITE_END_PADDING      = 1 << 11,
 1032         /* reserve bits 26-31 for low level drivers' internal use */
 1033         IB_QP_CREATE_RESERVED_START             = 1 << 26,
 1034         IB_QP_CREATE_RESERVED_END               = 1 << 31,
 1035 };
 1036 
 1037 /*
 1038  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 1039  * callback to destroy the passed-in QP.
 1040  */
 1041 
 1042 struct ib_qp_init_attr {
 1043         void                  (*event_handler)(struct ib_event *, void *);
 1044         void                   *qp_context;
 1045         struct ib_cq           *send_cq;
 1046         struct ib_cq           *recv_cq;
 1047         struct ib_srq          *srq;
 1048         struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
 1049         struct ib_qp_cap        cap;
 1050         enum ib_sig_type        sq_sig_type;
 1051         enum ib_qp_type         qp_type;
 1052         enum ib_qp_create_flags create_flags;
 1053 
 1054         /*
 1055          * Only needed for special QP types, or when using the RW API.
 1056          */
 1057         u8                      port_num;
 1058         struct ib_rwq_ind_table *rwq_ind_tbl;
 1059         u32                     source_qpn;
 1060 };
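
/*
 * Illustrative sketch, not part of the header: initializing ib_qp_init_attr
 * for a reliable-connected QP.  The capacities are placeholders;
 * ib_create_qp() is part of the verbs API but is declared elsewhere in this
 * header.
 */
static inline void example_init_rc_qp_attr(struct ib_qp_init_attr *attr,
    struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
        memset(attr, 0, sizeof(*attr));
        attr->send_cq = send_cq;
        attr->recv_cq = recv_cq;
        attr->qp_type = IB_QPT_RC;
        attr->sq_sig_type = IB_SIGNAL_REQ_WR;   /* completions only on request */
        attr->cap.max_send_wr = 16;
        attr->cap.max_recv_wr = 16;
        attr->cap.max_send_sge = 1;
        attr->cap.max_recv_sge = 1;
}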
 1061 
 1062 struct ib_qp_open_attr {
 1063         void                  (*event_handler)(struct ib_event *, void *);
 1064         void                   *qp_context;
 1065         u32                     qp_num;
 1066         enum ib_qp_type         qp_type;
 1067 };
 1068 
 1069 enum ib_rnr_timeout {
 1070         IB_RNR_TIMER_655_36 =  0,
 1071         IB_RNR_TIMER_000_01 =  1,
 1072         IB_RNR_TIMER_000_02 =  2,
 1073         IB_RNR_TIMER_000_03 =  3,
 1074         IB_RNR_TIMER_000_04 =  4,
 1075         IB_RNR_TIMER_000_06 =  5,
 1076         IB_RNR_TIMER_000_08 =  6,
 1077         IB_RNR_TIMER_000_12 =  7,
 1078         IB_RNR_TIMER_000_16 =  8,
 1079         IB_RNR_TIMER_000_24 =  9,
 1080         IB_RNR_TIMER_000_32 = 10,
 1081         IB_RNR_TIMER_000_48 = 11,
 1082         IB_RNR_TIMER_000_64 = 12,
 1083         IB_RNR_TIMER_000_96 = 13,
 1084         IB_RNR_TIMER_001_28 = 14,
 1085         IB_RNR_TIMER_001_92 = 15,
 1086         IB_RNR_TIMER_002_56 = 16,
 1087         IB_RNR_TIMER_003_84 = 17,
 1088         IB_RNR_TIMER_005_12 = 18,
 1089         IB_RNR_TIMER_007_68 = 19,
 1090         IB_RNR_TIMER_010_24 = 20,
 1091         IB_RNR_TIMER_015_36 = 21,
 1092         IB_RNR_TIMER_020_48 = 22,
 1093         IB_RNR_TIMER_030_72 = 23,
 1094         IB_RNR_TIMER_040_96 = 24,
 1095         IB_RNR_TIMER_061_44 = 25,
 1096         IB_RNR_TIMER_081_92 = 26,
 1097         IB_RNR_TIMER_122_88 = 27,
 1098         IB_RNR_TIMER_163_84 = 28,
 1099         IB_RNR_TIMER_245_76 = 29,
 1100         IB_RNR_TIMER_327_68 = 30,
 1101         IB_RNR_TIMER_491_52 = 31
 1102 };
 1103 
 1104 enum ib_qp_attr_mask {
 1105         IB_QP_STATE                     = 1,
 1106         IB_QP_CUR_STATE                 = (1<<1),
 1107         IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
 1108         IB_QP_ACCESS_FLAGS              = (1<<3),
 1109         IB_QP_PKEY_INDEX                = (1<<4),
 1110         IB_QP_PORT                      = (1<<5),
 1111         IB_QP_QKEY                      = (1<<6),
 1112         IB_QP_AV                        = (1<<7),
 1113         IB_QP_PATH_MTU                  = (1<<8),
 1114         IB_QP_TIMEOUT                   = (1<<9),
 1115         IB_QP_RETRY_CNT                 = (1<<10),
 1116         IB_QP_RNR_RETRY                 = (1<<11),
 1117         IB_QP_RQ_PSN                    = (1<<12),
 1118         IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
 1119         IB_QP_ALT_PATH                  = (1<<14),
 1120         IB_QP_MIN_RNR_TIMER             = (1<<15),
 1121         IB_QP_SQ_PSN                    = (1<<16),
 1122         IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
 1123         IB_QP_PATH_MIG_STATE            = (1<<18),
 1124         IB_QP_CAP                       = (1<<19),
 1125         IB_QP_DEST_QPN                  = (1<<20),
 1126         IB_QP_RESERVED1                 = (1<<21),
 1127         IB_QP_RESERVED2                 = (1<<22),
 1128         IB_QP_RESERVED3                 = (1<<23),
 1129         IB_QP_RESERVED4                 = (1<<24),
 1130         IB_QP_RATE_LIMIT                = (1<<25),
 1131 };
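
/*
 * Illustrative sketch, not part of the header: the attribute mask a consumer
 * typically passes to ib_modify_qp() for the RESET->INIT transition of an RC
 * QP (ib_modify_qp() is declared elsewhere in this header); the mask name is
 * hypothetical.
 */
enum {
        EXAMPLE_RC_RESET_TO_INIT_MASK = IB_QP_STATE | IB_QP_PKEY_INDEX |
                                        IB_QP_PORT | IB_QP_ACCESS_FLAGS,
};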
 1132 
 1133 enum ib_qp_state {
 1134         IB_QPS_RESET,
 1135         IB_QPS_INIT,
 1136         IB_QPS_RTR,
 1137         IB_QPS_RTS,
 1138         IB_QPS_SQD,
 1139         IB_QPS_SQE,
 1140         IB_QPS_ERR,
 1141         IB_QPS_DUMMY = -1,      /* force enum signed */
 1142 };
 1143 
 1144 enum ib_mig_state {
 1145         IB_MIG_MIGRATED,
 1146         IB_MIG_REARM,
 1147         IB_MIG_ARMED
 1148 };
 1149 
 1150 enum ib_mw_type {
 1151         IB_MW_TYPE_1 = 1,
 1152         IB_MW_TYPE_2 = 2
 1153 };
 1154 
 1155 struct ib_qp_attr {
 1156         enum ib_qp_state        qp_state;
 1157         enum ib_qp_state        cur_qp_state;
 1158         enum ib_mtu             path_mtu;
 1159         enum ib_mig_state       path_mig_state;
 1160         u32                     qkey;
 1161         u32                     rq_psn;
 1162         u32                     sq_psn;
 1163         u32                     dest_qp_num;
 1164         int                     qp_access_flags;
 1165         struct ib_qp_cap        cap;
 1166         struct ib_ah_attr       ah_attr;
 1167         struct ib_ah_attr       alt_ah_attr;
 1168         u16                     pkey_index;
 1169         u16                     alt_pkey_index;
 1170         u8                      en_sqd_async_notify;
 1171         u8                      sq_draining;
 1172         u8                      max_rd_atomic;
 1173         u8                      max_dest_rd_atomic;
 1174         u8                      min_rnr_timer;
 1175         u8                      port_num;
 1176         u8                      timeout;
 1177         u8                      retry_cnt;
 1178         u8                      rnr_retry;
 1179         u8                      alt_port_num;
 1180         u8                      alt_timeout;
 1181         u32                     rate_limit;
 1182 };
 1183 
 1184 enum ib_wr_opcode {
 1185         IB_WR_RDMA_WRITE,
 1186         IB_WR_RDMA_WRITE_WITH_IMM,
 1187         IB_WR_SEND,
 1188         IB_WR_SEND_WITH_IMM,
 1189         IB_WR_RDMA_READ,
 1190         IB_WR_ATOMIC_CMP_AND_SWP,
 1191         IB_WR_ATOMIC_FETCH_AND_ADD,
 1192         IB_WR_LSO,
 1193         IB_WR_SEND_WITH_INV,
 1194         IB_WR_RDMA_READ_WITH_INV,
 1195         IB_WR_LOCAL_INV,
 1196         IB_WR_REG_MR,
 1197         IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 1198         IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 1199         IB_WR_REG_SIG_MR,
 1200         /* reserve values for low level drivers' internal use.
 1201          * These values will not be used at all in the ib core layer.
 1202          */
 1203         IB_WR_RESERVED1 = 0xf0,
 1204         IB_WR_RESERVED2,
 1205         IB_WR_RESERVED3,
 1206         IB_WR_RESERVED4,
 1207         IB_WR_RESERVED5,
 1208         IB_WR_RESERVED6,
 1209         IB_WR_RESERVED7,
 1210         IB_WR_RESERVED8,
 1211         IB_WR_RESERVED9,
 1212         IB_WR_RESERVED10,
 1213         IB_WR_DUMMY = -1,       /* force enum signed */
 1214 };
 1215 
 1216 enum ib_send_flags {
 1217         IB_SEND_FENCE           = 1,
 1218         IB_SEND_SIGNALED        = (1<<1),
 1219         IB_SEND_SOLICITED       = (1<<2),
 1220         IB_SEND_INLINE          = (1<<3),
 1221         IB_SEND_IP_CSUM         = (1<<4),
 1222 
 1223         /* reserve bits 26-31 for low level drivers' internal use */
 1224         IB_SEND_RESERVED_START  = (1 << 26),
 1225         IB_SEND_RESERVED_END    = (1 << 31),
 1226 };
 1227 
 1228 struct ib_sge {
 1229         u64     addr;
 1230         u32     length;
 1231         u32     lkey;
 1232 };
 1233 
 1234 struct ib_cqe {
 1235         void (*done)(struct ib_cq *cq, struct ib_wc *wc);
 1236 };
 1237 
 1238 struct ib_send_wr {
 1239         struct ib_send_wr      *next;
 1240         union {
 1241                 u64             wr_id;
 1242                 struct ib_cqe   *wr_cqe;
 1243         };
 1244         struct ib_sge          *sg_list;
 1245         int                     num_sge;
 1246         enum ib_wr_opcode       opcode;
 1247         int                     send_flags;
 1248         union {
 1249                 __be32          imm_data;
 1250                 u32             invalidate_rkey;
 1251         } ex;
 1252 };
 1253 
 1254 struct ib_rdma_wr {
 1255         struct ib_send_wr       wr;
 1256         u64                     remote_addr;
 1257         u32                     rkey;
 1258 };
 1259 
 1260 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
 1261 {
 1262         return container_of(wr, struct ib_rdma_wr, wr);
 1263 }
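
/*
 * Illustrative sketch, not part of the header: building an ib_rdma_wr for an
 * RDMA WRITE.  All values are placeholders; the request would then be handed
 * to ib_post_send(), which is declared elsewhere in this header.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
    struct ib_sge *sge, u64 remote_addr, u32 rkey)
{
        memset(wr, 0, sizeof(*wr));
        wr->wr.opcode = IB_WR_RDMA_WRITE;
        wr->wr.sg_list = sge;
        wr->wr.num_sge = 1;
        wr->wr.send_flags = IB_SEND_SIGNALED;   /* ask for a completion */
        wr->remote_addr = remote_addr;
        wr->rkey = rkey;
}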
 1264 
 1265 struct ib_atomic_wr {
 1266         struct ib_send_wr       wr;
 1267         u64                     remote_addr;
 1268         u64                     compare_add;
 1269         u64                     swap;
 1270         u64                     compare_add_mask;
 1271         u64                     swap_mask;
 1272         u32                     rkey;
 1273 };
 1274 
 1275 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
 1276 {
 1277         return container_of(wr, struct ib_atomic_wr, wr);
 1278 }
 1279 
 1280 struct ib_ud_wr {
 1281         struct ib_send_wr       wr;
 1282         struct ib_ah            *ah;
 1283         void                    *header;
 1284         int                     hlen;
 1285         int                     mss;
 1286         u32                     remote_qpn;
 1287         u32                     remote_qkey;
 1288         u16                     pkey_index; /* valid for GSI only */
 1289         u8                      port_num;   /* valid for DR SMPs on switch only */
 1290 };
 1291 
 1292 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
 1293 {
 1294         return container_of(wr, struct ib_ud_wr, wr);
 1295 }
 1296 
 1297 struct ib_reg_wr {
 1298         struct ib_send_wr       wr;
 1299         struct ib_mr            *mr;
 1300         u32                     key;
 1301         int                     access;
 1302 };
 1303 
 1304 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
 1305 {
 1306         return container_of(wr, struct ib_reg_wr, wr);
 1307 }
 1308 
 1309 struct ib_sig_handover_wr {
 1310         struct ib_send_wr       wr;
 1311         struct ib_sig_attrs    *sig_attrs;
 1312         struct ib_mr           *sig_mr;
 1313         int                     access_flags;
 1314         struct ib_sge          *prot;
 1315 };
 1316 
 1317 static inline const struct ib_sig_handover_wr *sig_handover_wr(const struct ib_send_wr *wr)
 1318 {
 1319         return container_of(wr, struct ib_sig_handover_wr, wr);
 1320 }
 1321 
 1322 struct ib_recv_wr {
 1323         struct ib_recv_wr      *next;
 1324         union {
 1325                 u64             wr_id;
 1326                 struct ib_cqe   *wr_cqe;
 1327         };
 1328         struct ib_sge          *sg_list;
 1329         int                     num_sge;
 1330 };
 1331 
 1332 enum ib_access_flags {
 1333         IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
 1334         IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
 1335         IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
 1336         IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
 1337         IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
 1338         IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
 1339         IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
 1340         IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
 1341         IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
 1342 
 1343         IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
 1344         IB_ACCESS_SUPPORTED =
 1345                 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
 1346 };
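
/*
 * Illustrative sketch, not part of the header: a typical access mask for a
 * memory region that the local HCA may write and a remote peer may read,
 * e.g. as passed to MR registration.  The name is hypothetical.
 */
enum {
        EXAMPLE_MR_ACCESS = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
};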
 1347 
 1348 /*
 1349  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 1350  * are hidden here instead of a uapi header!
 1351  */
 1352 enum ib_mr_rereg_flags {
 1353         IB_MR_REREG_TRANS       = 1,
 1354         IB_MR_REREG_PD          = (1<<1),
 1355         IB_MR_REREG_ACCESS      = (1<<2),
 1356         IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
 1357 };
 1358 
 1359 struct ib_fmr_attr {
 1360         int     max_pages;
 1361         int     max_maps;
 1362         u8      page_shift;
 1363 };
 1364 
 1365 struct ib_umem;
 1366 
 1367 enum rdma_remove_reason {
 1368         /*
 1369          * Userspace requested uobject deletion, or this is an initial
 1370          * attempt to remove the uobject via cleanup.  The call may fail.
 1371          */
 1372         RDMA_REMOVE_DESTROY,
 1373         /* Context deletion. This call should delete the actual object itself */
 1374         RDMA_REMOVE_CLOSE,
 1375         /* Driver is being hot-unplugged. This call should delete the actual object itself */
 1376         RDMA_REMOVE_DRIVER_REMOVE,
 1377         /* uobj is being cleaned-up before being committed */
 1378         RDMA_REMOVE_ABORT,
 1379 };
 1380 
 1381 struct ib_rdmacg_object {
 1382 };
 1383 
 1384 struct ib_ucontext {
 1385         struct ib_device       *device;
 1386         struct ib_uverbs_file  *ufile;
 1387         /*
 1388          * 'closing' can be read by the driver only during a destroy callback;
 1389          * it is set when we are closing the file descriptor and indicates
 1390          * that mm_sem may be locked.
 1391          */
 1392         bool closing;
 1393 
 1394         bool cleanup_retryable;
 1395 
 1396         struct ib_rdmacg_object cg_obj;
 1397         /*
 1398          * Implementation details of the RDMA core, don't use in drivers:
 1399          */
 1400         struct xarray mmap_xa;
 1401 };
 1402 
 1403 struct ib_uobject {
 1404         u64                     user_handle;    /* handle given to us by userspace */
 1405         /* ufile & ucontext owning this object */
 1406         struct ib_uverbs_file  *ufile;
 1407         /* FIXME, save memory: ufile->context == context */
 1408         struct ib_ucontext     *context;        /* associated user context */
 1409         void                   *object;         /* containing object */
 1410         struct list_head        list;           /* link to context's list */
 1411         struct ib_rdmacg_object cg_obj;         /* rdmacg object */
 1412         int                     id;             /* index into kernel idr */
 1413         struct kref             ref;
 1414         atomic_t                usecnt;         /* protects exclusive access */
 1415         struct rcu_head         rcu;            /* kfree_rcu() overhead */
 1416 
 1417         const struct uverbs_api_object *uapi_object;
 1418 };
 1419 
 1420 struct ib_udata {
 1421         const u8 __user *inbuf;
 1422         u8 __user *outbuf;
 1423         size_t       inlen;
 1424         size_t       outlen;
 1425 };
 1426 
 1427 struct ib_pd {
 1428         u32                     local_dma_lkey;
 1429         u32                     flags;
 1430         struct ib_device       *device;
 1431         struct ib_uobject      *uobject;
 1432         atomic_t                usecnt; /* count all resources */
 1433 
 1434         u32                     unsafe_global_rkey;
 1435 
 1436         /*
 1437          * Implementation details of the RDMA core, don't use in drivers:
 1438          */
 1439         struct ib_mr           *__internal_mr;
 1440 };
 1441 
 1442 struct ib_xrcd {
 1443         struct ib_device       *device;
 1444         atomic_t                usecnt; /* count all exposed resources */
 1445         struct inode           *inode;
 1446 
 1447         struct mutex            tgt_qp_mutex;
 1448         struct list_head        tgt_qp_list;
 1449 };
 1450 
 1451 struct ib_ah {
 1452         struct ib_device        *device;
 1453         struct ib_pd            *pd;
 1454         struct ib_uobject       *uobject;
 1455 };
 1456 
 1457 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 1458 
 1459 enum ib_poll_context {
 1460         IB_POLL_DIRECT,         /* caller context, no hw completions */
 1461         IB_POLL_SOFTIRQ,        /* poll from softirq context */
 1462         IB_POLL_WORKQUEUE,      /* poll from workqueue */
 1463 };
 1464 
 1465 struct ib_cq {
 1466         struct ib_device       *device;
 1467         struct ib_ucq_object   *uobject;
 1468         ib_comp_handler         comp_handler;
 1469         void                  (*event_handler)(struct ib_event *, void *);
 1470         void                   *cq_context;
 1471         int                     cqe;
 1472         atomic_t                usecnt; /* count number of work queues */
 1473         enum ib_poll_context    poll_ctx;
 1474         struct work_struct      work;
 1475 };
 1476 
 1477 struct ib_srq {
 1478         struct ib_device       *device;
 1479         struct ib_pd           *pd;
 1480         struct ib_usrq_object  *uobject;
 1481         void                  (*event_handler)(struct ib_event *, void *);
 1482         void                   *srq_context;
 1483         enum ib_srq_type        srq_type;
 1484         atomic_t                usecnt;
 1485 
 1486         struct {
 1487                 struct ib_cq   *cq;
 1488                 union {
 1489                         struct {
 1490                                 struct ib_xrcd *xrcd;
 1491                                 u32             srq_num;
 1492                         } xrc;
 1493                 };
 1494         } ext;
 1495 };
 1496 
 1497 enum ib_wq_type {
 1498         IB_WQT_RQ
 1499 };
 1500 
 1501 enum ib_wq_state {
 1502         IB_WQS_RESET,
 1503         IB_WQS_RDY,
 1504         IB_WQS_ERR
 1505 };
 1506 
 1507 struct ib_wq {
 1508         struct ib_device       *device;
 1509         struct ib_uwq_object   *uobject;
 1510         void                *wq_context;
 1511         void                (*event_handler)(struct ib_event *, void *);
 1512         struct ib_pd           *pd;
 1513         struct ib_cq           *cq;
 1514         u32             wq_num;
 1515         enum ib_wq_state       state;
 1516         enum ib_wq_type wq_type;
 1517         atomic_t                usecnt;
 1518 };
 1519 
 1520 enum ib_wq_flags {
 1521         IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
 1522         IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
 1523         IB_WQ_FLAGS_DELAY_DROP          = 1 << 2,
 1524         IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
 1525 };
 1526 
 1527 struct ib_wq_init_attr {
 1528         void                   *wq_context;
 1529         enum ib_wq_type wq_type;
 1530         u32             max_wr;
 1531         u32             max_sge;
 1532         struct  ib_cq          *cq;
 1533         void                (*event_handler)(struct ib_event *, void *);
 1534         u32             create_flags; /* Use enum ib_wq_flags */
 1535 };
 1536 
 1537 enum ib_wq_attr_mask {
 1538         IB_WQ_STATE             = 1 << 0,
 1539         IB_WQ_CUR_STATE         = 1 << 1,
 1540         IB_WQ_FLAGS             = 1 << 2,
 1541 };
 1542 
 1543 struct ib_wq_attr {
 1544         enum    ib_wq_state     wq_state;
 1545         enum    ib_wq_state     curr_wq_state;
 1546         u32                     flags; /* Use enum ib_wq_flags */
 1547         u32                     flags_mask; /* Use enum ib_wq_flags */
 1548 };
 1549 
 1550 struct ib_rwq_ind_table {
 1551         struct ib_device        *device;
 1552         struct ib_uobject      *uobject;
 1553         atomic_t                usecnt;
 1554         u32             ind_tbl_num;
 1555         u32             log_ind_tbl_size;
 1556         struct ib_wq    **ind_tbl;
 1557 };
 1558 
 1559 struct ib_rwq_ind_table_init_attr {
 1560         u32             log_ind_tbl_size;
 1561         /* Each entry is a pointer to a Receive Work Queue */
 1562         struct ib_wq    **ind_tbl;
 1563 };
 1564 
 1565 /*
 1566  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 1567  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 1568  */
 1569 struct ib_qp {
 1570         struct ib_device       *device;
 1571         struct ib_pd           *pd;
 1572         struct ib_cq           *send_cq;
 1573         struct ib_cq           *recv_cq;
 1574         spinlock_t              mr_lock;
 1575         struct ib_srq          *srq;
 1576         struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
 1577         struct list_head        xrcd_list;
 1578 
 1579         /* count times opened, mcast attaches, flow attaches */
 1580         atomic_t                usecnt;
 1581         struct list_head        open_list;
 1582         struct ib_qp           *real_qp;
 1583         struct ib_uqp_object   *uobject;
 1584         void                  (*event_handler)(struct ib_event *, void *);
 1585         void                   *qp_context;
 1586         u32                     qp_num;
 1587         u32                     max_write_sge;
 1588         u32                     max_read_sge;
 1589         enum ib_qp_type         qp_type;
 1590         struct ib_rwq_ind_table *rwq_ind_tbl;
 1591         u8                      port;
 1592 };
 1593 
 1594 struct ib_dm {
 1595         struct ib_device  *device;
 1596         u32                length;
 1597         u32                flags;
 1598         struct ib_uobject *uobject;
 1599         atomic_t           usecnt;
 1600 };
 1601 
 1602 struct ib_mr {
 1603         struct ib_device  *device;
 1604         struct ib_pd      *pd;
 1605         u32                lkey;
 1606         u32                rkey;
 1607         u64                iova;
 1608         u64                length;
 1609         unsigned int       page_size;
 1610         enum ib_mr_type    type;
 1611         bool               need_inval;
 1612         union {
 1613                 struct ib_uobject       *uobject;       /* user */
 1614                 struct list_head        qp_entry;       /* FR */
 1615         };
 1616 
 1617         struct ib_dm      *dm;
 1618         struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
 1619 };
 1620 
 1621 struct ib_mw {
 1622         struct ib_device        *device;
 1623         struct ib_pd            *pd;
 1624         struct ib_uobject       *uobject;
 1625         u32                     rkey;
 1626         enum ib_mw_type         type;
 1627 };
 1628 
 1629 struct ib_fmr {
 1630         struct ib_device        *device;
 1631         struct ib_pd            *pd;
 1632         struct list_head        list;
 1633         u32                     lkey;
 1634         u32                     rkey;
 1635 };
 1636 
 1637 /* Supported steering options */
 1638 enum ib_flow_attr_type {
 1639         /* steering according to rule specifications */
 1640         IB_FLOW_ATTR_NORMAL             = 0x0,
 1641         /* default unicast and multicast rule -
 1642          * receive all Eth traffic which isn't steered to any QP
 1643          */
 1644         IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
 1645         /* default multicast rule -
 1646          * receive all Eth multicast traffic which isn't steered to any QP
 1647          */
 1648         IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
 1649         /* sniffer rule - receive all port traffic */
 1650         IB_FLOW_ATTR_SNIFFER            = 0x3
 1651 };
 1652 
 1653 /* Supported steering header types */
 1654 enum ib_flow_spec_type {
 1655         /* L2 headers */
 1656         IB_FLOW_SPEC_ETH                = 0x20,
 1657         IB_FLOW_SPEC_IB                 = 0x22,
 1658         /* L3 header */
 1659         IB_FLOW_SPEC_IPV4               = 0x30,
 1660         IB_FLOW_SPEC_IPV6               = 0x31,
 1661         IB_FLOW_SPEC_ESP                = 0x34,
 1662         /* L4 headers */
 1663         IB_FLOW_SPEC_TCP                = 0x40,
 1664         IB_FLOW_SPEC_UDP                = 0x41,
 1665         IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
 1666         IB_FLOW_SPEC_GRE                = 0x51,
 1667         IB_FLOW_SPEC_MPLS               = 0x60,
 1668         IB_FLOW_SPEC_INNER              = 0x100,
 1669         /* Actions */
 1670         IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
 1671         IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
 1672         IB_FLOW_SPEC_ACTION_HANDLE      = 0x1002,
 1673         IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
 1674 };
 1675 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
 1676 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
 1677 
 1678 /* Flow steering rule priority is set according to its domain.
 1679  * Lower domain value means higher priority.
 1680  */
 1681 enum ib_flow_domain {
 1682         IB_FLOW_DOMAIN_USER,
 1683         IB_FLOW_DOMAIN_ETHTOOL,
 1684         IB_FLOW_DOMAIN_RFS,
 1685         IB_FLOW_DOMAIN_NIC,
 1686         IB_FLOW_DOMAIN_NUM /* Must be last */
 1687 };
 1688 
 1689 enum ib_flow_flags {
 1690         IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
 1691         IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
 1692 };
 1693 
 1694 struct ib_flow_eth_filter {
 1695         u8      dst_mac[6];
 1696         u8      src_mac[6];
 1697         __be16  ether_type;
 1698         __be16  vlan_tag;
 1699         /* Must be last */
 1700         u8      real_sz[0];
 1701 };
 1702 
 1703 struct ib_flow_spec_eth {
 1704         enum ib_flow_spec_type    type;
 1705         u16                       size;
 1706         struct ib_flow_eth_filter val;
 1707         struct ib_flow_eth_filter mask;
 1708 };
 1709 
 1710 struct ib_flow_ib_filter {
 1711         __be16 dlid;
 1712         __u8   sl;
 1713         /* Must be last */
 1714         u8      real_sz[0];
 1715 };
 1716 
 1717 struct ib_flow_spec_ib {
 1718         enum ib_flow_spec_type   type;
 1719         u16                      size;
 1720         struct ib_flow_ib_filter val;
 1721         struct ib_flow_ib_filter mask;
 1722 };
 1723 
 1724 /* IPv4 header flags */
 1725 enum ib_ipv4_flags {
 1726         IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
 1727         IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
 1728                                     last have this flag set */
 1729 };
 1730 
 1731 struct ib_flow_ipv4_filter {
 1732         __be32  src_ip;
 1733         __be32  dst_ip;
 1734         u8      proto;
 1735         u8      tos;
 1736         u8      ttl;
 1737         u8      flags;
 1738         /* Must be last */
 1739         u8      real_sz[0];
 1740 };
 1741 
 1742 struct ib_flow_spec_ipv4 {
 1743         enum ib_flow_spec_type     type;
 1744         u16                        size;
 1745         struct ib_flow_ipv4_filter val;
 1746         struct ib_flow_ipv4_filter mask;
 1747 };
 1748 
 1749 struct ib_flow_ipv6_filter {
 1750         u8      src_ip[16];
 1751         u8      dst_ip[16];
 1752         __be32  flow_label;
 1753         u8      next_hdr;
 1754         u8      traffic_class;
 1755         u8      hop_limit;
 1756         /* Must be last */
 1757         u8      real_sz[0];
 1758 };
 1759 
 1760 struct ib_flow_spec_ipv6 {
 1761         enum ib_flow_spec_type     type;
 1762         u16                        size;
 1763         struct ib_flow_ipv6_filter val;
 1764         struct ib_flow_ipv6_filter mask;
 1765 };
 1766 
 1767 struct ib_flow_tcp_udp_filter {
 1768         __be16  dst_port;
 1769         __be16  src_port;
 1770         /* Must be last */
 1771         u8      real_sz[0];
 1772 };
 1773 
 1774 struct ib_flow_spec_tcp_udp {
 1775         enum ib_flow_spec_type        type;
 1776         u16                           size;
 1777         struct ib_flow_tcp_udp_filter val;
 1778         struct ib_flow_tcp_udp_filter mask;
 1779 };
 1780 
 1781 struct ib_flow_tunnel_filter {
 1782         __be32  tunnel_id;
 1783         u8      real_sz[0];
 1784 };
 1785 
 1786 /* ib_flow_spec_tunnel describes the VXLAN tunnel;
 1787  * the tunnel_id field of val holds the VNI value.
 1788  */
 1789 struct ib_flow_spec_tunnel {
 1790         u32                           type;
 1791         u16                           size;
 1792         struct ib_flow_tunnel_filter  val;
 1793         struct ib_flow_tunnel_filter  mask;
 1794 };
 1795 
 1796 struct ib_flow_esp_filter {
 1797         __be32  spi;
 1798         __be32  seq;
 1799         /* Must be last */
 1800         u8      real_sz[0];
 1801 };
 1802 
 1803 struct ib_flow_spec_esp {
 1804         u32                           type;
 1805         u16                           size;
 1806         struct ib_flow_esp_filter     val;
 1807         struct ib_flow_esp_filter     mask;
 1808 };
 1809 
 1810 struct ib_flow_gre_filter {
 1811         __be16 c_ks_res0_ver;
 1812         __be16 protocol;
 1813         __be32 key;
 1814         /* Must be last */
 1815         u8      real_sz[0];
 1816 };
 1817 
 1818 struct ib_flow_spec_gre {
 1819         u32                           type;
 1820         u16                           size;
 1821         struct ib_flow_gre_filter     val;
 1822         struct ib_flow_gre_filter     mask;
 1823 };
 1824 
 1825 struct ib_flow_mpls_filter {
 1826         __be32 tag;
 1827         /* Must be last */
 1828         u8      real_sz[0];
 1829 };
 1830 
 1831 struct ib_flow_spec_mpls {
 1832         u32                           type;
 1833         u16                           size;
 1834         struct ib_flow_mpls_filter     val;
 1835         struct ib_flow_mpls_filter     mask;
 1836 };
 1837 
 1838 struct ib_flow_spec_action_tag {
 1839         enum ib_flow_spec_type        type;
 1840         u16                           size;
 1841         u32                           tag_id;
 1842 };
 1843 
 1844 struct ib_flow_spec_action_drop {
 1845         enum ib_flow_spec_type        type;
 1846         u16                           size;
 1847 };
 1848 
 1849 struct ib_flow_spec_action_handle {
 1850         enum ib_flow_spec_type        type;
 1851         u16                           size;
 1852         struct ib_flow_action        *act;
 1853 };
 1854 
 1855 enum ib_counters_description {
 1856         IB_COUNTER_PACKETS,
 1857         IB_COUNTER_BYTES,
 1858 };
 1859 
 1860 struct ib_flow_spec_action_count {
 1861         enum ib_flow_spec_type type;
 1862         u16 size;
 1863         struct ib_counters *counters;
 1864 };
 1865 
 1866 union ib_flow_spec {
 1867         struct {
 1868                 u32                     type;
 1869                 u16                     size;
 1870         };
 1871         struct ib_flow_spec_eth         eth;
 1872         struct ib_flow_spec_ib          ib;
 1873         struct ib_flow_spec_ipv4        ipv4;
 1874         struct ib_flow_spec_tcp_udp     tcp_udp;
 1875         struct ib_flow_spec_ipv6        ipv6;
 1876         struct ib_flow_spec_tunnel      tunnel;
 1877         struct ib_flow_spec_esp         esp;
 1878         struct ib_flow_spec_gre         gre;
 1879         struct ib_flow_spec_mpls        mpls;
 1880         struct ib_flow_spec_action_tag  flow_tag;
 1881         struct ib_flow_spec_action_drop drop;
 1882         struct ib_flow_spec_action_handle action;
 1883         struct ib_flow_spec_action_count flow_count;
 1884 };
 1885 
 1886 struct ib_flow_attr {
 1887         enum ib_flow_attr_type type;
 1888         u16          size;
 1889         u16          priority;
 1890         u32          flags;
 1891         u8           num_of_specs;
 1892         u8           port;
 1893         union ib_flow_spec flows[0];
 1894 };
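
      /*
       * Example (sketch): ib_flow_attr ends in a flexible array, so the
       * allocation must leave room for num_of_specs trailing ib_flow_spec
       * unions; my_drv_alloc_flow_attr() is a hypothetical helper.
       */
      static inline struct ib_flow_attr *
      my_drv_alloc_flow_attr(u8 num_specs, gfp_t gfp)
      {
              return kzalloc(sizeof(struct ib_flow_attr) +
                  num_specs * sizeof(union ib_flow_spec), gfp);
      }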
 1895 
 1896 struct ib_flow {
 1897         struct ib_qp            *qp;
 1898         struct ib_device        *device;
 1899         struct ib_uobject       *uobject;
 1900 };
 1901 
 1902 enum ib_flow_action_type {
 1903         IB_FLOW_ACTION_UNSPECIFIED,
 1904         IB_FLOW_ACTION_ESP = 1,
 1905 };
 1906 
 1907 struct ib_flow_action_attrs_esp_keymats {
 1908         enum ib_uverbs_flow_action_esp_keymat                   protocol;
 1909         union {
 1910                 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
 1911         } keymat;
 1912 };
 1913 
 1914 struct ib_flow_action_attrs_esp_replays {
 1915         enum ib_uverbs_flow_action_esp_replay                   protocol;
 1916         union {
 1917                 struct ib_uverbs_flow_action_esp_replay_bmp     bmp;
 1918         } replay;
 1919 };
 1920 
 1921 enum ib_flow_action_attrs_esp_flags {
 1922         /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
 1923          * This is done in order to share the same flags between user-space and
 1924          * kernel and avoid an unnecessary translation.
 1925          */
 1926 
 1927         /* Kernel flags */
 1928         IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED  = 1ULL << 32,
 1929         IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS  = 1ULL << 33,
 1930 };
 1931 
 1932 struct ib_flow_spec_list {
 1933         struct ib_flow_spec_list        *next;
 1934         union ib_flow_spec              spec;
 1935 };
 1936 
 1937 struct ib_flow_action_attrs_esp {
 1938         struct ib_flow_action_attrs_esp_keymats         *keymat;
 1939         struct ib_flow_action_attrs_esp_replays         *replay;
 1940         struct ib_flow_spec_list                        *encap;
 1941         /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
 1942          * Value of 0 is a valid value.
 1943          */
 1944         u32                                             esn;
 1945         u32                                             spi;
 1946         u32                                             seq;
 1947         u32                                             tfc_pad;
 1948         /* Use enum ib_flow_action_attrs_esp_flags */
 1949         u64                                             flags;
 1950         u64                                             hard_limit_pkts;
 1951 };
 1952 
 1953 struct ib_flow_action {
 1954         struct ib_device                *device;
 1955         struct ib_uobject               *uobject;
 1956         enum ib_flow_action_type        type;
 1957         atomic_t                        usecnt;
 1958 };
 1959 
 1960 
 1961 struct ib_mad_hdr;
 1962 struct ib_grh;
 1963 
 1964 enum ib_process_mad_flags {
 1965         IB_MAD_IGNORE_MKEY      = 1,
 1966         IB_MAD_IGNORE_BKEY      = 2,
 1967         IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
 1968 };
 1969 
 1970 enum ib_mad_result {
 1971         IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
 1972         IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
 1973         IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
 1974         IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
 1975 };
 1976 
 1977 #define IB_DEVICE_NAME_MAX 64
 1978 
 1979 struct ib_cache {
 1980         rwlock_t                lock;
 1981         struct ib_event_handler event_handler;
 1982         struct ib_pkey_cache  **pkey_cache;
 1983         struct ib_gid_table   **gid_cache;
 1984         u8                     *lmc_cache;
 1985 };
 1986 
 1987 struct ib_dma_mapping_ops {
 1988         int             (*mapping_error)(struct ib_device *dev,
 1989                                          u64 dma_addr);
 1990         u64             (*map_single)(struct ib_device *dev,
 1991                                       void *ptr, size_t size,
 1992                                       enum dma_data_direction direction);
 1993         void            (*unmap_single)(struct ib_device *dev,
 1994                                         u64 addr, size_t size,
 1995                                         enum dma_data_direction direction);
 1996         u64             (*map_page)(struct ib_device *dev,
 1997                                     struct page *page, unsigned long offset,
 1998                                     size_t size,
 1999                                     enum dma_data_direction direction);
 2000         void            (*unmap_page)(struct ib_device *dev,
 2001                                       u64 addr, size_t size,
 2002                                       enum dma_data_direction direction);
 2003         int             (*map_sg)(struct ib_device *dev,
 2004                                   struct scatterlist *sg, int nents,
 2005                                   enum dma_data_direction direction);
 2006         void            (*unmap_sg)(struct ib_device *dev,
 2007                                     struct scatterlist *sg, int nents,
 2008                                     enum dma_data_direction direction);
 2009         int             (*map_sg_attrs)(struct ib_device *dev,
 2010                                         struct scatterlist *sg, int nents,
 2011                                         enum dma_data_direction direction,
 2012                                         struct dma_attrs *attrs);
 2013         void            (*unmap_sg_attrs)(struct ib_device *dev,
 2014                                           struct scatterlist *sg, int nents,
 2015                                           enum dma_data_direction direction,
 2016                                           struct dma_attrs *attrs);
 2017         void            (*sync_single_for_cpu)(struct ib_device *dev,
 2018                                                u64 dma_handle,
 2019                                                size_t size,
 2020                                                enum dma_data_direction dir);
 2021         void            (*sync_single_for_device)(struct ib_device *dev,
 2022                                                   u64 dma_handle,
 2023                                                   size_t size,
 2024                                                   enum dma_data_direction dir);
 2025         void            *(*alloc_coherent)(struct ib_device *dev,
 2026                                            size_t size,
 2027                                            u64 *dma_handle,
 2028                                            gfp_t flag);
 2029         void            (*free_coherent)(struct ib_device *dev,
 2030                                          size_t size, void *cpu_addr,
 2031                                          u64 dma_handle);
 2032 };
 2033 
 2034 struct iw_cm_verbs;
 2035 
 2036 struct ib_port_immutable {
 2037         int                           pkey_tbl_len;
 2038         int                           gid_tbl_len;
 2039         u32                           core_cap_flags;
 2040         u32                           max_mad_size;
 2041 };
 2042 
 2043 struct ib_counters {
 2044         struct ib_device        *device;
 2045         struct ib_uobject       *uobject;
 2046         /* num of objects attached */
 2047         atomic_t        usecnt;
 2048 };
 2049 
 2050 struct ib_counters_read_attr {
 2051         u64     *counters_buff;
 2052         u32     ncounters;
 2053         u32     flags; /* use enum ib_read_counters_flags */
 2054 };
 2055 
 2056 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
 2057         .size_##ib_struct =                                                    \
 2058                 (sizeof(struct drv_struct) +                                   \
 2059                  BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
 2060                  BUILD_BUG_ON_ZERO(                                            \
 2061                          !__same_type(((struct drv_struct *)NULL)->member,     \
 2062                                       struct ib_struct)))
 2063 
 2064 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
 2065         ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
 2066 
 2067 #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
 2068         rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
 2069 
 2070 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
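
      /*
       * Example (sketch): a driver embeds the core object as the first
       * member of its private structure; that placement is exactly what
       * the BUILD_BUG_ON_ZERO checks in INIT_RDMA_OBJ_SIZE enforce.
       * struct my_drv_pd is hypothetical:
       */
      struct my_drv_pd {
              struct ib_pd    ibpd;   /* must be first */
              u32             pdn;    /* driver-private state */
      };
      /* the ops initializer then uses INIT_RDMA_OBJ_SIZE(ib_pd, my_drv_pd, ibpd) */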
 2071 
 2072 struct rdma_user_mmap_entry {
 2073         struct kref ref;
 2074         struct ib_ucontext *ucontext;
 2075         unsigned long start_pgoff;
 2076         size_t npages;
 2077         bool driver_removed;
 2078 };
 2079 
 2080 /* Return the offset (in bytes) the user should pass to libc's mmap() */
 2081 static inline u64
 2082 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
 2083 {
 2084         return (u64)entry->start_pgoff << PAGE_SHIFT;
 2085 }
 2086 
 2087 struct ib_device_ops {
 2088         enum rdma_driver_id driver_id;
 2089         DECLARE_RDMA_OBJ_SIZE(ib_ah);
 2090         DECLARE_RDMA_OBJ_SIZE(ib_cq);
 2091         DECLARE_RDMA_OBJ_SIZE(ib_pd);
 2092         DECLARE_RDMA_OBJ_SIZE(ib_srq);
 2093         DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 2094 };
 2095 
 2096 #define INIT_IB_DEVICE_OPS(pop, driver, DRIVER) do {                    \
 2097         (pop)[0] .driver_id = RDMA_DRIVER_##DRIVER;                     \
 2098         (pop)[0] INIT_RDMA_OBJ_SIZE(ib_ah, driver##_ib_ah, ibah);       \
 2099         (pop)[0] INIT_RDMA_OBJ_SIZE(ib_cq, driver##_ib_cq, ibcq);       \
 2100         (pop)[0] INIT_RDMA_OBJ_SIZE(ib_pd, driver##_ib_pd, ibpd);       \
 2101         (pop)[0] INIT_RDMA_OBJ_SIZE(ib_srq, driver##_ib_srq, ibsrq);    \
 2102         (pop)[0] INIT_RDMA_OBJ_SIZE(ib_ucontext, driver##_ib_ucontext, ibucontext); \
 2103 } while (0)
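
      /*
       * Example (sketch): a hypothetical driver "my_drv", with a matching
       * RDMA_DRIVER_MY_DRV id and my_drv_ib_* structures that each embed
       * the corresponding ib_* object as their first member, would do:
       *
       *      static struct ib_device_ops my_drv_ops;
       *
       *      INIT_IB_DEVICE_OPS(&my_drv_ops, my_drv, MY_DRV);
       *      ibdev->ops = my_drv_ops;
       */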
 2104 
 2105 struct ib_device {
 2106         struct device                *dma_device;
 2107         struct ib_device_ops         ops;
 2108 
 2109         char                          name[IB_DEVICE_NAME_MAX];
 2110 
 2111         struct list_head              event_handler_list;
 2112         spinlock_t                    event_handler_lock;
 2113 
 2114         spinlock_t                    client_data_lock;
 2115         struct list_head              core_list;
 2116         /* Access to the client_data_list is protected by the client_data_lock
 2117          * spinlock and the lists_rwsem read-write semaphore */
 2118         struct list_head              client_data_list;
 2119 
 2120         struct ib_cache               cache;
 2121         /**
 2122          * port_immutable is indexed by port number
 2123          */
 2124         struct ib_port_immutable     *port_immutable;
 2125 
 2126         int                           num_comp_vectors;
 2127 
 2128         struct iw_cm_verbs           *iwcm;
 2129 
 2130         /**
 2131          * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
 2132          *   driver initialized data.  The struct is kfree()'ed by the sysfs
 2133          *   core when the device is removed.  A lifespan of -1 in the return
 2134          *   struct tells the core to set a default lifespan.
 2135          */
 2136         struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
 2137                                                      u8 port_num);
 2138         /**
 2139          * get_hw_stats - Fill in the counter value(s) in the stats struct.
 2140          * @index - The index in the value array we wish to have updated, or
 2141          *   num_counters if we want all stats updated
 2142          * Return codes -
 2143          *   < 0 - Error, no counters updated
 2144          *   index - Updated the single counter pointed to by index
 2145          *   num_counters - Updated all counters (will reset the timestamp
 2146          *     and prevent further calls for lifespan milliseconds)
 2147          * Drivers are allowed to update all counters in lieu of just the
 2148          *   one given in index at their option
 2149          */
 2150         int                        (*get_hw_stats)(struct ib_device *device,
 2151                                                    struct rdma_hw_stats *stats,
 2152                                                    u8 port, int index);
 2153         int                        (*query_device)(struct ib_device *device,
 2154                                                    struct ib_device_attr *device_attr,
 2155                                                    struct ib_udata *udata);
 2156         int                        (*query_port)(struct ib_device *device,
 2157                                                  u8 port_num,
 2158                                                  struct ib_port_attr *port_attr);
 2159         enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
 2160                                                      u8 port_num);
 2161         /* When calling get_netdev, the HW vendor's driver should return the
 2162          * net device of device @device at port @port_num or NULL if such
 2163          * a net device doesn't exist. The vendor driver should call dev_hold
 2164          * on this net device. The HW vendor's device driver must guarantee
 2165          * that this function returns NULL before the net device reaches
 2166          * NETDEV_UNREGISTER_FINAL state.
 2167          */
 2168         struct ifnet              *(*get_netdev)(struct ib_device *device,
 2169                                                  u8 port_num);
 2170         int                        (*query_gid)(struct ib_device *device,
 2171                                                 u8 port_num, int index,
 2172                                                 union ib_gid *gid);
 2173         /* When calling add_gid, the HW vendor's driver should
 2174          * add the gid of device @device at gid index @index of
 2175          * port @port_num to be @gid. Meta-info of that gid (for example,
 2176          * the network device related to this gid) is available
 2177          * at @attr. @context allows the HW vendor driver to store extra
 2178          * information together with a GID entry. The HW vendor may allocate
 2179          * memory to contain this information and store it in @context when a
 2180          * new GID entry is written to. Params are consistent until the next
 2181          * call of add_gid or delete_gid. The function should return 0 on
 2182          * success or error otherwise. The function could be called
 2183          * concurrently for different ports. This function is only called
 2184          * when roce_gid_table is used.
 2185          */
 2186         int                        (*add_gid)(struct ib_device *device,
 2187                                               u8 port_num,
 2188                                               unsigned int index,
 2189                                               const union ib_gid *gid,
 2190                                               const struct ib_gid_attr *attr,
 2191                                               void **context);
 2192         /* When calling del_gid, the HW vendor's driver should delete the
 2193          * gid of device @device at gid index @index of port @port_num.
 2194          * Upon the deletion of a GID entry, the HW vendor must free any
 2195          * allocated memory. The caller will clear @context afterwards.
 2196          * This function is only called when roce_gid_table is used.
 2197          */
 2198         int                        (*del_gid)(struct ib_device *device,
 2199                                               u8 port_num,
 2200                                               unsigned int index,
 2201                                               void **context);
 2202         int                        (*query_pkey)(struct ib_device *device,
 2203                                                  u8 port_num, u16 index, u16 *pkey);
 2204         int                        (*modify_device)(struct ib_device *device,
 2205                                                     int device_modify_mask,
 2206                                                     struct ib_device_modify *device_modify);
 2207         int                        (*modify_port)(struct ib_device *device,
 2208                                                   u8 port_num, int port_modify_mask,
 2209                                                   struct ib_port_modify *port_modify);
 2210         int                        (*alloc_ucontext)(struct ib_ucontext *uctx,
 2211                                                      struct ib_udata *udata);
 2212         void                       (*dealloc_ucontext)(struct ib_ucontext *context);
 2213         int                        (*mmap)(struct ib_ucontext *context,
 2214                                            struct vm_area_struct *vma);
 2215         int                        (*alloc_pd)(struct ib_pd *pd,
 2216                                                struct ib_udata *udata);
 2217         void                       (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
 2218         int                        (*create_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr,
 2219                                                 u32 flags, struct ib_udata *udata);
 2220         int                        (*modify_ah)(struct ib_ah *ah,
 2221                                                 struct ib_ah_attr *ah_attr);
 2222         int                        (*query_ah)(struct ib_ah *ah,
 2223                                                struct ib_ah_attr *ah_attr);
 2224         void                       (*destroy_ah)(struct ib_ah *ah, u32 flags);
 2225         int                        (*create_srq)(struct ib_srq *srq,
 2226                                                  struct ib_srq_init_attr *srq_init_attr,
 2227                                                  struct ib_udata *udata);
 2228         int                        (*modify_srq)(struct ib_srq *srq,
 2229                                                  struct ib_srq_attr *srq_attr,
 2230                                                  enum ib_srq_attr_mask srq_attr_mask,
 2231                                                  struct ib_udata *udata);
 2232         int                        (*query_srq)(struct ib_srq *srq,
 2233                                                 struct ib_srq_attr *srq_attr);
 2234         void                       (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
 2235         int                        (*post_srq_recv)(struct ib_srq *srq,
 2236                                                     const struct ib_recv_wr *recv_wr,
 2237                                                     const struct ib_recv_wr **bad_recv_wr);
 2238         struct ib_qp *             (*create_qp)(struct ib_pd *pd,
 2239                                                 struct ib_qp_init_attr *qp_init_attr,
 2240                                                 struct ib_udata *udata);
 2241         int                        (*modify_qp)(struct ib_qp *qp,
 2242                                                 struct ib_qp_attr *qp_attr,
 2243                                                 int qp_attr_mask,
 2244                                                 struct ib_udata *udata);
 2245         int                        (*query_qp)(struct ib_qp *qp,
 2246                                                struct ib_qp_attr *qp_attr,
 2247                                                int qp_attr_mask,
 2248                                                struct ib_qp_init_attr *qp_init_attr);
 2249         int                        (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
 2250         int                        (*post_send)(struct ib_qp *qp,
 2251                                                 const struct ib_send_wr *send_wr,
 2252                                                 const struct ib_send_wr **bad_send_wr);
 2253         int                        (*post_recv)(struct ib_qp *qp,
 2254                                                 const struct ib_recv_wr *recv_wr,
 2255                                                 const struct ib_recv_wr **bad_recv_wr);
 2256         int                        (*create_cq)(struct ib_cq *,
 2257                                                 const struct ib_cq_init_attr *attr,
 2258                                                 struct ib_udata *udata);
 2259         int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
 2260                                                 u16 cq_period);
 2261         void                       (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
 2262         int                        (*resize_cq)(struct ib_cq *cq, int cqe,
 2263                                                 struct ib_udata *udata);
 2264         int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
 2265                                               struct ib_wc *wc);
 2266         int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
 2267         int                        (*req_notify_cq)(struct ib_cq *cq,
 2268                                                     enum ib_cq_notify_flags flags);
 2269         int                        (*req_ncomp_notif)(struct ib_cq *cq,
 2270                                                       int wc_cnt);
 2271         struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
 2272                                                  int mr_access_flags);
 2273         struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
 2274                                                   u64 start, u64 length,
 2275                                                   u64 virt_addr,
 2276                                                   int mr_access_flags,
 2277                                                   struct ib_udata *udata);
 2278         int                        (*rereg_user_mr)(struct ib_mr *mr,
 2279                                                     int flags,
 2280                                                     u64 start, u64 length,
 2281                                                     u64 virt_addr,
 2282                                                     int mr_access_flags,
 2283                                                     struct ib_pd *pd,
 2284                                                     struct ib_udata *udata);
 2285         int                        (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
 2286         struct ib_mr *             (*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
 2287                                                u32 max_num_sg, struct ib_udata *udata);
 2288         int                        (*advise_mr)(struct ib_pd *pd,
 2289                                                 enum ib_uverbs_advise_mr_advice advice, u32 flags,
 2290                                                 const struct ib_sge *sg_list, u32 num_sge,
 2291                                                 struct uverbs_attr_bundle *attrs);
 2292         int                        (*map_mr_sg)(struct ib_mr *mr,
 2293                                                 struct scatterlist *sg,
 2294                                                 int sg_nents,
 2295                                                 unsigned int *sg_offset);
 2296         struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
 2297                                                enum ib_mw_type type,
 2298                                                struct ib_udata *udata);
 2299         int                        (*dealloc_mw)(struct ib_mw *mw);
 2300         struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
 2301                                                 int mr_access_flags,
 2302                                                 struct ib_fmr_attr *fmr_attr);
 2303         int                        (*map_phys_fmr)(struct ib_fmr *fmr,
 2304                                                    u64 *page_list, int list_len,
 2305                                                    u64 iova);
 2306         int                        (*unmap_fmr)(struct list_head *fmr_list);
 2307         int                        (*dealloc_fmr)(struct ib_fmr *fmr);
 2308         int                        (*attach_mcast)(struct ib_qp *qp,
 2309                                                    union ib_gid *gid,
 2310                                                    u16 lid);
 2311         int                        (*detach_mcast)(struct ib_qp *qp,
 2312                                                    union ib_gid *gid,
 2313                                                    u16 lid);
 2314         int                        (*process_mad)(struct ib_device *device,
 2315                                                   int process_mad_flags,
 2316                                                   u8 port_num,
 2317                                                   const struct ib_wc *in_wc,
 2318                                                   const struct ib_grh *in_grh,
 2319                                                   const struct ib_mad_hdr *in_mad,
 2320                                                   size_t in_mad_size,
 2321                                                   struct ib_mad_hdr *out_mad,
 2322                                                   size_t *out_mad_size,
 2323                                                   u16 *out_mad_pkey_index);
 2324         struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
 2325                                                  struct ib_udata *udata);
 2326         int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
 2327         struct ib_flow *           (*create_flow)(struct ib_qp *qp,
 2328                                                   struct ib_flow_attr
 2329                                                   *flow_attr,
 2330                                                   int domain, struct ib_udata *udata);
 2331         int                        (*destroy_flow)(struct ib_flow *flow_id);
 2332         struct ib_flow_action *(*create_flow_action_esp)(
 2333                 struct ib_device *device,
 2334                 const struct ib_flow_action_attrs_esp *attr,
 2335                 struct uverbs_attr_bundle *attrs);
 2336         int (*destroy_flow_action)(struct ib_flow_action *action);
 2337         int (*modify_flow_action_esp)(
 2338                 struct ib_flow_action *action,
 2339                 const struct ib_flow_action_attrs_esp *attr,
 2340                 struct uverbs_attr_bundle *attrs);
 2341         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
 2342                                                       struct ib_mr_status *mr_status);
 2343         /**
 2344          * This will be called once the refcount of an entry in mmap_xa
 2345          * reaches zero. The type of the memory that was mapped may differ
 2346          * between entries and is opaque to the rdma_user_mmap interface;
 2347          * therefore the cleanup needs to be implemented by the driver in mmap_free.
 2348          */
 2349         void                       (*mmap_free)(struct rdma_user_mmap_entry *entry);
 2350         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
 2351         void                       (*drain_rq)(struct ib_qp *qp);
 2352         void                       (*drain_sq)(struct ib_qp *qp);
 2353         int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
 2354                                                         int state);
 2355         int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
 2356                                                    struct ifla_vf_info *ivf);
 2357         int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
 2358                                                    struct ifla_vf_stats *stats);
 2359         int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
 2360                                                   int type);
 2361         struct ib_wq *             (*create_wq)(struct ib_pd *pd,
 2362                                                 struct ib_wq_init_attr *init_attr,
 2363                                                 struct ib_udata *udata);
 2364         void                       (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 2365         int                        (*modify_wq)(struct ib_wq *wq,
 2366                                                 struct ib_wq_attr *attr,
 2367                                                 u32 wq_attr_mask,
 2368                                                 struct ib_udata *udata);
 2369         struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
 2370                                                            struct ib_rwq_ind_table_init_attr *init_attr,
 2371                                                            struct ib_udata *udata);
 2372         int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
 2373         struct ib_dm *(*alloc_dm)(struct ib_device *device,
 2374                                   struct ib_ucontext *context,
 2375                                   struct ib_dm_alloc_attr *attr,
 2376                                   struct uverbs_attr_bundle *attrs);
 2377         int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
 2378         struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
 2379                                    struct ib_dm_mr_attr *attr,
 2380                                    struct uverbs_attr_bundle *attrs);
 2381         struct ib_counters *(*create_counters)(
 2382                 struct ib_device *device, struct uverbs_attr_bundle *attrs);
 2383         int (*destroy_counters)(struct ib_counters *counters);
 2384         int (*read_counters)(struct ib_counters *counters,
 2385                              struct ib_counters_read_attr *counters_read_attr,
 2386                              struct uverbs_attr_bundle *attrs);
 2387         struct ib_dma_mapping_ops   *dma_ops;
 2388 
 2389         struct module               *owner;
 2390         struct device                dev;
 2391         struct kobject               *ports_parent;
 2392         struct list_head             port_list;
 2393 
 2394         enum {
 2395                 IB_DEV_UNINITIALIZED,
 2396                 IB_DEV_REGISTERED,
 2397                 IB_DEV_UNREGISTERED
 2398         }                            reg_state;
 2399 
 2400         int                          uverbs_abi_ver;
 2401         u64                          uverbs_cmd_mask;
 2402         u64                          uverbs_ex_cmd_mask;
 2403 
 2404         char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
 2405         __be64                       node_guid;
 2406         u32                          local_dma_lkey;
 2407         u16                          is_switch:1;
 2408         u8                           node_type;
 2409         u8                           phys_port_cnt;
 2410         struct ib_device_attr        attrs;
 2411         struct attribute_group       *hw_stats_ag;
 2412         struct rdma_hw_stats         *hw_stats;
 2413 
 2414         const struct uapi_definition   *driver_def;
 2415 
 2416         /**
 2417          * The following mandatory functions are used only at device
 2418          * registration.  Keep functions such as these at the end of this
 2419          * structure to avoid cache line misses when accessing struct ib_device
 2420          * in fast paths.
 2421          */
 2422         int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
 2423         void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
 2424 };
 2425 
 2426 struct ib_client {
 2427         char  *name;
 2428         void (*add)   (struct ib_device *);
 2429         void (*remove)(struct ib_device *, void *client_data);
 2430 
 2431         /* Returns the net_dev belonging to this ib_client and matching the
 2432          * given parameters.
 2433          * @dev:         An RDMA device that the net_dev uses for communication.
 2434          * @port:        A physical port number on the RDMA device.
 2435          * @pkey:        P_Key that the net_dev uses if applicable.
 2436          * @gid:         A GID that the net_dev uses to communicate.
 2437          * @addr:        An IP address the net_dev is configured with.
 2438          * @client_data: The device's client data set by ib_set_client_data().
 2439          *
 2440          * An ib_client that implements a net_dev on top of RDMA devices
 2441          * (such as IP over IB) should implement this callback, allowing the
 2442          * rdma_cm module to find the right net_dev for a given request.
 2443          *
 2444          * The caller is responsible for calling dev_put on the returned
 2445          * netdev. */
 2446         struct ifnet *(*get_net_dev_by_params)(
 2447                         struct ib_device *dev,
 2448                         u8 port,
 2449                         u16 pkey,
 2450                         const union ib_gid *gid,
 2451                         const struct sockaddr *addr,
 2452                         void *client_data);
 2453         struct list_head list;
 2454 };
 2455 
 2456 struct ib_device *ib_alloc_device(size_t size);
 2457 void ib_dealloc_device(struct ib_device *device);
 2458 
 2459 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
 2460 
 2461 int ib_register_device(struct ib_device *device,
 2462                        int (*port_callback)(struct ib_device *,
 2463                                             u8, struct kobject *));
 2464 void ib_unregister_device(struct ib_device *device);
 2465 
 2466 int ib_register_client   (struct ib_client *client);
 2467 void ib_unregister_client(struct ib_client *client);
 2468 
 2469 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
 2470 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
 2471                          void *data);
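
      /*
       * Example (sketch): a minimal ib_client that attaches per-device
       * state from its add callback; all my_client_* names below are
       * hypothetical.
       *
       *      static void my_client_add(struct ib_device *device)
       *      {
       *              struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
       *
       *              ib_set_client_data(device, &my_client, st);
       *      }
       *
       *      static struct ib_client my_client = {
       *              .name   = "my_client",
       *              .add    = my_client_add,
       *              .remove = my_client_remove,
       *      };
       *
       *      ib_register_client(&my_client);
       */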
 2472 
 2473 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 2474                       unsigned long pfn, unsigned long size, pgprot_t prot,
 2475                       struct rdma_user_mmap_entry *entry);
 2476 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
 2477                                 struct rdma_user_mmap_entry *entry,
 2478                                 size_t length);
 2479 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
 2480                                       struct rdma_user_mmap_entry *entry,
 2481                                       size_t length, u32 min_pgoff,
 2482                                       u32 max_pgoff);
 2483 
 2484 struct rdma_user_mmap_entry *
 2485 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
 2486                                unsigned long pgoff);
 2487 struct rdma_user_mmap_entry *
 2488 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
 2489                          struct vm_area_struct *vma);
 2490 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
 2491 
 2492 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
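
      /*
       * Example (sketch): the typical driver flow is to insert an entry
       * when the resource is created and report the offset to userspace:
       *
       *      if (rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry, size))
       *              return -ENOMEM;
       *      resp.mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
       *
       * and then, from the ->mmap method, look the entry up again and map
       * it (entry, resp, pfn and prot are hypothetical driver-side names):
       *
       *      entry = rdma_user_mmap_entry_get(ucontext, vma);
       *      if (entry == NULL)
       *              return -EINVAL;
       *      ret = rdma_user_mmap_io(ucontext, vma, pfn, size, prot, entry);
       *      rdma_user_mmap_entry_put(entry);
       */
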
 2493 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
 2494 {
 2495         return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
 2496 }
 2497 
 2498 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
 2499 {
 2500         return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
 2501 }
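
      /*
       * Example (sketch): a verb implementation can use these helpers to
       * exchange ABI structures with userspace; struct my_drv_resp and
       * my_drv_fill_resp() are hypothetical.
       */
      struct my_drv_resp {
              u32     dev_caps;
      };

      static inline int my_drv_fill_resp(struct ib_udata *udata, u32 caps)
      {
              struct my_drv_resp resp = { .dev_caps = caps };

              if (udata->outlen < sizeof(resp))
                      return -ENOSPC;
              return ib_copy_to_udata(udata, &resp, sizeof(resp));
      }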
 2502 
 2503 static inline bool ib_is_buffer_cleared(const void __user *p,
 2504                                         size_t len)
 2505 {
 2506         bool ret;
 2507         u8 *buf;
 2508 
 2509         if (len > USHRT_MAX)
 2510                 return false;
 2511 
 2512         buf = memdup_user(p, len);
 2513         if (IS_ERR(buf))
 2514                 return false;
 2515 
 2516         ret = !memchr_inv(buf, 0, len);
 2517         kfree(buf);
 2518         return ret;
 2519 }
 2520 
 2521 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
 2522                                        size_t offset,
 2523                                        size_t len)
 2524 {
 2525         return ib_is_buffer_cleared(udata->inbuf + offset, len);
 2526 }
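
      /*
       * Example (sketch): a driver that understands only the first
       * known_len bytes of a command can still accept longer input, as
       * long as the unknown tail is zeroed; my_drv_check_input() is a
       * hypothetical helper.
       */
      static inline int my_drv_check_input(struct ib_udata *udata,
                                           size_t known_len)
      {
              if (udata->inlen > known_len &&
                  !ib_is_udata_cleared(udata, known_len,
                                       udata->inlen - known_len))
                      return -EOPNOTSUPP;
              return 0;
      }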
 2527 
 2528 /**
 2529  * ib_is_destroy_retryable - Check whether the uobject destruction
 2530  * is retryable.
 2531  * @ret: The initial destruction return code
 2532  * @why: remove reason
 2533  * @uobj: The uobject that is destroyed
 2534  *
 2535  * This function is a helper function that IB layer and low-level drivers
 2536  * can use to consider whether the destruction of the given uobject is
 2537  * retry-able.
 2538  * It checks the original return code; if it wasn't success, the destruction
 2539  * is retryable according to the ucontext state (i.e. cleanup_retryable) and
 2540  * the remove reason (i.e. why).
 2541  * Must be called with the object locked for destroy.
 2542  */
 2543 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
 2544                                            struct ib_uobject *uobj)
 2545 {
 2546         return ret && (why == RDMA_REMOVE_DESTROY ||
 2547                        uobj->context->cleanup_retryable);
 2548 }
 2549 
 2550 /**
 2551  * ib_destroy_usecnt - Called during destruction to check the usecnt
 2552  * @usecnt: The usecnt atomic
 2553  * @why: remove reason
 2554  * @uobj: The uobject that is destroyed
 2555  *
 2556  * Non-zero usecnts will block destruction unless destruction was triggered by
 2557  * a ucontext cleanup.
 2558  */
 2559 static inline int ib_destroy_usecnt(atomic_t *usecnt,
 2560                                     enum rdma_remove_reason why,
 2561                                     struct ib_uobject *uobj)
 2562 {
 2563         if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
 2564                 return -EBUSY;
 2565         return 0;
 2566 }
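
      /*
       * Example (sketch): a driver destroy callback combining the two
       * helpers above; my_drv_destroy_obj() is hypothetical and elides
       * the actual hardware teardown.
       */
      static inline int my_drv_destroy_obj(atomic_t *usecnt,
                                           struct ib_uobject *uobj,
                                           enum rdma_remove_reason why)
      {
              int ret = ib_destroy_usecnt(usecnt, why, uobj);

              if (ret != 0)
                      return ret;     /* still in use; destroy is retryable */
              /* release driver/hardware resources here */
              return 0;
      }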
 2567 
 2568 /**
 2569  * ib_modify_qp_is_ok - Check that the supplied attribute mask
 2570  * contains all required attributes and no attributes not allowed for
 2571  * the given QP state transition.
 2572  * @cur_state: Current QP state
 2573  * @next_state: Next QP state
 2574  * @type: QP type
 2575  * @mask: Mask of supplied QP attributes
 2576  *
 2577  * This function is a helper function that a low-level driver's
 2578  * modify_qp method can use to validate the consumer's input.  It
 2579  * checks that cur_state and next_state are valid QP states, that a
 2580  * transition from cur_state to next_state is allowed by the IB spec,
 2581  * and that the attribute mask supplied is allowed for the transition.
 2582  */
 2583 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 2584                         enum ib_qp_type type, enum ib_qp_attr_mask mask);
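
      /*
       * Example (sketch): a ->modify_qp implementation typically validates
       * the requested transition before touching hardware (cur_state and
       * new_state come from qp_attr or the driver's cached state):
       *
       *      if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
       *                              attr_mask))
       *              return -EINVAL;
       */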
 2585 
 2586 int ib_register_event_handler  (struct ib_event_handler *event_handler);
 2587 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
 2588 void ib_dispatch_event(struct ib_event *event);
 2589 
 2590 int ib_query_port(struct ib_device *device,
 2591                   u8 port_num, struct ib_port_attr *port_attr);
 2592 
 2593 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
 2594                                                u8 port_num);
 2595 
 2596 /**
 2597  * rdma_cap_ib_switch - Check if the device is IB switch
 2598  * @device: Device to check
 2599  *
 2600  * The device driver is responsible for setting the is_switch bit in the
 2601  * ib_device structure at init time.
 2602  *
 2603  * Return: true if the device is IB switch.
 2604  */
 2605 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
 2606 {
 2607         return device->is_switch;
 2608 }
 2609 
 2610 /**
 2611  * rdma_start_port - Return the first valid port number for the device
 2612  * specified
 2613  *
 2614  * @device: Device to be checked
 2615  *
 2616  * Return: start port number
 2617  */
 2618 static inline u8 rdma_start_port(const struct ib_device *device)
 2619 {
 2620         return rdma_cap_ib_switch(device) ? 0 : 1;
 2621 }
 2622 
 2623 /**
 2624  * rdma_end_port - Return the last valid port number for the device
 2625  * specified
 2626  *
 2627  * @device: Device to be checked
 2628  *
 2629  * Return: last port number
 2630  */
 2631 static inline u8 rdma_end_port(const struct ib_device *device)
 2632 {
 2633         return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
 2634 }
 2635 
 2636 static inline int rdma_is_port_valid(const struct ib_device *device,
 2637                                      unsigned int port)
 2638 {
 2639         return (port >= rdma_start_port(device) &&
 2640                 port <= rdma_end_port(device));
 2641 }
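
/*
 * Example (illustrative sketch, not part of the original header): iterating
 * over every valid port of a device.  rdma_start_port()/rdma_end_port()
 * hide the fact that IB switches expose only port 0.
 */
#if 0
static void example_for_each_port(struct ib_device *device)
{
        u8 port;

        for (port = rdma_start_port(device);
             port <= rdma_end_port(device); port++)
                printf("port %u valid: %d\n", port,
                    rdma_is_port_valid(device, port));
}
#endif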
 2642 
 2643 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
 2644 {
 2645         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
 2646 }
 2647 
 2648 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
 2649 {
 2650         return device->port_immutable[port_num].core_cap_flags &
 2651                 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
 2652 }
 2653 
 2654 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
 2655 {
 2656         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
 2657 }
 2658 
 2659 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
 2660 {
 2661         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
 2662 }
 2663 
 2664 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
 2665 {
 2666         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
 2667 }
 2668 
 2669 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
 2670 {
 2671         return rdma_protocol_ib(device, port_num) ||
 2672                 rdma_protocol_roce(device, port_num);
 2673 }
 2674 
 2675 /**
 2676  * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 2677  * Management Datagrams.
 2678  * @device: Device to check
 2679  * @port_num: Port number to check
 2680  *
 2681  * Management Datagrams (MAD) are a required part of the InfiniBand
 2682  * specification and are supported on all InfiniBand devices.  A slightly
 2683  * extended version is also supported on OPA interfaces.
 2684  *
 2685  * Return: true if the port supports sending/receiving of MAD packets.
 2686  */
 2687 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
 2688 {
 2689         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
 2690 }
 2691 
 2692 /**
 2693  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 2694  * Management Datagrams.
 2695  * @device: Device to check
 2696  * @port_num: Port number to check
 2697  *
 2698  * Intel OmniPath devices extend and/or replace the InfiniBand Management
 2699  * datagrams with their own versions.  These OPA MADs share many but not all of
 2700  * the characteristics of InfiniBand MADs.
 2701  *
 2702  * OPA MADs differ in the following ways:
 2703  *
 2704  *    1) MADs are variable size up to 2K
 2705  *       IBTA defined MADs remain fixed at 256 bytes
 2706  *    2) OPA SMPs must carry valid PKeys
 2707  *    3) OPA SMP packets are a different format
 2708  *
 2709  * Return: true if the port supports OPA MAD packet formats.
 2710  */
 2711 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
 2712 {
 2713         return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
 2714                 == RDMA_CORE_CAP_OPA_MAD;
 2715 }
 2716 
 2717 /**
 2718  * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 2719  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 2720  * @device: Device to check
 2721  * @port_num: Port number to check
 2722  *
 2723  * Each InfiniBand node is required to provide a Subnet Management Agent
 2724  * that the subnet manager can access.  Prior to the fabric being fully
 2725  * configured by the subnet manager, the SMA is accessed via a well known
 2726  * interface called the Subnet Management Interface (SMI).  This interface
 2727  * uses directed route packets to communicate with the SM to get around the
 2728  * chicken and egg problem of the SM needing to know what's on the fabric
 2729  * in order to configure the fabric, and needing to configure the fabric in
 2730  * order to send packets to the devices on the fabric.  These directed
 2731  * route packets do not need the fabric fully configured in order to reach
 2732  * their destination.  The SMI is the only method allowed to send
 2733  * directed route packets on an InfiniBand fabric.
 2734  *
 2735  * Return: true if the port provides an SMI.
 2736  */
 2737 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
 2738 {
 2739         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
 2740 }
 2741 
 2742 /**
 2743  * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
 2744  * Communication Manager.
 2745  * @device: Device to check
 2746  * @port_num: Port number to check
 2747  *
 2748  * The InfiniBand Communication Manager is one of many pre-defined General
 2749  * Service Agents (GSA) that are accessed via the General Service
 2750  * Interface (GSI).  Its role is to facilitate establishment of connections
 2751  * between nodes as well as other management related tasks for established
 2752  * connections.
 2753  *
 2754  * Return: true if the port supports an IB CM (this does not guarantee that
 2755  * a CM is actually running however).
 2756  */
 2757 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
 2758 {
 2759         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
 2760 }
 2761 
 2762 /**
 2763  * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
 2764  * Communication Manager.
 2765  * @device: Device to check
 2766  * @port_num: Port number to check
 2767  *
 2768  * Similar to above, but specific to iWARP connections which have a different
 2769  * management protocol than InfiniBand.
 2770  *
 2771  * Return: true if the port supports an iWARP CM (this does not guarantee that
 2772  * a CM is actually running however).
 2773  */
 2774 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
 2775 {
 2776         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
 2777 }
 2778 
 2779 /**
 2780  * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
 2781  * Subnet Administration.
 2782  * @device: Device to check
 2783  * @port_num: Port number to check
 2784  *
 2785  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 2786  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 2787  * fabrics, devices should resolve routes to other hosts by contacting the
 2788  * SA to query the proper route.
 2789  *
 2790  * Return: true if the port should act as a client to the fabric Subnet
 2791  * Administration interface.  This does not imply that the SA service is
 2792  * running locally.
 2793  */
 2794 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
 2795 {
 2796         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
 2797 }
 2798 
 2799 /**
 2800  * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
 2801  * Multicast.
 2802  * @device: Device to check
 2803  * @port_num: Port number to check
 2804  *
 2805  * InfiniBand multicast registration is more complex than normal IPv4 or
 2806  * IPv6 multicast registration.  Each Host Channel Adapter must register
 2807  * with the Subnet Manager when it wishes to join a multicast group.  It
 2808  * should do so only once, regardless of how many queue pairs it attaches
 2809  * to this group, and it should leave the group only after all queue pairs
 2810  * attached to the group have been detached.
 2811  *
 2812  * Return: true if the port must undertake the additional administrative
 2813  * overhead of registering/unregistering with the SM and tracking of the
 2814  * total number of queue pairs attached to the multicast group.
 2815  */
 2816 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
 2817 {
 2818         return rdma_cap_ib_sa(device, port_num);
 2819 }
 2820 
 2821 /**
 2822  * rdma_cap_af_ib - Check if the port of a device supports
 2823  * native InfiniBand addressing.
 2824  * @device: Device to check
 2825  * @port_num: Port number to check
 2826  *
 2827  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 2828  * GID.  RoCE uses a different mechanism, but still generates a GID via
 2829  * a prescribed mechanism and port specific data.
 2830  *
 2831  * Return: true if the port uses a GID address to identify devices on the
 2832  * network.
 2833  */
 2834 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
 2835 {
 2836         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
 2837 }
 2838 
 2839 /**
 2840  * rdma_cap_eth_ah - Check if the port of a device supports
 2841  * Ethernet Address Handles.
 2842  * @device: Device to check
 2843  * @port_num: Port number to check
 2844  *
 2845  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 2846  * to fabricate GIDs over Ethernet/IP specific addresses native to the
 2847  * port.  Normally, packet headers are generated by the sending host
 2848  * adapter, but when sending connectionless datagrams, we must manually
 2849  * inject the proper headers for the fabric we are communicating over.
 2850  *
 2851  * Return: true if we are running as a RoCE port and must force the
 2852  * addition of a Global Route Header built from our Ethernet Address
 2853  * Handle into our header list for connectionless packets.
 2854  */
 2855 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
 2856 {
 2857         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
 2858 }
 2859 
 2860 /**
 2861  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 2862  *
 2863  * @device: Device
 2864  * @port_num: Port number
 2865  *
 2866  * This MAD size includes the MAD headers and MAD payload.  No other headers
 2867  * are included.
 2868  *
 2869  * Return the max MAD size required by the Port.  Will return 0 if the port
 2870  * does not support MADs.
 2871  */
 2872 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
 2873 {
 2874         return device->port_immutable[port_num].max_mad_size;
 2875 }
 2876 
 2877 /**
 2878  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 2879  * @device: Device to check
 2880  * @port_num: Port number to check
 2881  *
 2882  * The RoCE GID table mechanism manages the various GIDs for a device.
 2883  *
 2884  * NOTE: if allocating the port's GID table has failed, this call will still
 2885  * return true, but any RoCE GID table API will fail.
 2886  *
 2887  * Return: true if the port uses RoCE GID table mechanism in order to manage
 2888  * its GIDs.
 2889  */
 2890 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
 2891                                            u8 port_num)
 2892 {
 2893         return rdma_protocol_roce(device, port_num) &&
 2894                 device->add_gid && device->del_gid;
 2895 }
 2896 
 2897 /*
 2898  * Check if the device supports READ W/ INVALIDATE.
 2899  */
 2900 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
 2901 {
 2902         /*
 2903          * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
 2904          * has support for it yet.
 2905          */
 2906         return rdma_protocol_iwarp(dev, port_num);
 2907 }
 2908 
 2909 int ib_query_gid(struct ib_device *device,
 2910                  u8 port_num, int index, union ib_gid *gid,
 2911                  struct ib_gid_attr *attr);
 2912 
 2913 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
 2914                          int state);
 2915 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
 2916                      struct ifla_vf_info *info);
 2917 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
 2918                     struct ifla_vf_stats *stats);
 2919 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
 2920                    int type);
 2921 
 2922 int ib_query_pkey(struct ib_device *device,
 2923                   u8 port_num, u16 index, u16 *pkey);
 2924 
 2925 int ib_modify_device(struct ib_device *device,
 2926                      int device_modify_mask,
 2927                      struct ib_device_modify *device_modify);
 2928 
 2929 int ib_modify_port(struct ib_device *device,
 2930                    u8 port_num, int port_modify_mask,
 2931                    struct ib_port_modify *port_modify);
 2932 
 2933 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
 2934                 enum ib_gid_type gid_type, struct ifnet *ndev,
 2935                 u8 *port_num, u16 *index);
 2936 
 2937 int ib_find_pkey(struct ib_device *device,
 2938                  u8 port_num, u16 pkey, u16 *index);
 2939 
 2940 enum ib_pd_flags {
 2941         /*
 2942          * Create a memory registration for all memory in the system and place
 2943          * the rkey for it into pd->unsafe_global_rkey.  This can be used by
 2944          * ULPs to avoid the overhead of dynamic MRs.
 2945          *
 2946          * This flag is generally considered unsafe and must only be used in
 2947  * extremely trusted environments.  Every use of it will log a warning
 2948          * in the kernel log.
 2949          */
 2950         IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
 2951 };
 2952 
 2953 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 2954                 const char *caller);
 2955 #define ib_alloc_pd(device, flags) \
 2956         __ib_alloc_pd((device), (flags), __func__)
 2957 
 2958 /**
 2959  * ib_dealloc_pd_user - Deallocate kernel/user PD
 2960  * @pd: The protection domain
 2961  * @udata: Valid user data or NULL for kernel objects
 2962  */
 2963 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
 2964 
 2965 /**
 2966  * ib_dealloc_pd - Deallocate kernel PD
 2967  * @pd: The protection domain
 2968  *
 2969  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 2970  */
 2971 static inline void ib_dealloc_pd(struct ib_pd *pd)
 2972 {
 2973         ib_dealloc_pd_user(pd, NULL);
 2974 }
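
/*
 * Example (illustrative sketch, not part of the original header): a typical
 * kernel ULP allocates one PD per device and frees it on teardown; the
 * "example_" names are assumptions.
 */
#if 0
static struct ib_pd *example_pd_setup(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = ib_alloc_pd(device, 0);    /* no IB_PD_UNSAFE_GLOBAL_RKEY */
        if (IS_ERR(pd))
                return pd;
        /* ... create QPs, CQs and MRs under this PD ... */
        return pd;
}

static void example_pd_teardown(struct ib_pd *pd)
{
        ib_dealloc_pd(pd);              /* kernel PD, so no udata */
}
#endif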
 2975 
 2976 enum rdma_create_ah_flags {
 2977         /* In a sleepable context */
 2978         RDMA_CREATE_AH_SLEEPABLE = BIT(0),
 2979 };
 2980 
 2981 /**
 2982  * ib_create_ah - Creates an address handle for the given address vector.
 2983  * @pd: The protection domain associated with the address handle.
 2984  * @ah_attr: The attributes of the address vector.
 2985  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 2986  *
 2987  * The address handle is used to reference a local or global destination
 2988  * in all UD QP post sends.
 2989  */
 2990 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 2991                            u32 flags);
 2992 
 2993 /**
 2994  * ib_create_user_ah - Creates an address handle for the given address vector.
 2995  * It resolves the destination MAC address for RoCE-type ah attributes.
 2996  * @pd: The protection domain associated with the address handle.
 2997  * @ah_attr: The attributes of the address vector.
 2998  * @udata: pointer to user's input/output buffer information needed by
 2999  *         the provider driver.
 3000  *
 3001  * It returns 0 on success and returns appropriate error code on error.
 3002  * The address handle is used to reference a local or global destination
 3003  * in all UD QP post sends.
 3004  */
 3005 struct ib_ah *ib_create_user_ah(struct ib_pd *pd,
 3006                                 struct ib_ah_attr *ah_attr,
 3007                                 struct ib_udata *udata);
 3008 
 3009 /**
 3010  * ib_init_ah_from_wc - Initializes address handle attributes from a
 3011  *   work completion.
 3012  * @device: Device on which the received message arrived.
 3013  * @port_num: Port on which the received message arrived.
 3014  * @wc: Work completion associated with the received message.
 3015  * @grh: References the received global route header.  This parameter is
 3016  *   ignored unless the work completion indicates that the GRH is valid.
 3017  * @ah_attr: Returned attributes that can be used when creating an address
 3018  *   handle for replying to the message.
 3019  */
 3020 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 3021                        const struct ib_wc *wc, const struct ib_grh *grh,
 3022                        struct ib_ah_attr *ah_attr);
 3023 
 3024 /**
 3025  * ib_create_ah_from_wc - Creates an address handle associated with the
 3026  *   sender of the specified work completion.
 3027  * @pd: The protection domain associated with the address handle.
 3028  * @wc: Work completion information associated with a received message.
 3029  * @grh: References the received global route header.  This parameter is
 3030  *   ignored unless the work completion indicates that the GRH is valid.
 3031  * @port_num: The outbound port number to associate with the address.
 3032  *
 3033  * The address handle is used to reference a local or global destination
 3034  * in all UD QP post sends.
 3035  */
 3036 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
 3037                                    const struct ib_grh *grh, u8 port_num);
 3038 
 3039 /**
 3040  * ib_modify_ah - Modifies the address vector associated with an address
 3041  *   handle.
 3042  * @ah: The address handle to modify.
 3043  * @ah_attr: The new address vector attributes to associate with the
 3044  *   address handle.
 3045  */
 3046 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
 3047 
 3048 /**
 3049  * ib_query_ah - Queries the address vector associated with an address
 3050  *   handle.
 3051  * @ah: The address handle to query.
 3052  * @ah_attr: The address vector attributes associated with the address
 3053  *   handle.
 3054  */
 3055 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
 3056 
 3057 enum rdma_destroy_ah_flags {
 3058         /* In a sleepable context */
 3059         RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
 3060 };
 3061 
 3062 /**
 3063  * ib_destroy_ah_user - Destroys an address handle.
 3064  * @ah: The address handle to destroy.
 3065  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 3066  * @udata: Valid user data or NULL for kernel objects
 3067  */
 3068 int ib_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
 3069 
 3070 /**
 3071  * ib_destroy_ah - Destroys a kernel address handle.
 3072  * @ah: The address handle to destroy.
 3073  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 3074  *
 3075  * NOTE: for user ah use ib_destroy_ah_user with valid udata!
 3076  */
 3077 static inline int ib_destroy_ah(struct ib_ah *ah, u32 flags)
 3078 {
 3079         return ib_destroy_ah_user(ah, flags, NULL);
 3080 }
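
/*
 * Example (illustrative sketch, not part of the original header): a UD
 * service replying to a received datagram builds its AH directly from the
 * work completion and destroys it when done; "example_ud_reply" is a
 * hypothetical name.
 */
#if 0
static void example_ud_reply(struct ib_pd *pd, const struct ib_wc *wc,
                             const struct ib_grh *grh, u8 port_num)
{
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
        if (IS_ERR(ah))
                return;
        /* ... post UD sends that reference "ah" in the work request ... */
        ib_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}
#endif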
 3081 
 3082 /**
 3083  * ib_create_srq - Creates a SRQ associated with the specified protection
 3084  *   domain.
 3085  * @pd: The protection domain associated with the SRQ.
 3086  * @srq_init_attr: A list of initial attributes required to create the
 3087  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 3088  *   the actual capabilities of the created SRQ.
 3089  *
 3090  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 3091  * requested size of the SRQ, and set to the actual values allocated
 3092  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 3093  * will always be at least as large as the requested values.
 3094  */
 3095 struct ib_srq *ib_create_srq(struct ib_pd *pd,
 3096                              struct ib_srq_init_attr *srq_init_attr);
 3097 
 3098 /**
 3099  * ib_modify_srq - Modifies the attributes for the specified SRQ.
 3100  * @srq: The SRQ to modify.
 3101  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 3102  *   the current values of selected SRQ attributes are returned.
 3103  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 3104  *   are being modified.
 3105  *
 3106  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 3107  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 3108  * the number of receives queued drops below the limit.
 3109  */
 3110 int ib_modify_srq(struct ib_srq *srq,
 3111                   struct ib_srq_attr *srq_attr,
 3112                   enum ib_srq_attr_mask srq_attr_mask);
 3113 
 3114 /**
 3115  * ib_query_srq - Returns the attribute list and current values for the
 3116  *   specified SRQ.
 3117  * @srq: The SRQ to query.
 3118  * @srq_attr: The attributes of the specified SRQ.
 3119  */
 3120 int ib_query_srq(struct ib_srq *srq,
 3121                  struct ib_srq_attr *srq_attr);
 3122 
 3123 /**
 3124  * ib_destroy_srq_user - Destroys the specified SRQ.
 3125  * @srq: The SRQ to destroy.
 3126  * @udata: Valid user data or NULL for kernel objects
 3127  */
 3128 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
 3129 
 3130 /**
 3131  * ib_destroy_srq - Destroys the specified kernel SRQ.
 3132  * @srq: The SRQ to destroy.
 3133  *
 3134  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 3135  */
 3136 static inline int ib_destroy_srq(struct ib_srq *srq)
 3137 {
 3138         return ib_destroy_srq_user(srq, NULL);
 3139 }
 3140 
 3141 /**
 3142  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 3143  * @srq: The SRQ to post the work request on.
 3144  * @recv_wr: A list of work requests to post on the receive queue.
 3145  * @bad_recv_wr: On an immediate failure, this parameter will reference
 3146  *   the work request that failed to be posted on the QP.
 3147  */
 3148 static inline int ib_post_srq_recv(struct ib_srq *srq,
 3149                                    const struct ib_recv_wr *recv_wr,
 3150                                    const struct ib_recv_wr **bad_recv_wr)
 3151 {
 3152         return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
 3153 }
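
/*
 * Example (illustrative sketch, not part of the original header): posting a
 * single one-SGE receive work request to an SRQ; "example_post_one_srq_recv"
 * is a hypothetical helper.
 */
#if 0
static int example_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
                                     u32 len, u32 lkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = wr_id,
                .sg_list = &sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        return ib_post_srq_recv(srq, &wr, &bad_wr);
}
#endif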
 3154 
 3155 /**
 3156  * ib_create_qp - Creates a QP associated with the specified protection
 3157  *   domain.
 3158  * @pd: The protection domain associated with the QP.
 3159  * @qp_init_attr: A list of initial attributes required to create the
 3160  *   QP.  If QP creation succeeds, then the attributes are updated to
 3161  *   the actual capabilities of the created QP.
 3162  */
 3163 struct ib_qp *ib_create_qp(struct ib_pd *pd,
 3164                            struct ib_qp_init_attr *qp_init_attr);
 3165 
 3166 /**
 3167  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 3168  * @qp: The QP to modify.
 3169  * @attr: On input, specifies the QP attributes to modify.  On output,
 3170  *   the current values of selected QP attributes are returned.
 3171  * @attr_mask: A bit-mask used to specify which attributes of the QP
 3172  *   are being modified.
 3173  * @udata: pointer to user's input/output buffer information needed by
 3174  *   the provider driver.
 3175  * It returns 0 on success and returns appropriate error code on error.
 3176  */
 3177 int ib_modify_qp_with_udata(struct ib_qp *qp,
 3178                             struct ib_qp_attr *attr,
 3179                             int attr_mask,
 3180                             struct ib_udata *udata);
 3181 
 3182 /**
 3183  * ib_modify_qp - Modifies the attributes for the specified QP and then
 3184  *   transitions the QP to the given state.
 3185  * @qp: The QP to modify.
 3186  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 3187  *   the current values of selected QP attributes are returned.
 3188  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 3189  *   are being modified.
 3190  */
 3191 int ib_modify_qp(struct ib_qp *qp,
 3192                  struct ib_qp_attr *qp_attr,
 3193                  int qp_attr_mask);
 3194 
 3195 /**
 3196  * ib_query_qp - Returns the attribute list and current values for the
 3197  *   specified QP.
 3198  * @qp: The QP to query.
 3199  * @qp_attr: The attributes of the specified QP.
 3200  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 3201  * @qp_init_attr: Additional attributes of the selected QP.
 3202  *
 3203  * The qp_attr_mask may be used to limit the query to gathering only the
 3204  * selected attributes.
 3205  */
 3206 int ib_query_qp(struct ib_qp *qp,
 3207                 struct ib_qp_attr *qp_attr,
 3208                 int qp_attr_mask,
 3209                 struct ib_qp_init_attr *qp_init_attr);
 3210 
 3211 /**
 3212  * ib_destroy_qp_user - Destroys the specified QP.
 3213  * @qp: The QP to destroy.
 3214  * @udata: Valid udata or NULL for kernel objects
 3215  */
 3216 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
 3217 
 3218 /**
 3219  * ib_destroy_qp - Destroys the specified kernel QP.
 3220  * @qp: The QP to destroy.
 3221  *
 3222  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 3223  */
 3224 static inline int ib_destroy_qp(struct ib_qp *qp)
 3225 {
 3226         return ib_destroy_qp_user(qp, NULL);
 3227 }
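
/*
 * Example (illustrative sketch, not part of the original header): moving a
 * fresh QP from RESET to INIT with ib_modify_qp().  Later transitions to
 * RTR/RTS need address and remote QP attributes, typically supplied by a
 * connection manager.
 */
#if 0
static int example_qp_to_init(struct ib_qp *qp, u8 port_num, u16 pkey_index)
{
        struct ib_qp_attr attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = pkey_index,
                .port_num        = port_num,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };

        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
#endif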
 3228 
 3229 /**
 3230  * ib_open_qp - Obtain a reference to an existing sharable QP.
 3231  * @xrcd: XRC domain
 3232  * @qp_open_attr: Attributes identifying the QP to open.
 3233  *
 3234  * Returns a reference to a sharable QP.
 3235  */
 3236 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 3237                          struct ib_qp_open_attr *qp_open_attr);
 3238 
 3239 /**
 3240  * ib_close_qp - Release an external reference to a QP.
 3241  * @qp: The QP handle to release
 3242  *
 3243  * The opened QP handle is released by the caller.  The underlying
 3244  * shared QP is not destroyed until all internal references are released.
 3245  */
 3246 int ib_close_qp(struct ib_qp *qp);
 3247 
 3248 /**
 3249  * ib_post_send - Posts a list of work requests to the send queue of
 3250  *   the specified QP.
 3251  * @qp: The QP to post the work request on.
 3252  * @send_wr: A list of work requests to post on the send queue.
 3253  * @bad_send_wr: On an immediate failure, this parameter will reference
 3254  *   the work request that failed to be posted on the QP.
 3255  *
 3256  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 3257  * error is returned, the QP state shall not be affected,
 3258  * ib_post_send() will return an immediate error after queueing any
 3259  * earlier work requests in the list.
 3260  */
 3261 static inline int ib_post_send(struct ib_qp *qp,
 3262                                const struct ib_send_wr *send_wr,
 3263                                const struct ib_send_wr **bad_send_wr)
 3264 {
 3265         return qp->device->post_send(qp, send_wr, bad_send_wr);
 3266 }
 3267 
 3268 /**
 3269  * ib_post_recv - Posts a list of work requests to the receive queue of
 3270  *   the specified QP.
 3271  * @qp: The QP to post the work request on.
 3272  * @recv_wr: A list of work requests to post on the receive queue.
 3273  * @bad_recv_wr: On an immediate failure, this parameter will reference
 3274  *   the work request that failed to be posted on the QP.
 3275  */
 3276 static inline int ib_post_recv(struct ib_qp *qp,
 3277                                const struct ib_recv_wr *recv_wr,
 3278                                const struct ib_recv_wr **bad_recv_wr)
 3279 {
 3280         return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
 3281 }
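
/*
 * Example (illustrative sketch, not part of the original header): posting a
 * single signaled SEND; on immediate failure, bad_wr points at the work
 * request that could not be queued.
 */
#if 0
static int example_post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len,
                                 u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = 1,
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
        };
        const struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}
#endif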
 3282 
 3283 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 3284                                  int nr_cqe, int comp_vector,
 3285                                  enum ib_poll_context poll_ctx,
 3286                                  const char *caller, struct ib_udata *udata);
 3287 
 3288 /**
 3289  * ib_alloc_cq_user: Allocate kernel/user CQ
 3290  * @dev: The IB device
 3291  * @private: Private data attached to the CQE
 3292  * @nr_cqe: Number of CQEs in the CQ
 3293  * @comp_vector: Completion vector used for the IRQs
 3294  * @poll_ctx: Context used for polling the CQ
 3295  * @udata: Valid user data or NULL for kernel objects
 3296  */
 3297 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
 3298                                              void *private, int nr_cqe,
 3299                                              int comp_vector,
 3300                                              enum ib_poll_context poll_ctx,
 3301                                              struct ib_udata *udata)
 3302 {
 3303         return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
 3304                                   "ibcore", udata);
 3305 }
 3306 
 3307 /**
 3308  * ib_alloc_cq: Allocate kernel CQ
 3309  * @dev: The IB device
 3310  * @private: Private data attached to the CQE
 3311  * @nr_cqe: Number of CQEs in the CQ
 3312  * @comp_vector: Completion vector used for the IRQs
 3313  * @poll_ctx: Context used for polling the CQ
 3314  *
 3315  * NOTE: for user cq use ib_alloc_cq_user with valid udata!
 3316  */
 3317 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
 3318                                         int nr_cqe, int comp_vector,
 3319                                         enum ib_poll_context poll_ctx)
 3320 {
 3321         return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
 3322                                 NULL);
 3323 }
 3324 
 3325 /**
 3326  * ib_free_cq_user - Free kernel/user CQ
 3327  * @cq: The CQ to free
 3328  * @udata: Valid user data or NULL for kernel objects
 3329  */
 3330 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
 3331 
 3332 /**
 3333  * ib_free_cq - Free kernel CQ
 3334  * @cq: The CQ to free
 3335  *
 3336  * NOTE: for user cq use ib_free_cq_user with valid udata!
 3337  */
 3338 static inline void ib_free_cq(struct ib_cq *cq)
 3339 {
 3340         ib_free_cq_user(cq, NULL);
 3341 }
 3342 
 3343 /**
 3344  * ib_create_cq - Creates a CQ on the specified device.
 3345  * @device: The device on which to create the CQ.
 3346  * @comp_handler: A user-specified callback that is invoked when a
 3347  *   completion event occurs on the CQ.
 3348  * @event_handler: A user-specified callback that is invoked when an
 3349  *   asynchronous event not associated with a completion occurs on the CQ.
 3350  * @cq_context: Context associated with the CQ returned to the user via
 3351  *   the associated completion and event handlers.
 3352  * @cq_attr: The attributes with which the CQ should be created.
 3353  *
 3354  * Users can examine the cq structure to determine the actual CQ size.
 3355  */
 3356 struct ib_cq *__ib_create_cq(struct ib_device *device,
 3357                              ib_comp_handler comp_handler,
 3358                              void (*event_handler)(struct ib_event *, void *),
 3359                              void *cq_context,
 3360                              const struct ib_cq_init_attr *cq_attr,
 3361                              const char *caller);
 3362 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
 3363         __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), "ibcore")
 3364 
 3365 /**
 3366  * ib_resize_cq - Modifies the capacity of the CQ.
 3367  * @cq: The CQ to resize.
 3368  * @cqe: The minimum size of the CQ.
 3369  *
 3370  * Users can examine the cq structure to determine the actual CQ size.
 3371  */
 3372 int ib_resize_cq(struct ib_cq *cq, int cqe);
 3373 
 3374 /**
 3375  * ib_modify_cq - Modifies moderation params of the CQ
 3376  * @cq: The CQ to modify.
 3377  * @cq_count: number of CQEs that will trigger an event
 3378  * @cq_period: max period of time in usec before triggering an event
 3379  *
 3380  */
 3381 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 3382 
 3383 /**
 3384  * ib_destroy_cq_user - Destroys the specified CQ.
 3385  * @cq: The CQ to destroy.
 3386  * @udata: Valid user data or NULL for kernel objects
 3387  */
 3388 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
 3389 
 3390 /**
 3391  * ib_destroy_cq - Destroys the specified kernel CQ.
 3392  * @cq: The CQ to destroy.
 3393  *
 3394  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 3395  */
 3396 static inline void ib_destroy_cq(struct ib_cq *cq)
 3397 {
 3398         ib_destroy_cq_user(cq, NULL);
 3399 }
 3400 
 3401 /**
 3402  * ib_poll_cq - poll a CQ for completion(s)
 3403  * @cq: the CQ being polled
 3404  * @num_entries: maximum number of completions to return
 3405  * @wc: array of at least @num_entries &struct ib_wc where completions
 3406  *   will be returned
 3407  *
 3408  * Poll a CQ for (possibly multiple) completions.  If the return value
 3409  * is < 0, an error occurred.  If the return value is >= 0, it is the
 3410  * number of completions returned.  If the return value is
 3411  * non-negative and < num_entries, then the CQ was emptied.
 3412  */
 3413 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
 3414                              struct ib_wc *wc)
 3415 {
 3416         return cq->device->poll_cq(cq, num_entries, wc);
 3417 }
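
/*
 * Example (illustrative sketch, not part of the original header): draining
 * all available completions from a CQ in batches of eight.
 */
#if 0
static void example_drain_completions(struct ib_cq *cq)
{
        struct ib_wc wc[8];
        int i, n;

        while ((n = ib_poll_cq(cq, 8, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                printf("wr_id %ju failed: %d\n",
                                    (uintmax_t)wc[i].wr_id, wc[i].status);
                }
        }
}
#endif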
 3418 
 3419 /**
 3420  * ib_peek_cq - Returns the number of unreaped completions currently
 3421  *   on the specified CQ.
 3422  * @cq: The CQ to peek.
 3423  * @wc_cnt: A minimum number of unreaped completions to check for.
 3424  *
 3425  * If the number of unreaped completions is greater than or equal to wc_cnt,
 3426  * this function returns wc_cnt, otherwise, it returns the actual number of
 3427  * unreaped completions.
 3428  */
 3429 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
 3430 
 3431 /**
 3432  * ib_req_notify_cq - Request completion notification on a CQ.
 3433  * @cq: The CQ to generate an event for.
 3434  * @flags:
 3435  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 3436  *   to request an event on the next solicited event or next work
 3437  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 3438  *   may also be |ed in to request a hint about missed events, as
 3439  *   described below.
 3440  *
 3441  * Return Value:
 3442  *    < 0 means an error occurred while requesting notification
 3443  *   == 0 means notification was requested successfully, and if
 3444  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 3445  *        were missed and it is safe to wait for another event.  In
 3446  *        this case it is guaranteed that any work completions added
 3447  *        to the CQ since the last CQ poll will trigger a completion
 3448  *        notification event.
 3449  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 3450  *        in.  It means that the consumer must poll the CQ again to
 3451  *        make sure it is empty to avoid missing an event because of a
 3452  *        race between requesting notification and an entry being
 3453  *        added to the CQ.  This return value means it is possible
 3454  *        (but not guaranteed) that a work completion has been added
 3455  *        to the CQ since the last poll without triggering a
 3456  *        completion notification event.
 3457  */
 3458 static inline int ib_req_notify_cq(struct ib_cq *cq,
 3459                                    enum ib_cq_notify_flags flags)
 3460 {
 3461         return cq->device->req_notify_cq(cq, flags);
 3462 }
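
/*
 * Example (illustrative sketch, not part of the original header): the
 * standard poll/re-arm loop implied by the return-value rules above.  A
 * positive return from ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS
 * means the CQ must be polled again before sleeping.
 */
#if 0
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0) {
                        /* ... process the completion ... */
                }
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif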
 3463 
 3464 /**
 3465  * ib_req_ncomp_notif - Request completion notification when there are
 3466  *   at least the specified number of unreaped completions on the CQ.
 3467  * @cq: The CQ to generate an event for.
 3468  * @wc_cnt: The number of unreaped completions that should be on the
 3469  *   CQ before an event is generated.
 3470  */
 3471 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
 3472 {
 3473         return cq->device->req_ncomp_notif ?
 3474                 cq->device->req_ncomp_notif(cq, wc_cnt) :
 3475                 -ENOSYS;
 3476 }
 3477 
 3478 /**
 3479  * ib_dma_mapping_error - check a DMA addr for error
 3480  * @dev: The device for which the dma_addr was created
 3481  * @dma_addr: The DMA address to check
 3482  */
 3483 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 3484 {
 3485         if (dev->dma_ops)
 3486                 return dev->dma_ops->mapping_error(dev, dma_addr);
 3487         return dma_mapping_error(dev->dma_device, dma_addr);
 3488 }
 3489 
 3490 /**
 3491  * ib_dma_map_single - Map a kernel virtual address to DMA address
 3492  * @dev: The device for which the dma_addr is to be created
 3493  * @cpu_addr: The kernel virtual address
 3494  * @size: The size of the region in bytes
 3495  * @direction: The direction of the DMA
 3496  */
 3497 static inline u64 ib_dma_map_single(struct ib_device *dev,
 3498                                     void *cpu_addr, size_t size,
 3499                                     enum dma_data_direction direction)
 3500 {
 3501         if (dev->dma_ops)
 3502                 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
 3503         return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 3504 }
 3505 
 3506 /**
 3507  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 3508  * @dev: The device for which the DMA address was created
 3509  * @addr: The DMA address
 3510  * @size: The size of the region in bytes
 3511  * @direction: The direction of the DMA
 3512  */
 3513 static inline void ib_dma_unmap_single(struct ib_device *dev,
 3514                                        u64 addr, size_t size,
 3515                                        enum dma_data_direction direction)
 3516 {
 3517         if (dev->dma_ops)
 3518                 dev->dma_ops->unmap_single(dev, addr, size, direction);
 3519         else
 3520                 dma_unmap_single(dev->dma_device, addr, size, direction);
 3521 }
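
/*
 * Example (illustrative sketch, not part of the original header): the usual
 * map / error-check / unmap sequence for a single kernel buffer.
 */
#if 0
static int example_dma_one_buffer(struct ib_device *dev, void *buf,
                                  size_t len)
{
        u64 dma_addr;

        dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                return -ENOMEM;
        /* ... reference dma_addr from an ib_sge and post the WR ... */
        ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
        return 0;
}
#endif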
 3522 
 3523 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
 3524                                           void *cpu_addr, size_t size,
 3525                                           enum dma_data_direction direction,
 3526                                           struct dma_attrs *dma_attrs)
 3527 {
 3528         return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
 3529                                     direction, dma_attrs);
 3530 }
 3531 
 3532 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
 3533                                              u64 addr, size_t size,
 3534                                              enum dma_data_direction direction,
 3535                                              struct dma_attrs *dma_attrs)
 3536 {
 3537         return dma_unmap_single_attrs(dev->dma_device, addr, size,
 3538                                       direction, dma_attrs);
 3539 }
 3540 
 3541 /**
 3542  * ib_dma_map_page - Map a physical page to DMA address
 3543  * @dev: The device for which the dma_addr is to be created
 3544  * @page: The page to be mapped
 3545  * @offset: The offset within the page
 3546  * @size: The size of the region in bytes
 3547  * @direction: The direction of the DMA
 3548  */
 3549 static inline u64 ib_dma_map_page(struct ib_device *dev,
 3550                                   struct page *page,
 3551                                   unsigned long offset,
 3552                                   size_t size,
 3553                                   enum dma_data_direction direction)
 3554 {
 3555         if (dev->dma_ops)
 3556                 return dev->dma_ops->map_page(dev, page, offset, size, direction);
 3557         return dma_map_page(dev->dma_device, page, offset, size, direction);
 3558 }
 3559 
 3560 /**
 3561  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 3562  * @dev: The device for which the DMA address was created
 3563  * @addr: The DMA address
 3564  * @size: The size of the region in bytes
 3565  * @direction: The direction of the DMA
 3566  */
 3567 static inline void ib_dma_unmap_page(struct ib_device *dev,
 3568                                      u64 addr, size_t size,
 3569                                      enum dma_data_direction direction)
 3570 {
 3571         if (dev->dma_ops)
 3572                 dev->dma_ops->unmap_page(dev, addr, size, direction);
 3573         else
 3574                 dma_unmap_page(dev->dma_device, addr, size, direction);
 3575 }
 3576 
 3577 /**
 3578  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 3579  * @dev: The device for which the DMA addresses are to be created
 3580  * @sg: The array of scatter/gather entries
 3581  * @nents: The number of scatter/gather entries
 3582  * @direction: The direction of the DMA
 3583  */
 3584 static inline int ib_dma_map_sg(struct ib_device *dev,
 3585                                 struct scatterlist *sg, int nents,
 3586                                 enum dma_data_direction direction)
 3587 {
 3588         if (dev->dma_ops)
 3589                 return dev->dma_ops->map_sg(dev, sg, nents, direction);
 3590         return dma_map_sg(dev->dma_device, sg, nents, direction);
 3591 }
 3592 
 3593 /**
 3594  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 3595  * @dev: The device for which the DMA addresses were created
 3596  * @sg: The array of scatter/gather entries
 3597  * @nents: The number of scatter/gather entries
 3598  * @direction: The direction of the DMA
 3599  */
 3600 static inline void ib_dma_unmap_sg(struct ib_device *dev,
 3601                                    struct scatterlist *sg, int nents,
 3602                                    enum dma_data_direction direction)
 3603 {
 3604         if (dev->dma_ops)
 3605                 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 3606         else
 3607                 dma_unmap_sg(dev->dma_device, sg, nents, direction);
 3608 }
 3609 
 3610 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 3611                                       struct scatterlist *sg, int nents,
 3612                                       enum dma_data_direction direction,
 3613                                       struct dma_attrs *dma_attrs)
 3614 {
 3615         if (dev->dma_ops)
 3616                 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
 3617                                                   dma_attrs);
 3618         else
 3619                 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
 3620                                         dma_attrs);
 3621 }
 3622 
 3623 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 3624                                          struct scatterlist *sg, int nents,
 3625                                          enum dma_data_direction direction,
 3626                                          struct dma_attrs *dma_attrs)
 3627 {
 3628         if (dev->dma_ops)
 3629                 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
 3630                                                   dma_attrs);
 3631         else
 3632                 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
 3633                                    dma_attrs);
 3634 }
 3635 /**
 3636  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 3637  * @dev: The device for which the DMA addresses were created
 3638  * @sg: The scatter/gather entry
 3639  *
 3640  * Note: this function is obsolete. To do: change all occurrences of
 3641  * ib_sg_dma_address() into sg_dma_address().
 3642  */
 3643 static inline u64 ib_sg_dma_address(struct ib_device *dev,
 3644                                     struct scatterlist *sg)
 3645 {
 3646         return sg_dma_address(sg);
 3647 }
 3648 
 3649 /**
 3650  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 3651  * @dev: The device for which the DMA addresses were created
 3652  * @sg: The scatter/gather entry
 3653  *
 3654  * Note: this function is obsolete. To do: change all occurrences of
 3655  * ib_sg_dma_len() into sg_dma_len().
 3656  */
 3657 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
 3658                                          struct scatterlist *sg)
 3659 {
 3660         return sg_dma_len(sg);
 3661 }
 3662 
 3663 /**
 3664  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 3665  * @dev: The device for which the DMA address was created
 3666  * @addr: The DMA address
 3667  * @size: The size of the region in bytes
 3668  * @dir: The direction of the DMA
 3669  */
 3670 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 3671                                               u64 addr,
 3672                                               size_t size,
 3673                                               enum dma_data_direction dir)
 3674 {
 3675         if (dev->dma_ops)
 3676                 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 3677         else
 3678                 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 3679 }
 3680 
 3681 /**
 3682  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 3683  * @dev: The device for which the DMA address was created
 3684  * @addr: The DMA address
 3685  * @size: The size of the region in bytes
 3686  * @dir: The direction of the DMA
 3687  */
 3688 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 3689                                                  u64 addr,
 3690                                                  size_t size,
 3691                                                  enum dma_data_direction dir)
 3692 {
 3693         if (dev->dma_ops)
 3694                 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 3695         else
 3696                 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 3697 }
 3698 
 3699 /**
 3700  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 3701  * @dev: The device for which the DMA address is requested
 3702  * @size: The size of the region to allocate in bytes
 3703  * @dma_handle: A pointer for returning the DMA address of the region
 3704  * @flag: memory allocator flags
 3705  */
 3706 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 3707                                            size_t size,
 3708                                            u64 *dma_handle,
 3709                                            gfp_t flag)
 3710 {
 3711         if (dev->dma_ops)
 3712                 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 3713         else {
 3714                 dma_addr_t handle;
 3715                 void *ret;
 3716 
 3717                 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
 3718                 *dma_handle = handle;
 3719                 return ret;
 3720         }
 3721 }
 3722 
 3723 /**
 3724  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 3725  * @dev: The device for which the DMA addresses were allocated
 3726  * @size: The size of the region
 3727  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 3728  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 3729  */
 3730 static inline void ib_dma_free_coherent(struct ib_device *dev,
 3731                                         size_t size, void *cpu_addr,
 3732                                         u64 dma_handle)
 3733 {
 3734         if (dev->dma_ops)
 3735                 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 3736         else
 3737                 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 3738 }
 3739 
 3740 /**
 3741  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 3742  *   HCA translation table.
 3743  * @mr: The memory region to deregister.
 3744  *
 3745  * This function can fail if the memory region has memory windows bound to it.
 3746  */
 3747 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
 3748 
 3749 /**
 3750  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 3751  *   HCA translation table.
 3752  * @mr: The memory region to deregister.
 3753  *
 3754  * This function can fail if the memory region has memory windows bound to it.
 3755  *
 3756  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 3757  */
 3758 static inline int ib_dereg_mr(struct ib_mr *mr)
 3759 {
 3760         return ib_dereg_mr_user(mr, NULL);
 3761 }
 3762 
 3763 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
 3764                                u32 max_num_sg, struct ib_udata *udata);
 3765 
 3766 static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 3767                                         enum ib_mr_type mr_type, u32 max_num_sg)
 3768 {
 3769         return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
 3770 }
 3771 
 3772 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
 3773                                     u32 max_num_data_sg,
 3774                                     u32 max_num_meta_sg);
 3775 
 3776 /**
 3777  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 3778  *   R_Key and L_Key.
 3779  * @mr: struct ib_mr pointer to be updated.
 3780  * @newkey: new key to be used.
 3781  */
 3782 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
 3783 {
 3784         mr->lkey = (mr->lkey & 0xffffff00) | newkey;
 3785         mr->rkey = (mr->rkey & 0xffffff00) | newkey;
 3786 }
 3787 
 3788 /**
 3789  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 3790  * for calculating a new rkey for type 2 memory windows.
 3791  * @rkey: the rkey to increment.
 3792  */
 3793 static inline u32 ib_inc_rkey(u32 rkey)
 3794 {
 3795         const u32 mask = 0x000000ff;
 3796         return ((rkey + 1) & mask) | (rkey & ~mask);
 3797 }
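
/*
 * Example (illustrative sketch, not part of the original header): bumping a
 * fast-reg MR's key before re-registration so that stale remote references
 * using the old rkey no longer match.
 */
#if 0
static void example_refresh_mr_key(struct ib_mr *mr)
{
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
}
#endif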
 3798 
 3799 /**
 3800  * ib_alloc_fmr - Allocates an unmapped fast memory region.
 3801  * @pd: The protection domain associated with the unmapped region.
 3802  * @mr_access_flags: Specifies the memory access rights.
 3803  * @fmr_attr: Attributes of the unmapped region.
 3804  *
 3805  * A fast memory region must be mapped before it can be used as part of
 3806  * a work request.
 3807  */
 3808 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
 3809                             int mr_access_flags,
 3810                             struct ib_fmr_attr *fmr_attr);
 3811 
 3812 /**
 3813  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 3814  * @fmr: The fast memory region to associate with the pages.
 3815  * @page_list: An array of physical pages to map to the fast memory region.
 3816  * @list_len: The number of pages in page_list.
 3817  * @iova: The I/O virtual address to use with the mapped region.
 3818  */
 3819 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
 3820                                   u64 *page_list, int list_len,
 3821                                   u64 iova)
 3822 {
 3823         return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
 3824 }
 3825 
 3826 /**
 3827  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 3828  * @fmr_list: A linked list of fast memory regions to unmap.
 3829  */
 3830 int ib_unmap_fmr(struct list_head *fmr_list);
 3831 
 3832 /**
 3833  * ib_dealloc_fmr - Deallocates a fast memory region.
 3834  * @fmr: The fast memory region to deallocate.
 3835  */
 3836 int ib_dealloc_fmr(struct ib_fmr *fmr);
 3837 
 3838 /**
 3839  * ib_attach_mcast - Attaches the specified QP to a multicast group.
 3840  * @qp: QP to attach to the multicast group.  The QP must be type
 3841  *   IB_QPT_UD.
 3842  * @gid: Multicast group GID.
 3843  * @lid: Multicast group LID in host byte order.
 3844  *
 3845  * In order to send and receive multicast packets, subnet
 3846  * administration must have created the multicast group and configured
 3847  * the fabric appropriately.  The port associated with the specified
 3848  * QP must also be a member of the multicast group.
 3849  */
 3850 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 3851 
 3852 /**
 3853  * ib_detach_mcast - Detaches the specified QP from a multicast group.
 3854  * @qp: QP to detach from the multicast group.
 3855  * @gid: Multicast group GID.
 3856  * @lid: Multicast group LID in host byte order.
 3857  */
 3858 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 3859 
 3860 /**
 3861  * ib_alloc_xrcd - Allocates an XRC domain.
 3862  * @device: The device on which to allocate the XRC domain.
 3863  * @caller: Module name for kernel consumers
 3864  */
 3865 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
 3866 #define ib_alloc_xrcd(device) \
 3867         __ib_alloc_xrcd((device), "ibcore")
 3868 
 3869 /**
 3870  * ib_dealloc_xrcd - Deallocates an XRC domain.
 3871  * @xrcd: The XRC domain to deallocate.
 3872  * @udata: Valid user data or NULL for kernel object
 3873  */
 3874 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
 3875 
 3876 static inline int ib_check_mr_access(int flags)
 3877 {
 3878         /*
 3879          * Local write permission is required if remote write or
 3880          * remote atomic permission is also requested.
 3881          */
 3882         if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
 3883             !(flags & IB_ACCESS_LOCAL_WRITE))
 3884                 return -EINVAL;
 3885 
 3886         if (flags & ~IB_ACCESS_SUPPORTED)
 3887                 return -EINVAL;
 3888 
 3889         return 0;
 3890 }
 3891 
 3892 static inline bool ib_access_writable(int access_flags)
 3893 {
 3894         /*
 3895          * We have writable memory backing the MR if any of the following
 3896          * access flags are set.  "Local write" and "remote write" obviously
 3897          * require write access.  "Remote atomic" can do things like fetch and
 3898          * add, which will modify memory, and "MW bind" can change permissions
 3899          * by binding a window.
 3900          */
 3901         return access_flags &
 3902                 (IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
 3903                  IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
 3904 }
 3905 
 3906 /**
 3907  * ib_check_mr_status: lightweight check of MR status.
 3908  *     This routine may provide status checks on a selected
 3909  *     ib_mr.  The first use is for signature status checks.
 3910  *
 3911  * @mr: A memory region.
 3912  * @check_mask: Bitmask of which checks to perform from
 3913  *     ib_mr_status_check enumeration.
 3914  * @mr_status: The container of relevant status checks.
 3915  *     Failed checks will be indicated in the status bitmask
 3916  *     and the relevant info shall be in the error item.
 3917  */
 3918 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 3919                        struct ib_mr_status *mr_status);
 3920 
 3921 struct ifnet *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
 3922                                             u16 pkey, const union ib_gid *gid,
 3923                                             const struct sockaddr *addr);
 3924 struct ib_wq *ib_create_wq(struct ib_pd *pd,
 3925                            struct ib_wq_init_attr *init_attr);
 3926 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 3927 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
 3928                  u32 wq_attr_mask);
 3929 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
 3930                                                  struct ib_rwq_ind_table_init_attr*
 3931                                                  wq_ind_table_init_attr);
 3932 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 3933 
 3934 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
 3935                  unsigned int *sg_offset, unsigned int page_size);
 3936 
 3937 static inline int
 3938 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
 3939                   unsigned int *sg_offset, unsigned int page_size)
 3940 {
 3941         int n;
 3942 
 3943         n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
 3944         mr->iova = 0;
 3945 
 3946         return n;
 3947 }
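
/*
 * Example (illustrative sketch, not part of the original header): mapping a
 * scatterlist into an MR ahead of a fast registration; passing a NULL
 * sg_offset is assumed to mean "start at offset zero" here.
 */
#if 0
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
                          int sg_nents)
{
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n < 0)
                return n;
        if (n < sg_nents)
                return -EINVAL;         /* could not map the whole list */
        return 0;
}
#endif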
 3948 
 3949 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
 3950                 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
 3951 
 3952 void ib_drain_rq(struct ib_qp *qp);
 3953 void ib_drain_sq(struct ib_qp *qp);
 3954 void ib_drain_qp(struct ib_qp *qp);
 3955 
 3956 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
 3957 
 3958 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
 3959 
 3960 int ib_resolve_eth_dmac(struct ib_device *device,
 3961                         struct ib_ah_attr *ah_attr);
 3962 #endif /* IB_VERBS_H */
