
FreeBSD/Linux Kernel Cross Reference
sys/ofed/include/rdma/rdmavt_qp.h


#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if the QP's send WRs must individually
 *                       request completion signaling (IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for an RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request an ACK, then wait for the ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_PIO_DRAIN    0x0800
#define RVT_S_WAIT_TX           0x1000
#define RVT_S_WAIT_DMA_DESC     0x2000
#define RVT_S_WAIT_KMEM         0x4000
#define RVT_S_WAIT_PSN          0x8000
#define RVT_S_WAIT_ACK          0x10000
#define RVT_S_SEND_ONE          0x20000
#define RVT_S_UNLIMITED_CREDIT  0x40000
#define RVT_S_AHG_VALID         0x80000
#define RVT_S_AHG_CLEAR         0x100000
#define RVT_S_ECN               0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
        (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
         RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
        RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
        RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
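
/*
 * Illustrative sketch: a send engine typically tests these masks, with
 * s_lock held, before letting a QP make progress.  The helper name is
 * hypothetical, not part of rdmavt's API.
 */
static inline int rvt_example_send_blocked(u32 s_flags)
{
        /* Blocked if busy or waiting on any I/O or send condition. */
        return (s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) != 0;
}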

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND                  0x20
#define RVT_FLUSH_RECV                  0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
        union {
                struct ib_send_wr wr;   /* don't use wr.sg_list */
                struct ib_ud_wr ud_wr;
                struct ib_reg_wr reg_wr;
                struct ib_rdma_wr rdma_wr;
                struct ib_atomic_wr atomic_wr;
        };
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size, so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here. */
        struct rvt_rwqe wq[0];
};

struct rvt_rq {
        struct rvt_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        /* protect changes in this struct */
        spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
        struct rvt_sge rdma_sge;
        u64 atomic_data;
        u32 psn;
        u32 lpsn;
        u8 opcode;
        u8 sent;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * drivers can support potentially different sets of operations.
 */
struct rvt_operation_params {
        size_t length;
        u32 qpt_support;
        u32 flags;
};
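
/*
 * Illustrative sketch: one entry of a driver's post-send parameter
 * table built from this struct (the hfi1/qib drivers keep such tables).
 * The values shown are an assumed RDMA WRITE entry, and the variable
 * name is hypothetical.
 */
static const struct rvt_operation_params rvt_example_rdma_write_parms = {
        .length = sizeof(struct ib_rdma_wr),            /* bytes copied into the SWQE */
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC), /* QP types that allow it */
        .flags = 0,                                     /* no RVT_OPERATION_* flags */
};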

/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; both are held together only in modify_qp() or when
 * changing the QP state.
 */
struct rvt_qp {
        struct ib_qp ibqp;
        void *priv; /* Driver private data */
        /* read mostly fields above and below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct rvt_qp __rcu *next;           /* link list for QPN hash table */
        struct rvt_swqe *s_wq;  /* send work queue */
        struct rvt_mmap_info *ip;

        unsigned long timeout_jiffies;  /* computed from timeout */

        enum ib_mtu path_mtu;
        int srate_mbps;         /* s_srate (below) converted to Mbit/s */
        pid_t pid;              /* pid for user mode QPs */
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_ahgpsn;           /* set to the psn in the copy of the header */

        u16 pmtu;               /* decoded from path_mtu */
        u8 log_pmtu;            /* shift for pmtu */
        u8 state;               /* QP state */
        u8 allowed_ops;         /* high order bits of allowed opcodes */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */
        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct rvt_ack_entry *s_ack_queue;
        struct rvt_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
        u32 r_psn;              /* expected rcv packet sequence number */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */

        struct list_head rspwait;       /* link for waiting to respond */

        struct rvt_sge_state r_sge;     /* current receive data */
        struct rvt_rq r_rq;             /* receive work queue */

        /* post send line */
        spinlock_t s_hlock ____cacheline_aligned_in_smp;
        u32 s_head;             /* new entries added here */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
        atomic_t s_reserved_used; /* reserved entries in use */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
        struct rvt_sge_state *s_cur_sge;
        struct rvt_swqe *s_wqe;
        struct rvt_sge_state s_sge;     /* current send request data */
        struct rvt_mregion *s_rdma_mr;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u16 s_rdma_ack_cnt;
        s8 s_ahgidx;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct rvt_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;

        atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

        /*
         * This sge list MUST be last. Do not add anything below here.
         */
        struct rvt_sge r_sg_list[0] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};
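
/*
 * Illustrative sketch: taking both locks in the order documented above
 * (r_rq.lock, then s_lock).  The helper name is hypothetical; matching
 * unlocks would go in the reverse order.
 */
static inline void rvt_example_lock_pair(struct rvt_qp *qp)
{
        spin_lock_irq(&qp->r_rq.lock);  /* receive queue lock first */
        spin_lock(&qp->s_lock);         /* then the send state lock */
}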

struct rvt_srq {
        struct ib_srq ibsrq;
        struct rvt_rq rq;
        struct rvt_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK                0xFFFFFF
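
/*
 * Illustrative sketch: decomposing a QPN into its bitmap page index and
 * bit offset using the constants above.  The helper name is hypothetical.
 */
static inline void rvt_example_qpn_to_bit(u32 qpn, u32 *map, u32 *bit)
{
        *map = qpn / RVT_BITS_PER_PAGE;         /* which map[] page */
        *bit = qpn & RVT_BITS_PER_PAGE_MASK;    /* bit within that page */
}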

/*
 * QPN-map pages start out as NULL; they are allocated upon
 * first use and never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock; /* protect changes to the qp table */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u8  incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock; /* qptable lock */
        struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct rvt_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
                                                unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                     (sizeof(struct rvt_swqe) +
                                      qp->s_max_sge *
                                      sizeof(struct rvt_sge)) * n);
}
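
/*
 * Illustrative usage sketch: walking the send queue from s_last (last
 * completed) up to s_head (newest) with rvt_get_swqe_ptr().  The helper
 * and its callback are hypothetical.
 */
static inline void rvt_example_walk_swq(struct rvt_qp *qp,
                                        void (*visit)(struct rvt_swqe *))
{
        u32 i = qp->s_last;

        while (i != qp->s_head) {
                visit(rvt_get_swqe_ptr(qp, i));
                if (++i == qp->s_size)  /* the ring wraps at s_size */
                        i = 0;
        }
}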

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *)rq->wq->wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}
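
/*
 * Illustrative sketch: peeking at the oldest posted receive WQE.  Real
 * consumers must treat wq->tail as untrusted and bound it by rq->size,
 * since struct rvt_rwq may be mmap'ed into user space.  The helper name
 * is hypothetical.
 */
static inline struct rvt_rwqe *rvt_example_peek_rwqe(struct rvt_rq *rq)
{
        struct rvt_rwq *wq = rq->wq;
        u32 tail = wq->tail;

        if (tail >= rq->size)   /* clamp a corrupted tail index */
                tail = 0;
        if (tail == wq->head)   /* ring is empty */
                return NULL;
        return rvt_get_rwqe_ptr(rq, tail);
}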

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
        atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
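
/*
 * Illustrative sketch (assuming the usual wait_event() semantics): a
 * teardown path drops its own reference, then sleeps on qp->wait until
 * every holder has called rvt_put_qp().  The helper name is hypothetical.
 */
static inline void rvt_example_wait_for_qp_users(struct rvt_qp *qp)
{
        rvt_put_qp(qp);
        wait_event(qp->wait, !atomic_read(&qp->refcount));
}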

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in the post send path to record
 * that a WQE is a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
        atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the s_last
 * ring index update and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
                wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to the s_last change */
                smp_mb__after_atomic();
        }
}
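
/*
 * Illustrative sketch: the completion-side pairing for a reserved
 * operation.  Per the comment above, the unreserve must happen before
 * s_last is advanced.  The helper name is hypothetical.
 */
static inline void rvt_example_complete_swqe(struct rvt_qp *qp)
{
        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

        rvt_qp_wqe_unreserve(qp, wqe);  /* must precede the s_last update */
        if (++qp->s_last == qp->s_size) /* advance and wrap the ring */
                qp->s_last = 0;
}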

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

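/*
 * Illustrative sketch: gating a post-send request on the current QP
 * state via ib_rvt_state_ops[].  The helper name is hypothetical.
 */
static inline int rvt_example_post_send_allowed(struct rvt_qp *qp)
{
        return !!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK);
}
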
#endif          /* DEF_RDMAVT_INCQP_H */



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.