/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

#define iwdev_to_idev(iwdev)	(&(iwdev)->rf->sc_dev)

struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
	struct list_head vma_list;
	struct mutex vma_list_mutex; /* protect the vma_list */
	int abi_ver;
	bool legacy_mode;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

struct irdma_av {
	u8 macaddr[16];
	struct ib_ah_attr attrs;
	union {
		struct sockaddr saddr;
		struct sockaddr_in saddr_in;
		struct sockaddr_in6 saddr_in6;
	} sgid_addr, dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
};

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	u8 is_hwreg;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u64 page_msk;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	atomic_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	u32 *sig_trk_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	atomic_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_sge *sg_list;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
};

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC, /* map I/O space non-cached */
	IRDMA_MMAP_IO_WC, /* map I/O space write-combined */
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};
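
/*
 * Usage sketch (an assumption about the surrounding driver code, not taken
 * from this header): irdma_user_mmap_entry_insert(), declared near the end of
 * this file, is expected to allocate one of the entries above, record the BAR
 * offset and IRDMA_MMAP_IO_NC/WC flag, register it through the embedded
 * rdma_user_mmap_entry, and hand back the opaque offset that user space later
 * passes to mmap(2); the driver's mmap handler can then look the entry up by
 * that offset and apply the requested caching attribute when remapping the
 * BAR page.
 */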

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
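
/*
 * Example (a hypothetical caller, not part of this header): given some
 * struct irdma_device *iwdev, the two helpers above can be combined with the
 * iwdev_to_idev() macro defined earlier to report the firmware revision
 * carried in the IRDMA_FEATURE_FW_INFO capability word:
 *
 *	struct irdma_sc_dev *dev = iwdev_to_idev(iwdev);
 *
 *	printf("irdma: firmware %u.%u\n", irdma_fw_major_ver(dev),
 *	       irdma_fw_minor_ver(dev));
 */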

/**
 * irdma_mcast_mac_v4 - Get the multicast MAC for an IPv4 address
 * @ip_addr: IPv4 address
 * @mac: pointer to result MAC address
 *
 */
static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
{
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
					ip[0]};

	ether_addr_copy(mac, mac4);
}
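
/*
 * Worked example (assuming the caller stores the IPv4 address in the u32 in
 * host byte order on a little-endian machine, so ip[0] is the least
 * significant octet): 224.0.0.251 (0xe00000fb) yields 01:00:5e:00:00:fb,
 * i.e. the standard RFC 1112 mapping of the low 23 bits of the group address
 * behind the 01:00:5e prefix.
 */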

/**
 * irdma_mcast_mac_v6 - Get the multicast MAC for an IPv6 address
 * @ip_addr: IPv6 address
 * @mac: pointer to result MAC address
 *
 */
static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
{
	u8 *ip = (u8 *)ip_addr;
	unsigned char mac6[ETH_ALEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};

	ether_addr_copy(mac, mac6);
}
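
/*
 * Worked example (same byte-order assumption as above, with ip_addr taken to
 * point at the low 32-bit word of the IPv6 group): for ff02::1:ff00:1234 that
 * word is 0xff001234, which yields 33:33:ff:00:12:34, the RFC 2464 mapping of
 * the last 32 bits of the group behind the 33:33 prefix.
 */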

struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */