/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

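/*
 * Wire-format note: fields named offsetNN below hold several packed
 * sub-fields beginning at byte offset NN of the CM message body that
 * follows the MAD header; the sub-field widths are listed in the
 * comment above each field.  The inline accessors that follow each
 * structure extract and insert these sub-fields with shifts and masks.
 */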
struct cm_req_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 rsvd4;
        __be64 service_id;
        __be64 local_ca_guid;
        __be32 rsvd24;
        __be32 local_qkey;
        /* local QPN:24, responder resources:8 */
        __be32 offset32;
        /* local EECN:24, initiator depth:8 */
        __be32 offset36;
        /*
         * remote EECN:24, remote CM response timeout:5,
         * transport service type:2, end-to-end flow control:1
         */
        __be32 offset40;
        /* starting PSN:24, local CM response timeout:5, retry count:3 */
        __be32 offset44;
        __be16 pkey;
        /* path MTU:4, RDC exists:1, RNR retry count:3. */
        u8 offset50;
        /* max CM retries:4, SRQ:1, extended transport type:3 */
        u8 offset51;

        __be16 primary_local_lid;
        __be16 primary_remote_lid;
        union ib_gid primary_local_gid;
        union ib_gid primary_remote_gid;
        /* flow label:20, rsvd:6, packet rate:6 */
        __be32 primary_offset88;
        u8 primary_traffic_class;
        u8 primary_hop_limit;
        /* SL:4, subnet local:1, rsvd:3 */
        u8 primary_offset94;
        /* local ACK timeout:5, rsvd:3 */
        u8 primary_offset95;

        __be16 alt_local_lid;
        __be16 alt_remote_lid;
        union ib_gid alt_local_gid;
        union ib_gid alt_remote_gid;
        /* flow label:20, rsvd:6, packet rate:6 */
        __be32 alt_offset132;
        u8 alt_traffic_class;
        u8 alt_hop_limit;
        /* SL:4, subnet local:1, rsvd:3 */
        u8 alt_offset138;
        /* local ACK timeout:5, rsvd:3 */
        u8 alt_offset139;

        u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __attribute__ ((packed));
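
/*
 * Accessor convention used throughout this file: the cm_*_get_*()
 * helpers return one sub-field of a packed offsetNN word (multi-byte
 * values as __be32, in network byte order per the note above), and the
 * cm_*_set_*() helpers rewrite only that sub-field while preserving the
 * neighbouring bits of the same word.
 */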

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
        return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
        req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
                                        (be32_to_cpu(req_msg->offset32) &
                                         0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
        return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
        req_msg->offset32 = cpu_to_be32(resp_res |
                                        (be32_to_cpu(req_msg->offset32) &
                                         0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
        return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
                                         u8 init_depth)
{
        req_msg->offset36 = cpu_to_be32(init_depth |
                                        (be32_to_cpu(req_msg->offset36) &
                                         0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
        return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
                                                  u8 resp_timeout)
{
        req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
                                        (be32_to_cpu(req_msg->offset40) &
                                         0xFFFFFF07));
}

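/*
 * Transport service type lives in bits 1-2 of offset40: the values
 * handled here map 0 to RC and 1 to UC, while 3 selects the extended
 * transport types encoded in the low three bits of offset51 (1 = XRC).
 * Any other encoding is treated as unsupported and reported as 0.
 */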
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
        u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
        switch(transport_type) {
        case 0: return IB_QPT_RC;
        case 1: return IB_QPT_UC;
        case 3:
                switch (req_msg->offset51 & 0x7) {
                case 1: return IB_QPT_XRC_TGT;
                default: return 0;
                }
        default: return 0;
        }
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
                                      enum ib_qp_type qp_type)
{
        switch(qp_type) {
        case IB_QPT_UC:
                req_msg->offset40 = cpu_to_be32((be32_to_cpu(
                                                 req_msg->offset40) &
                                                 0xFFFFFFF9) | 0x2);
                break;
        case IB_QPT_XRC_INI:
                req_msg->offset40 = cpu_to_be32((be32_to_cpu(
                                                 req_msg->offset40) &
                                                 0xFFFFFFF9) | 0x6);
                req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
                break;
        default:
                req_msg->offset40 = cpu_to_be32(be32_to_cpu(
                                                 req_msg->offset40) &
                                                 0xFFFFFFF9);
        }
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
        return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
                                        u8 flow_ctrl)
{
        req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
                                        (be32_to_cpu(req_msg->offset40) &
                                         0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
        return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
                                           __be32 starting_psn)
{
        req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
                                        (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
        return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
                                                 u8 resp_timeout)
{
        req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
                                        (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
        return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
                                          u8 retry_count)
{
        req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
                                        (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
        return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
        req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
        return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
                                              u8 rnr_retry_count)
{
        req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
                                  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
        return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
                                             u8 retries)
{
        req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
        return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
        req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
                                  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
        return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
                                                 __be32 flow_label)
{
        req_msg->primary_offset88 = cpu_to_be32(
                                    (be32_to_cpu(req_msg->primary_offset88) &
                                     0x00000FFF) |
                                    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
        return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
                                                  u8 rate)
{
        req_msg->primary_offset88 = cpu_to_be32(
                                    (be32_to_cpu(req_msg->primary_offset88) &
                                     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
        return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
        req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
                                          (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
        return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
                                                   u8 subnet_local)
{
        req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
                                          ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
        return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
                                                         u8 local_ack_timeout)
{
        req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
                                          (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
        return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
                                             __be32 flow_label)
{
        req_msg->alt_offset132 = cpu_to_be32(
                                 (be32_to_cpu(req_msg->alt_offset132) &
                                  0x00000FFF) |
                                 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
        return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
                                              u8 rate)
{
        req_msg->alt_offset132 = cpu_to_be32(
                                 (be32_to_cpu(req_msg->alt_offset132) &
                                  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
        return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
        req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
                                       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
        return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
                                               u8 subnet_local)
{
        req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
                                       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
        return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
                                                    u8 local_ack_timeout)
{
        req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
                                       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
        CM_MSG_RESPONSE_REQ = 0x0,
        CM_MSG_RESPONSE_REP = 0x1,
        CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
        /* message MRAed:2, rsvd:6 */
        u8 offset8;
        /* service timeout:5, rsvd:3 */
        u8 offset9;

        u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
        return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
        mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
        return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
                                              u8 service_timeout)
{
        mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
                                 (service_timeout << 3));
}

struct cm_rej_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
        /* message REJected:2, rsvd:6 */
        u8 offset8;
        /* reject info length:7, rsvd:1. */
        u8 offset9;
        __be16 reason;
        u8 ari[IB_CM_REJ_ARI_LENGTH];

        u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
        return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
        rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
        return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
                                              u8 len)
{
        rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
        __be32 local_qkey;
        /* local QPN:24, rsvd:8 */
        __be32 offset12;
        /* local EECN:24, rsvd:8 */
        __be32 offset16;
        /* starting PSN:24, rsvd:8 */
        __be32 offset20;
        u8 resp_resources;
        u8 initiator_depth;
        /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
        u8 offset26;
        /* RNR retry count:3, SRQ:1, rsvd:4 */
        u8 offset27;
        __be64 local_ca_guid;

        u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
        return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
        rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
                                        (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
        return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
        rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
                                        (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

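/*
 * A REP for an XRC connection carries the peer's QP number in the local
 * EECN field (see cm_rep_get_local_eecn() above), so this helper returns
 * that field for IB_QPT_XRC_INI and the local QPN field otherwise.
 */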
static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
        return (qp_type == IB_QPT_XRC_INI) ?
                cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
        return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
                                           __be32 starting_psn)
{
        rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
                                        (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
        return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
                                               u8 target_ack_delay)
{
        rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
                                  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
        return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
        rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
                                  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
        return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
                                        u8 flow_ctrl)
{
        rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
                                  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
        return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
                                              u8 rnr_retry_count)
{
        rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
                                  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
        return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
        rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
                                  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;

        u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
        /* remote QPN/EECN:24, rsvd:8 */
        __be32 offset8;

        u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
        return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
        dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
                                        (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;

        u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;

        __be32 rsvd8;
        /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
        __be32 offset12;
        __be32 rsvd16;

        __be16 alt_local_lid;
        __be16 alt_remote_lid;
        union ib_gid alt_local_gid;
        union ib_gid alt_remote_gid;
        /* flow label:20, rsvd:4, traffic class:8 */
        __be32 offset56;
        u8 alt_hop_limit;
        /* rsvd:2, packet rate:6 */
        u8 offset61;
        /* SL:4, subnet local:1, rsvd:3 */
        u8 offset62;
        /* local ACK timeout:5, rsvd:3 */
        u8 offset63;

        u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
        return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
        lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
                                        (be32_to_cpu(lap_msg->offset12) &
                                         0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
        return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
                                                  u8 resp_timeout)
{
        lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
                                        (be32_to_cpu(lap_msg->offset12) &
                                         0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
        return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
                                         __be32 flow_label)
{
        lap_msg->offset56 = cpu_to_be32(
                            (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
                            (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
        return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
                                            u8 traffic_class)
{
        lap_msg->offset56 = cpu_to_be32(traffic_class |
                                        (be32_to_cpu(lap_msg->offset56) &
                                         0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
        return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
                                          u8 packet_rate)
{
        lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
        return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
        lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
        return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
                                           u8 subnet_local)
{
        /* Preserve the other bits of offset62 (SL and reserved). */
        lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
                             (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
        return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
                                                u8 local_ack_timeout)
{
        lap_msg->offset63 = (local_ack_timeout << 3) |
                            (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;

        u8 info_length;
        u8 ap_status;
        __be16 rsvd;
        u8 info[IB_CM_APR_INFO_LENGTH];

        u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

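/* Service ID Resolution (SIDR) request and reply messages. */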
struct cm_sidr_req_msg {
        struct ib_mad_hdr hdr;

        __be32 request_id;
        __be16 pkey;
        __be16 rsvd;
        __be64 service_id;

        u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
        struct ib_mad_hdr hdr;

        __be32 request_id;
        u8 status;
        u8 info_length;
        __be16 rsvd;
        /* QPN:24, rsvd:8 */
        __be32 offset8;
        __be64 service_id;
        __be32 qkey;
        u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

        u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
        return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
                                       __be32 qpn)
{
        sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
                                            (be32_to_cpu(sidr_rep_msg->offset8) &
                                             0x000000FF));
}

#endif /* CM_MSGS_H */