drivers/infiniband/core/cm_msgs.h
1 /*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34 #if !defined(CM_MSGS_H)
35 #define CM_MSGS_H
36
37 #include <rdma/ib_mad.h>
38
39 /*
40 * Parameters to routines below should be in network-byte order, and values
41 * are returned in network-byte order.
42 */
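/*
 * Example (sketch, not part of the original header): the accessors below
 * take and return big-endian values, so a caller working with host-order
 * numbers converts explicitly on the way in and out, e.g.
 *
 *	cm_req_set_local_qpn(req_msg, cpu_to_be32(qpn));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 */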
43
44 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
45
46 #define CM_REQ_ATTR_ID __constant_htons(0x0010)
47 #define CM_MRA_ATTR_ID __constant_htons(0x0011)
48 #define CM_REJ_ATTR_ID __constant_htons(0x0012)
49 #define CM_REP_ATTR_ID __constant_htons(0x0013)
50 #define CM_RTU_ATTR_ID __constant_htons(0x0014)
51 #define CM_DREQ_ATTR_ID __constant_htons(0x0015)
52 #define CM_DREP_ATTR_ID __constant_htons(0x0016)
53 #define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
54 #define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
55 #define CM_LAP_ATTR_ID __constant_htons(0x0019)
56 #define CM_APR_ATTR_ID __constant_htons(0x001A)
57
58 enum cm_msg_sequence {
59 CM_MSG_SEQUENCE_REQ,
60 CM_MSG_SEQUENCE_LAP,
61 CM_MSG_SEQUENCE_DREQ,
62 CM_MSG_SEQUENCE_SIDR
63 };
64
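/*
 * Note on naming (editorial observation from the layout below): the
 * offsetNN members track the byte offset of each word within the CM
 * message payload that follows the MAD header, e.g. local_comm_id at
 * offset 0, rsvd4 at offset 4, and the QPN/responder-resources word at
 * offset 32.  Words that carry more than one protocol field are kept as
 * raw offsetNN storage and are packed and unpacked by the inline
 * accessors that follow each structure.
 */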
65 struct cm_req_msg {
66 struct ib_mad_hdr hdr;
67
68 __be32 local_comm_id;
69 __be32 rsvd4;
70 __be64 service_id;
71 __be64 local_ca_guid;
72 __be32 rsvd24;
73 __be32 local_qkey;
74 /* local QPN:24, responder resources:8 */
75 __be32 offset32;
76 /* local EECN:24, initiator depth:8 */
77 __be32 offset36;
78 /*
79 * remote EECN:24, remote CM response timeout:5,
80 * transport service type:2, end-to-end flow control:1
81 */
82 __be32 offset40;
83 /* starting PSN:24, local CM response timeout:5, retry count:3 */
84 __be32 offset44;
85 __be16 pkey;
86 /* path MTU:4, RDC exists:1, RNR retry count:3. */
87 u8 offset50;
88 /* max CM Retries:4, SRQ:1, rsvd:3 */
89 u8 offset51;
90
91 __be16 primary_local_lid;
92 __be16 primary_remote_lid;
93 union ib_gid primary_local_gid;
94 union ib_gid primary_remote_gid;
95 /* flow label:20, rsvd:6, packet rate:6 */
96 __be32 primary_offset88;
97 u8 primary_traffic_class;
98 u8 primary_hop_limit;
99 /* SL:4, subnet local:1, rsvd:3 */
100 u8 primary_offset94;
101 /* local ACK timeout:5, rsvd:3 */
102 u8 primary_offset95;
103
104 __be16 alt_local_lid;
105 __be16 alt_remote_lid;
106 union ib_gid alt_local_gid;
107 union ib_gid alt_remote_gid;
108 /* flow label:20, rsvd:6, packet rate:6 */
109 __be32 alt_offset132;
110 u8 alt_traffic_class;
111 u8 alt_hop_limit;
112 /* SL:4, subnet local:1, rsvd:3 */
113 u8 alt_offset138;
114 /* local ACK timeout:5, rsvd:3 */
115 u8 alt_offset139;
116
117 u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
118
119 } __attribute__ ((packed));
120
121 static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
122 {
123 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
124 }
125
126 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
127 {
128 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
129 (be32_to_cpu(req_msg->offset32) &
130 0x000000FF));
131 }
132
133 static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
134 {
135 return (u8) be32_to_cpu(req_msg->offset32);
136 }
137
138 static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
139 {
140 req_msg->offset32 = cpu_to_be32(resp_res |
141 (be32_to_cpu(req_msg->offset32) &
142 0xFFFFFF00));
143 }
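/*
 * Worked example (sketch; cm_example_pack_offset32() is hypothetical and
 * not part of the original header): offset32 carries the 24-bit local
 * QPN in its upper three bytes and the 8-bit responder resources value
 * in its low byte, so the two setters above compose without clobbering
 * each other.
 */
static inline void cm_example_pack_offset32(struct cm_req_msg *req_msg,
					    u32 qpn, u8 resp_res)
{
	cm_req_set_local_qpn(req_msg, cpu_to_be32(qpn));
	cm_req_set_resp_res(req_msg, resp_res);
}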
144
145 static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
146 {
147 return (u8) be32_to_cpu(req_msg->offset36);
148 }
149
150 static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
151 u8 init_depth)
152 {
153 req_msg->offset36 = cpu_to_be32(init_depth |
154 (be32_to_cpu(req_msg->offset36) &
155 0xFFFFFF00));
156 }
157
158 static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
159 {
160 return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
161 }
162
163 static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
164 u8 resp_timeout)
165 {
166 req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
167 (be32_to_cpu(req_msg->offset40) &
168 0xFFFFFF07));
169 }
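/*
 * Note (per the InfiniBand CM spec, added for context): the 5-bit
 * timeout fields handled here, like the local ACK timeout and MRA
 * service timeout below, are exponents; the actual timeout is roughly
 * 4.096 usec * 2^value.
 */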
170
171 static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
172 {
173 u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
174 switch(transport_type) {
175 case 0: return IB_QPT_RC;
176 case 1: return IB_QPT_UC;
177 default: return 0;
178 }
179 }
180
181 static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
182 enum ib_qp_type qp_type)
183 {
184 switch(qp_type) {
185 case IB_QPT_UC:
186 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
187 req_msg->offset40) &
188 0xFFFFFFF9) | 0x2);
189 break;
190 default:
191 req_msg->offset40 = cpu_to_be32(be32_to_cpu(
192 req_msg->offset40) &
193 0xFFFFFFF9);
194 }
195 }
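/*
 * Example (sketch): the transport service type occupies bits 2:1 of
 * offset40, encoding RC as 0 and UC as 1, so a set/get pair round-trips:
 *
 *	cm_req_set_qp_type(req_msg, IB_QPT_UC);
 *	BUG_ON(cm_req_get_qp_type(req_msg) != IB_QPT_UC);
 */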
196
197 static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
198 {
199 return be32_to_cpu(req_msg->offset40) & 0x1;
200 }
201
202 static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
203 u8 flow_ctrl)
204 {
205 req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
206 (be32_to_cpu(req_msg->offset40) &
207 0xFFFFFFFE));
208 }
209
210 static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
211 {
212 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
213 }
214
215 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
216 __be32 starting_psn)
217 {
218 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
219 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
220 }
221
222 static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
223 {
224 return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
225 }
226
227 static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
228 u8 resp_timeout)
229 {
230 req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
231 (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
232 }
233
234 static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
235 {
236 return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
237 }
238
239 static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
240 u8 retry_count)
241 {
242 req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
243 (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
244 }
245
246 static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
247 {
248 return req_msg->offset50 >> 4;
249 }
250
251 static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
252 {
253 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
254 }
255
256 static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
257 {
258 return req_msg->offset50 & 0x7;
259 }
260
261 static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
262 u8 rnr_retry_count)
263 {
264 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
265 (rnr_retry_count & 0x7));
266 }
267
268 static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
269 {
270 return req_msg->offset51 >> 4;
271 }
272
273 static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
274 u8 retries)
275 {
276 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
277 }
278
279 static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
280 {
281 return (req_msg->offset51 & 0x8) >> 3;
282 }
283
284 static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
285 {
286 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
287 ((srq & 0x1) << 3));
288 }
289
290 static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
291 {
292 return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
293 }
294
295 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
296 __be32 flow_label)
297 {
298 req_msg->primary_offset88 = cpu_to_be32(
299 (be32_to_cpu(req_msg->primary_offset88) &
300 0x00000FFF) |
301 (be32_to_cpu(flow_label) << 12));
302 }
303
304 static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
305 {
306 return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
307 }
308
309 static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
310 u8 rate)
311 {
312 req_msg->primary_offset88 = cpu_to_be32(
313 (be32_to_cpu(req_msg->primary_offset88) &
314 0xFFFFFFC0) | (rate & 0x3F));
315 }
316
317 static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
318 {
319 return (u8) (req_msg->primary_offset94 >> 4);
320 }
321
322 static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
323 {
324 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
325 (sl << 4));
326 }
327
328 static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
329 {
330 return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
331 }
332
333 static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
334 u8 subnet_local)
335 {
336 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
337 ((subnet_local & 0x1) << 3));
338 }
339
340 static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
341 {
342 return (u8) (req_msg->primary_offset95 >> 3);
343 }
344
345 static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
346 u8 local_ack_timeout)
347 {
348 req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
349 (local_ack_timeout << 3));
350 }
351
352 static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
353 {
354 return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
355 }
356
357 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
358 __be32 flow_label)
359 {
360 req_msg->alt_offset132 = cpu_to_be32(
361 (be32_to_cpu(req_msg->alt_offset132) &
362 0x00000FFF) |
363 (be32_to_cpu(flow_label) << 12));
364 }
365
366 static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
367 {
368 return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
369 }
370
371 static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
372 u8 rate)
373 {
374 req_msg->alt_offset132 = cpu_to_be32(
375 (be32_to_cpu(req_msg->alt_offset132) &
376 0xFFFFFFC0) | (rate & 0x3F));
377 }
378
379 static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
380 {
381 return (u8) (req_msg->alt_offset138 >> 4);
382 }
383
384 static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
385 {
386 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
387 (sl << 4));
388 }
389
390 static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
391 {
392 return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
393 }
394
395 static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
396 u8 subnet_local)
397 {
398 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
399 ((subnet_local & 0x1) << 3));
400 }
401
402 static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
403 {
404 return (u8) (req_msg->alt_offset139 >> 3);
405 }
406
407 static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
408 u8 local_ack_timeout)
409 {
410 req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
411 (local_ack_timeout << 3));
412 }
413
414 /* Message REJected or MRAed */
415 enum cm_msg_response {
416 CM_MSG_RESPONSE_REQ = 0x0,
417 CM_MSG_RESPONSE_REP = 0x1,
418 CM_MSG_RESPONSE_OTHER = 0x2
419 };
420
421 struct cm_mra_msg {
422 struct ib_mad_hdr hdr;
423
424 __be32 local_comm_id;
425 __be32 remote_comm_id;
426 /* message MRAed:2, rsvd:6 */
427 u8 offset8;
428 /* service timeout:5, rsvd:3 */
429 u8 offset9;
430
431 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
432
433 } __attribute__ ((packed));
434
435 static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
436 {
437 return (u8) (mra_msg->offset8 >> 6);
438 }
439
440 static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
441 {
442 mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
443 }
444
445 static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
446 {
447 return (u8) (mra_msg->offset9 >> 3);
448 }
449
450 static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
451 u8 service_timeout)
452 {
453 mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
454 (service_timeout << 3));
455 }
456
457 struct cm_rej_msg {
458 struct ib_mad_hdr hdr;
459
460 __be32 local_comm_id;
461 __be32 remote_comm_id;
462 /* message REJected:2, rsvd:6 */
463 u8 offset8;
464 /* reject info length:7, rsvd:1. */
465 u8 offset9;
466 __be16 reason;
467 u8 ari[IB_CM_REJ_ARI_LENGTH];
468
469 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
470
471 } __attribute__ ((packed));
472
473 static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
474 {
475 return (u8) (rej_msg->offset8 >> 6);
476 }
477
478 static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
479 {
480 rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
481 }
482
483 static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
484 {
485 return (u8) (rej_msg->offset9 >> 1);
486 }
487
488 static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
489 u8 len)
490 {
491 rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
492 }
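/*
 * Note (editorial): the reject info length recorded above is the number
 * of valid bytes in the ari[] (additional rejection information) array,
 * up to IB_CM_REJ_ARI_LENGTH.
 */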
493
494 struct cm_rep_msg {
495 struct ib_mad_hdr hdr;
496
497 __be32 local_comm_id;
498 __be32 remote_comm_id;
499 __be32 local_qkey;
500 /* local QPN:24, rsvd:8 */
501 __be32 offset12;
502 /* local EECN:24, rsvd:8 */
503 __be32 offset16;
504 /* starting PSN:24 rsvd:8 */
505 __be32 offset20;
506 u8 resp_resources;
507 u8 initiator_depth;
508 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
509 u8 offset26;
510 /* RNR retry count:3, SRQ:1, rsvd:5 */
511 u8 offset27;
512 __be64 local_ca_guid;
513
514 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
515
516 } __attribute__ ((packed));
517
518 static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
519 {
520 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
521 }
522
523 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
524 {
525 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
526 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
527 }
528
529 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
530 {
531 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
532 }
533
534 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
535 __be32 starting_psn)
536 {
537 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
538 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
539 }
540
541 static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
542 {
543 return (u8) (rep_msg->offset26 >> 3);
544 }
545
546 static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
547 u8 target_ack_delay)
548 {
549 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
550 (target_ack_delay << 3));
551 }
552
553 static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
554 {
555 return (u8) ((rep_msg->offset26 & 0x06) >> 1);
556 }
557
558 static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
559 {
560 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
561 ((failover & 0x3) << 1));
562 }
563
564 static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
565 {
566 return (u8) (rep_msg->offset26 & 0x01);
567 }
568
569 static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
570 u8 flow_ctrl)
571 {
572 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
573 (flow_ctrl & 0x1));
574 }
575
576 static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
577 {
578 return (u8) (rep_msg->offset27 >> 5);
579 }
580
581 static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
582 u8 rnr_retry_count)
583 {
584 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
585 (rnr_retry_count << 5));
586 }
587
588 static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
589 {
590 return (u8) ((rep_msg->offset27 >> 4) & 0x1);
591 }
592
593 static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
594 {
595 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
596 ((srq & 0x1) << 4));
597 }
598
599 struct cm_rtu_msg {
600 struct ib_mad_hdr hdr;
601
602 __be32 local_comm_id;
603 __be32 remote_comm_id;
604
605 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
606
607 } __attribute__ ((packed));
608
609 struct cm_dreq_msg {
610 struct ib_mad_hdr hdr;
611
612 __be32 local_comm_id;
613 __be32 remote_comm_id;
614 /* remote QPN/EECN:24, rsvd:8 */
615 __be32 offset8;
616
617 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
618
619 } __attribute__ ((packed));
620
621 static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
622 {
623 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
624 }
625
626 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
627 {
628 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
629 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
630 }
631
632 struct cm_drep_msg {
633 struct ib_mad_hdr hdr;
634
635 __be32 local_comm_id;
636 __be32 remote_comm_id;
637
638 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
639
640 } __attribute__ ((packed));
641
642 struct cm_lap_msg {
643 struct ib_mad_hdr hdr;
644
645 __be32 local_comm_id;
646 __be32 remote_comm_id;
647
648 __be32 rsvd8;
649 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
650 __be32 offset12;
651 __be32 rsvd16;
652
653 __be16 alt_local_lid;
654 __be16 alt_remote_lid;
655 union ib_gid alt_local_gid;
656 union ib_gid alt_remote_gid;
657 /* flow label:20, rsvd:4, traffic class:8 */
658 __be32 offset56;
659 u8 alt_hop_limit;
660 /* rsvd:2, packet rate:6 */
661 u8 offset61;
662 /* SL:4, subnet local:1, rsvd:3 */
663 u8 offset62;
664 /* local ACK timeout:5, rsvd:3 */
665 u8 offset63;
666
667 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
668 } __attribute__ ((packed));
669
670 static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
671 {
672 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
673 }
674
675 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
676 {
677 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
678 (be32_to_cpu(lap_msg->offset12) &
679 0x000000FF));
680 }
681
682 static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
683 {
684 return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
685 }
686
687 static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
688 u8 resp_timeout)
689 {
690 lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
691 (be32_to_cpu(lap_msg->offset12) &
692 0xFFFFFF07));
693 }
694
695 static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
696 {
697 return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
698 }
699
700 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
701 __be32 flow_label)
702 {
703 lap_msg->offset56 = cpu_to_be32(
704 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
705 (be32_to_cpu(flow_label) << 12));
706 }
707
708 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
709 {
710 return (u8) be32_to_cpu(lap_msg->offset56);
711 }
712
713 static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
714 u8 traffic_class)
715 {
716 lap_msg->offset56 = cpu_to_be32(traffic_class |
717 (be32_to_cpu(lap_msg->offset56) &
718 0xFFFFFF00));
719 }
720
721 static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
722 {
723 return lap_msg->offset61 & 0x3F;
724 }
725
726 static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
727 u8 packet_rate)
728 {
729 lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
730 }
731
732 static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
733 {
734 return lap_msg->offset62 >> 4;
735 }
736
737 static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
738 {
739 lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
740 }
741
742 static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
743 {
744 return (lap_msg->offset62 >> 3) & 0x1;
745 }
746
747 static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
748 u8 subnet_local)
749 {
750 lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
751 (lap_msg->offset62 & 0xF7);
752 }
753 static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
754 {
755 return lap_msg->offset63 >> 3;
756 }
757
758 static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
759 u8 local_ack_timeout)
760 {
761 lap_msg->offset63 = (local_ack_timeout << 3) |
762 (lap_msg->offset63 & 0x07);
763 }
764
765 struct cm_apr_msg {
766 struct ib_mad_hdr hdr;
767
768 __be32 local_comm_id;
769 __be32 remote_comm_id;
770
771 u8 info_length;
772 u8 ap_status;
773 u8 info[IB_CM_APR_INFO_LENGTH];
774
775 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
776 } __attribute__ ((packed));
777
778 struct cm_sidr_req_msg {
779 struct ib_mad_hdr hdr;
780
781 __be32 request_id;
782 __be16 pkey;
783 __be16 rsvd;
784 __be64 service_id;
785
786 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
787 } __attribute__ ((packed));
788
789 struct cm_sidr_rep_msg {
790 struct ib_mad_hdr hdr;
791
792 __be32 request_id;
793 u8 status;
794 u8 info_length;
795 __be16 rsvd;
796 /* QPN:24, rsvd:8 */
797 __be32 offset8;
798 __be64 service_id;
799 __be32 qkey;
800 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
801
802 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
803 } __attribute__ ((packed));
804
805 static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
806 {
807 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
808 }
809
810 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
811 __be32 qpn)
812 {
813 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
814 (be32_to_cpu(sidr_rep_msg->offset8) &
815 0x000000FF));
816 }
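/*
 * Round-trip example (sketch): the SIDR REP QPN occupies the top 24 bits
 * of offset8, so a 24-bit host-order QPN survives a set/get pair:
 *
 *	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(qpn));
 *	then be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)) yields qpn.
 */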
817
818 #endif /* CM_MSGS_H */