/*
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */
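
/*
 * The offset<N> members in the message structures below are named after
 * the field's byte offset within the CM message payload (the bytes that
 * follow the MAD header), and each one packs several spec-level fields;
 * the comment above each member lists the field widths in bits.  The
 * accessor helpers convert to host order, shift and mask the field of
 * interest, and convert back, so that, for example, offset32 viewed in
 * host order is (local QPN << 8) | responder resources.
 */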

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

#define CM_REQ_ATTR_ID		__constant_htons(0x0010)
#define CM_MRA_ATTR_ID		__constant_htons(0x0011)
#define CM_REJ_ATTR_ID		__constant_htons(0x0012)
#define CM_REP_ATTR_ID		__constant_htons(0x0013)
#define CM_RTU_ATTR_ID		__constant_htons(0x0014)
#define CM_DREQ_ATTR_ID		__constant_htons(0x0015)
#define CM_DREP_ATTR_ID		__constant_htons(0x0016)
#define CM_SIDR_REQ_ATTR_ID	__constant_htons(0x0017)
#define CM_SIDR_REP_ATTR_ID	__constant_htons(0x0018)
#define CM_LAP_ATTR_ID		__constant_htons(0x0019)
#define CM_APR_ATTR_ID		__constant_htons(0x001A)

enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, rsvd:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}
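
/*
 * Example: with qpn == cpu_to_be32(0x123456), the setter above stores the
 * QPN in the top 24 bits of offset32 and preserves the low byte (responder
 * resources); the getter then returns cpu_to_be32(0x123456) again.  The
 * remaining offset accessors in this file follow the same shift-and-mask
 * pattern.
 */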

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						 0xFFFFFFF9) | 0x2);
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9);
	}
}
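
/*
 * Bits 2:1 of offset40 (the bits cleared by the 0xFFFFFFF9 mask above)
 * carry the REQ transport service type: 0 selects RC and 1 selects UC,
 * matching cm_req_get_qp_type(); any other qp_type falls back to the RC
 * encoding.
 */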

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}
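
/*
 * primary_offset88 packs the 20-bit flow label into its top 20 bits and
 * the 6-bit packet rate into its low 6 bits, with 6 reserved bits in
 * between; the flow label accessors above therefore shift by 12, and the
 * packet rate accessors below mask with 0x3F.
 */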

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}
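
/*
 * The MRA service timeout occupies the top 5 bits of offset9.  As with the
 * other CM timeout fields in this file, it is carried as a 5-bit code
 * rather than an absolute time; the interpretation of the code is defined
 * by the IB CM specification, not here.
 */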

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
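
/*
 * The "reject info length" field accessed below gives the number of valid
 * bytes of additional rejection information in ari[]; the REJ reason code
 * itself is carried separately in the 16-bit reason field.
 */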

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(lap_msg->offset12) &
					 0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(lap_msg->offset12) &
					 0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
			    (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
			    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					(be32_to_cpu(lap_msg->offset56) &
					 0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
					 0x000000FF));
}

#endif /* CM_MSGS_H */