/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"
enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE	= 0x8915
};
struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};
enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}
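
/*
 * Note: a QP counts as a tunnel QP only on the master (PPF), where the
 * device reserves a block of 8 QPNs per slave starting at
 * base_tunnel_sqpn; the range check below mirrors that layout.
 */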
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
		qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
		8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;
	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;
	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}
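
/*
 * WQE address helpers: each work queue (SQ/RQ) occupies a contiguous
 * region of the QP buffer starting at its offset, with fixed-size
 * entries of 1 << wqe_shift bytes, so entry n lives at
 * offset + (n << wqe_shift).
 */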
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}
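
/*
 * Build a NOP work request of 'size' bytes at SQ index n: a control
 * segment (plus a zeroed address vector for UD QPs), padded out with an
 * inline data segment.  The NEC bit keeps these NOPs from generating
 * completions; they exist only to keep real WRs from wrapping.
 */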
static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}
/* Post NOP WQE to prevent wrap-around in the middle of WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on QP %06x\n",
				type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}
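
/*
 * For example, a UD QP without LSO pays a 16-byte control segment plus
 * a 48-byte datagram segment of overhead per send WQE; the rest of the
 * descriptor is left for scatter/gather or inline data segments.
 */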
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of WR.  We
	 * set NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size	  = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}
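
/*
 * Proxy SQPs receive each tunneled MAD prefixed with a
 * mlx4_ib_proxy_sqp_hdr, so pre-allocate and DMA-map one receive
 * buffer per RQ entry to hold that header.
 */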
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}
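
/* XRC QPs and QPs attached to an SRQ have no receive queue of their own. */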
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else if (mlx4_is_master(dev->dev))
				qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
			else
				qp_type = MLX4_IB_QPT_PROXY_SMI;
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
			+ tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
			if (!qp)
				return -ENOMEM;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);

	qp->state	 = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
		 * BlueFlame setup flow wrongly causes VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
		else
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
		if (err)
			goto err_proxy;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;
	return 0;

err_qpn:
	if (!sqpn)
		mlx4_qp_release_range(dev->dev, qpn, 1);
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}
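
/*
 * When two distinct CQs must be locked, always take the one with the
 * lower CQN first; every path that locks a send/recv CQ pair uses the
 * same order, which rules out ABBA deadlock between QP teardown paths.
 */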
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}
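
/*
 * Common teardown path: kick the QP back to RESET, scrub its CQEs from
 * the CQs (kernel QPs only) under the CQ locks, then release the QPN,
 * MTT, and buffers acquired in create_qp_common.
 */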
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}
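
/*
 * Special QP numbers: base_sqpn + 0..1 serve as QP0 for ports 1..2 and
 * base_sqpn + 2..3 as QP1, which is what the
 * (qp_type == IB_QPT_SMI ? 0 : 2) + port_num - 1 arithmetic below
 * encodes; multi-function hosts use the per-port proxy QPNs instead.
 */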
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags &&
	    (udata ||
	     ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp);
		if (err)
			return ERR_PTR(err);

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(pd->device), init_attr),
				       &qp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
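
/*
 * Bit 6 of sched_queue selects the physical port (0 => port 1,
 * 1 => port 2); the 0xbf mask above clears just that bit before the
 * new port value is OR'ed in.  mlx4_set_path below builds the full
 * sched_queue byte, including the SL bits, from scratch.
 */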
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	int err;
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	u8 mac[6];
	int is_mcast;
	u16 vlan_tag;
	int vidx;

	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
		if (err)
			return err;

		memcpy(path->dmac, mac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* use index 0 into MAC table for IBoE */
		path->grh_mylmc &= 0x80;

		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
		if (vlan_tag < 0x1000) {
			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
				return -ENOENT;

			path->vlan_index = vidx;
			path->fl = 1 << 6;
		}
	} else
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
						dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	/* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
	if (attr_mask & IB_QP_QKEY) {
		if (qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
			context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
		else {
			if (mlx4_is_mfunc(dev->dev) &&
			    !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
			    (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
			    MLX4_RESERVED_QKEY_BASE) {
				pr_err("Cannot use reserved QKEY"
				       " 0x%x (range 0xffff0000..0xffffffff"
				       " is reserved)\n", attr->qkey);
				err = -EINVAL;
				goto out;
			}
			context->qkey = cpu_to_be32(attr->qkey);
		}
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR  &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD ||
	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
		    qp->mlx4_ib_qp_type &
		    (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
			if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
				context->pri_path.fl = 0x80;
		} else {
			if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
				context->pri_path.fl = 0x80;
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				pr_warn("INIT_PORT failed for port %d\n",
					qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq): NULL);
		if (send_cq != recv_cq)
			mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq_next_wqe = 0;

		if (qp->rq.wqe_cnt)
			*qp->db.db = 0;
	}

out:
	kfree(context);
	return err;
}
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		pr_debug("qpn 0x%x: invalid attribute mask specified "
			 "for transition %d to %d. qp_type %d,"
			 " attr_mask 0x%x\n",
			 ibqp->qp_num, cur_state, new_state,
			 ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		pr_debug("qpn 0x%x: invalid port number (%d) specified "
			 "for transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->port_num, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
	     IB_LINK_LAYER_ETHERNET))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
				 "for transition %d to %d. qp_type %d\n",
				 ibqp->qp_num, attr->pkey_index, cur_state,
				 new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			 "Transition %d to %d. qp_type %d\n",
			 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			 new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
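
/*
 * Build the UD header for a QP0 MAD sent via a proxy/tunnel QP: the
 * packet is forced to loop back on VL15, addressed to the per-port
 * qp0_tunnel QPN (or to the WR's remote QPN when the master owns the
 * tunnel), and authenticated with the paravirtualized qkey.
 */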
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
				  struct ib_send_wr *wr,
				  void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
	struct ib_device *ib_dev = &mdev->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	u32 qkey;
	int send_size;
	int header_size;
	int spc;
	int i;

	if (wr->opcode != IB_WR_SEND)
		return -EINVAL;

	send_size = 0;

	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	/* for proxy-qp0 sends, need to add in size of tunnel header */
	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
		send_size += sizeof (struct mlx4_ib_tunnel_header);

	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);

	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
		sqp->ud_header.lrh.source_lid =
			cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	/* force loopback */
	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	sqp->ud_header.lrh.virtual_lane    = 0;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	else
		sqp->ud_header.bth.destination_qpn =
			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
		return -EINVAL;
	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);

	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
	sqp->ud_header.immediate_present = 0;

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = sqp->qp.ibqp.device;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	struct net_device *ndev;
	union ib_gid sgid;
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	int err = 0;
	u16 vlan = 0xffff;
	bool is_eth;
	bool is_vlan = false;
	bool is_grh;

	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	if (is_eth) {
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sgid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				subnet_prefix;
			sgid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
				guid_cache[ah->av.ib.gid_index];
		} else {
			err = ib_get_cached_gid(ib_dev,
						be32_to_cpu(ah->av.ib.port_pd) >> 24,
						ah->av.ib.gid_index, &sgid);
			if (err)
				return err;
		}

		vlan = rdma_get_vlan_id(&sgid);
		is_vlan = vlan < 0x1000;
	}
	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);

	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label    =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
			/* When multi-function is enabled, the ib_core gid
			 * indexes don't necessarily match the hw ones, so
			 * we must use our own cache */
			sqp->ud_header.grh.source_gid.global.subnet_prefix =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
						       subnet_prefix;
			sqp->ud_header.grh.source_gid.global.interface_id =
				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
					       guid_cache[ah->av.ib.gid_index];
		} else
			ib_get_cached_gid(ib_dev,
					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
					  ah->av.ib.gid_index,
					  &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data    = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		u8 *smac;
		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;

		mlx->sched_prio = cpu_to_be16(pcp);

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
		/* FIXME: cache smac value? */
		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
		if (!ndev)
			return -ENODEV;
		smac = ndev->dev_addr;
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan) {
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
		} else {
			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		pr_err("built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				pr_err("  [%02x] ", i * 4);
			pr_cont(" %08x",
				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				pr_cont("\n");
		}
		pr_err("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
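
/*
 * Ring-overflow test: the unlocked head/tail read is an optimistic fast
 * path; only when the queue looks full is the CQ lock taken, which
 * serializes against completion processing and yields a current tail.
 */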
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list		= cpu_to_be64(mfrpl->map);
	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0]	= 0;
	fseg->reserved[1]	= 0;
}
static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
{
	bseg->flags1 =
		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
	bseg->flags2 = 0;
	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
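/*
 * Added note: the hardware segment reuses the same two fields for all
 * three flavors.  For IB_WR_ATOMIC_CMP_AND_SWP, swap_add holds the swap
 * value and compare holds compare_add; for plain fetch-and-add, swap_add
 * holds the addend and compare is zeroed; the masked fetch-and-add
 * carries its mask in the compare field instead.
 */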
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
}
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    struct ib_send_wr *wr, enum ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	/* This function used only for sending on QP1 proxies */
	dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				   wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}
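/*
 * Sizing example (added commentary; assumes the fixed part of the LSO
 * segment is just the 4-byte mss_hdr_size word): for a 58-byte header,
 * halign = ALIGN(4 + 58, 16) = 64, which fits in one cache line and
 * leaves *blh clear; a 62-byte header gives halign = 80 >
 * MLX4_IB_CACHE_LINE_SIZE and sets the "big LSO header" bit (1 << 6).
 */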
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	__be32 blh;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;
		blh = 0;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (qp->mlx4_ib_qp_type) {
		case MLX4_IB_QPT_RC:
		case MLX4_IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			case IB_WR_BIND_MW:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_bind_seg(wqe, wr);
				wqe  += sizeof (struct mlx4_wqe_bind_seg);
				size += sizeof (struct mlx4_wqe_bind_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case MLX4_IB_QPT_TUN_SMI_OWNER:
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_TUN_SMI:
		case MLX4_IB_QPT_TUN_GSI:
			/* this is a UD qp used in MAD responses to slaves. */
			set_datagram_seg(wqe, wr);
			/* set the forced-loopback bit in the data seg av */
			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			break;
		case MLX4_IB_QPT_UD:
			set_datagram_seg(wqe, wr);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe  += seglen;
				size += seglen / 16;
			}
			break;

		case MLX4_IB_QPT_PROXY_SMI_OWNER:
			if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;
			}
			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			/* to start tunnel header on a cache-line boundary */
			add_zero_len_inline(wqe);
			wqe += 16;
			size++;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;
		case MLX4_IB_QPT_PROXY_SMI:
			/* don't allow QP0 sends on guests */
			err = -ENOSYS;
			*bad_wr = wr;
			goto out;
		case MLX4_IB_QPT_PROXY_GSI:
			/* If we are tunneling special qps, this is a UD qp.
			 * In this case we first add a UD segment targeting
			 * the tunnel qp, and then add a header with address
			 * information */
			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
			build_tunnel_header(wr, wqe, &seglen);
			wqe  += seglen;
			size += seglen / 16;
			break;

		case MLX4_IB_QPT_SMI:
		case MLX4_IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe  += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;
		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
			     qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
			     qp->mlx4_ib_qp_type &
			     (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
			set_data_seg(dseg, wr->sg_list + i);

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			*bad_wr = wr;
			err = -EINVAL;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn,
		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

		stamp_send_wqe(qp, stamp, size * 16);

		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
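/*
 * Illustrative caller sketch (not part of the driver): posting one
 * signaled RDMA WRITE through the verbs layer, which dispatches here for
 * mlx4 QPs.  The qp, dma_addr, len, lkey, remote_addr and rkey names are
 * hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id      = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	struct ib_send_wr *bad_swr;
 *
 *	swr.wr.rdma.remote_addr = remote_addr;
 *	swr.wr.rdma.rkey        = rkey;
 *	if (ib_post_send(qp, &swr, &bad_swr))
 *		pr_err("post_send failed\n");
 */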
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int max_gs;
	int i;

	max_gs = qp->rq.max_gs;
	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			ib_dma_sync_single_for_device(ibqp->device,
						      qp->sqp_proxy_rcv[ind].map,
						      sizeof (struct mlx4_ib_proxy_sqp_hdr),
						      DMA_FROM_DEVICE);
			scat->byte_count =
				cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
			/* use dma lkey from upper layer entry */
			scat->lkey = cpu_to_be32(wr->sg_list->lkey);
			scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
			scat++;
			max_gs--;
		}

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
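/*
 * Illustrative caller sketch (not part of the driver): posting a single
 * receive buffer.  The qp, dma_addr, len and lkey names are
 * hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr rwr = {
 *		.wr_id   = 2,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_rwr;
 *
 *	if (ib_post_recv(qp, &rwr, &bad_rwr))
 *		pr_err("post_recv failed\n");
 */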
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default: return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ibdev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num	  = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid	  = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
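/*
 * Decode example (added commentary): bit 6 of sched_queue selects the
 * port.  For sched_queue = 0x43 on an IB link, port_num = 2 and
 * sl = (0x43 >> 2) & 0xf = 0; on an Ethernet link the SL is instead
 * rebuilt as ((sched_queue >> 3) & 0x7) | ((sched_queue & 4) << 1).
 */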
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context.qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	qp_init_attr->sq_sig_type =
		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}