/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
        MTHCA_QP_STATE_RST      = 0,
        MTHCA_QP_STATE_INIT     = 1,
        MTHCA_QP_STATE_RTR      = 2,
        MTHCA_QP_STATE_RTS      = 3,
        MTHCA_QP_STATE_SQE      = 4,
        MTHCA_QP_STATE_SQD      = 5,
        MTHCA_QP_STATE_ERR      = 6,
        MTHCA_QP_STATE_DRAINING = 7
};
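/*
 * The MTHCA_QP_STATE_* values above match the hardware encoding of the
 * QP state: the state is carried in the top four bits (31:28) of the
 * qp_context flags word, which is how mthca_modify_qp() and
 * mthca_query_qp() below pack and unpack it.
 */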
enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};
enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 << 8,
        /* params1 */
        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,
        /* params2 */
        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
        MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};
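/*
 * The MTHCA_QP_BIT_S* flags above belong in the params1 (send side)
 * word of the QP context and the MTHCA_QP_BIT_R* flags in params2
 * (receive side); get_hw_access_flags() and mthca_modify_qp() below OR
 * them into those words.
 */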
struct mthca_qp_path {
        __be32 port_pkey;
        u8     rnr_retry;
        u8     g_mylmc;
        __be16 rlid;
        u8     ackto;
        u8     mgid_index;
        u8     static_rate;
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
        __be32 flags;
        __be32 tavor_sched_queue;       /* Reserved on Arbel */
        u8     mtu_msgmax;
        u8     rq_size_stride;          /* Reserved on Tavor */
        u8     sq_size_stride;          /* Reserved on Tavor */
        u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
        __be32 usr_page;
        __be32 local_qpn;
        __be32 remote_qpn;
        u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
        __be32 rdd;
        __be32 pd;
        __be32 wqe_base;
        __be32 wqe_lkey;
        __be32 params1;
        __be32 reserved2;
        __be32 next_send_psn;
        __be32 cqn_snd;
        __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
        __be32 snd_db_index;    /* (debugging only entries) */
        __be32 last_acked_psn;
        __be32 ssn;
        __be32 params2;
        __be32 rnr_nextrecvpsn;
        __be32 ra_buff_indx;
        __be32 cqn_rcv;
        __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
        __be32 rcv_db_index;    /* (debugging only entries) */
        __be32 qkey;
        __be32 srqn;
        __be32 rmsn;
        __be16 rq_wqe_counter;  /* reserved on Tavor */
        __be16 sq_wqe_counter;  /* reserved on Tavor */
        u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
} __attribute__((packed));
enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
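/*
 * The MTHCA_QP_OPTPAR_* bits above are accumulated into
 * qp_param->opt_param_mask by mthca_modify_qp() to tell the firmware
 * which optional QP context fields are valid for a given transition.
 */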
static const u8 mthca_opcode[] = {
        [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
        [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
        [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}
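/*
 * WQEs live in a single buffer per QP: receive queue WQEs start at
 * offset 0 and send queue WQEs start at qp->send_wqe_offset.  For a
 * direct buffer the WQE address is a simple shift of the index; for a
 * paged buffer the (offset, index) pair is translated through
 * qp->queue.page_list, as the two helpers below show.
 */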
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}
static void mthca_wq_reset(struct mthca_wq *wq)
{
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
        wq->tail      = 0;
}
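/*
 * Async event handler: look the QP up under qp_table.lock and hold a
 * reference across the call into the consumer's event handler so that
 * mthca_free_qp() cannot tear the QP down underneath us.
 */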
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                ++qp->refcount;
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (event_type == IB_EVENT_PATH_MIG)
                qp->port = qp->alt_port;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        spin_lock(&dev->qp_table.lock);
        if (!--qp->refcount)
                wake_up(&qp->wait);
        spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.port_width = dev->limits.port_width_cap;
        param.vl_cap     = dev->limits.vl_cap;
        param.mtu_cap    = dev->limits.mtu_cap;
        param.gid_cap    = dev->limits.gid_table_len;
        param.pkey_cap   = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        u8 dest_rd_atomic;
        u32 access_flags;
        u32 hw_access_flags = 0;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                dest_rd_atomic = attr->max_dest_rd_atomic;
        else
                dest_rd_atomic = qp->resp_depth;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                access_flags = attr->qp_access_flags;
        else
                access_flags = qp->atomic_rd_en;

        if (!dest_rd_atomic)
                access_flags &= IB_ACCESS_REMOTE_WRITE;

        if (access_flags & IB_ACCESS_REMOTE_READ)
                hw_access_flags |= MTHCA_QP_BIT_RRE;
        if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                hw_access_flags |= MTHCA_QP_BIT_RAE;
        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                hw_access_flags |= MTHCA_QP_BIT_RWE;

        return cpu_to_be32(hw_access_flags);
}
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
        switch (mthca_state) {
        case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
        case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
        case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
        case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
        case MTHCA_QP_STATE_DRAINING:
        case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
        case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
        case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
        default:                      return -1;
        }
}
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
        switch (mthca_mig_state) {
        case 0:  return IB_MIG_ARMED;
        case 1:  return IB_MIG_REARM;
        case 3:  return IB_MIG_MIGRATED;
        default: return -1;
        }
}
static int to_ib_qp_access_flags(int mthca_flags)
{
        int ib_flags = 0;

        if (mthca_flags & MTHCA_QP_BIT_RRE)
                ib_flags |= IB_ACCESS_REMOTE_READ;
        if (mthca_flags & MTHCA_QP_BIT_RWE)
                ib_flags |= IB_ACCESS_REMOTE_WRITE;
        if (mthca_flags & MTHCA_QP_BIT_RAE)
                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

        return ib_flags;
}
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
                          struct mthca_qp_path *path)
{
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
                return;

        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
        ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
                                                     path->static_rate & 0xf,
                                                     ib_ah_attr->port_num);
        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
                ib_ah_attr->grh.hop_limit  = path->hop_limit;
                ib_ah_attr->grh.traffic_class =
                        (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
                ib_ah_attr->grh.flow_label =
                        be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
                memcpy(ib_ah_attr->grh.dgid.raw,
                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
        }
}
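/*
 * mthca_query_qp() reads the QP context back from the HCA with the
 * QUERY_QP firmware command into a mailbox and translates the hardware
 * fields into struct ib_qp_attr using the helpers above.
 */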
428 int mthca_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
429 struct ib_qp_init_attr
*qp_init_attr
)
431 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
432 struct mthca_qp
*qp
= to_mqp(ibqp
);
434 struct mthca_mailbox
*mailbox
= NULL
;
435 struct mthca_qp_param
*qp_param
;
436 struct mthca_qp_context
*context
;
440 if (qp
->state
== IB_QPS_RESET
) {
441 qp_attr
->qp_state
= IB_QPS_RESET
;
445 mailbox
= mthca_alloc_mailbox(dev
, GFP_KERNEL
);
447 return PTR_ERR(mailbox
);
449 err
= mthca_QUERY_QP(dev
, qp
->qpn
, 0, mailbox
, &status
);
453 mthca_warn(dev
, "QUERY_QP returned status %02x\n", status
);
458 qp_param
= mailbox
->buf
;
459 context
= &qp_param
->context
;
460 mthca_state
= be32_to_cpu(context
->flags
) >> 28;
462 qp_attr
->qp_state
= to_ib_qp_state(mthca_state
);
463 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
464 qp_attr
->path_mig_state
=
465 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
466 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
467 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
468 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
469 qp_attr
->dest_qp_num
= be32_to_cpu(context
->remote_qpn
) & 0xffffff;
470 qp_attr
->qp_access_flags
=
471 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
473 if (qp
->transport
== RC
|| qp
->transport
== UC
) {
474 to_ib_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
475 to_ib_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
476 qp_attr
->alt_pkey_index
=
477 be32_to_cpu(context
->alt_path
.port_pkey
) & 0x7f;
478 qp_attr
->alt_port_num
= qp_attr
->alt_ah_attr
.port_num
;
481 qp_attr
->pkey_index
= be32_to_cpu(context
->pri_path
.port_pkey
) & 0x7f;
483 (be32_to_cpu(context
->pri_path
.port_pkey
) >> 24) & 0x3;
485 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
486 qp_attr
->sq_draining
= mthca_state
== MTHCA_QP_STATE_DRAINING
;
488 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
490 qp_attr
->max_dest_rd_atomic
=
491 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
492 qp_attr
->min_rnr_timer
=
493 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
494 qp_attr
->timeout
= context
->pri_path
.ackto
>> 3;
495 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
496 qp_attr
->rnr_retry
= context
->pri_path
.rnr_retry
>> 5;
497 qp_attr
->alt_timeout
= context
->alt_path
.ackto
>> 3;
500 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
501 qp_attr
->cap
.max_send_wr
= qp
->sq
.max
;
502 qp_attr
->cap
.max_recv_wr
= qp
->rq
.max
;
503 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
504 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
505 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
507 qp_init_attr
->cap
= qp_attr
->cap
;
510 mthca_free_mailbox(dev
, mailbox
);
514 static int mthca_path_set(struct mthca_dev
*dev
, struct ib_ah_attr
*ah
,
515 struct mthca_qp_path
*path
, u8 port
)
517 path
->g_mylmc
= ah
->src_path_bits
& 0x7f;
518 path
->rlid
= cpu_to_be16(ah
->dlid
);
519 path
->static_rate
= mthca_get_rate(dev
, ah
->static_rate
, port
);
521 if (ah
->ah_flags
& IB_AH_GRH
) {
522 if (ah
->grh
.sgid_index
>= dev
->limits
.gid_table_len
) {
523 mthca_dbg(dev
, "sgid_index (%u) too large. max is %d\n",
524 ah
->grh
.sgid_index
, dev
->limits
.gid_table_len
-1);
528 path
->g_mylmc
|= 1 << 7;
529 path
->mgid_index
= ah
->grh
.sgid_index
;
530 path
->hop_limit
= ah
->grh
.hop_limit
;
531 path
->sl_tclass_flowlabel
=
532 cpu_to_be32((ah
->sl
<< 28) |
533 (ah
->grh
.traffic_class
<< 20) |
534 (ah
->grh
.flow_label
));
535 memcpy(path
->rgid
, ah
->grh
.dgid
.raw
, 16);
537 path
->sl_tclass_flowlabel
= cpu_to_be32(ah
->sl
<< 28);
542 int mthca_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
, int attr_mask
,
543 struct ib_udata
*udata
)
545 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
546 struct mthca_qp
*qp
= to_mqp(ibqp
);
547 enum ib_qp_state cur_state
, new_state
;
548 struct mthca_mailbox
*mailbox
;
549 struct mthca_qp_param
*qp_param
;
550 struct mthca_qp_context
*qp_context
;
555 mutex_lock(&qp
->mutex
);
557 if (attr_mask
& IB_QP_CUR_STATE
) {
558 cur_state
= attr
->cur_qp_state
;
560 spin_lock_irq(&qp
->sq
.lock
);
561 spin_lock(&qp
->rq
.lock
);
562 cur_state
= qp
->state
;
563 spin_unlock(&qp
->rq
.lock
);
564 spin_unlock_irq(&qp
->sq
.lock
);
567 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
569 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
, attr_mask
)) {
570 mthca_dbg(dev
, "Bad QP transition (transport %d) "
571 "%d->%d with attr 0x%08x\n",
572 qp
->transport
, cur_state
, new_state
,
577 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
582 if ((attr_mask
& IB_QP_PKEY_INDEX
) &&
583 attr
->pkey_index
>= dev
->limits
.pkey_table_len
) {
584 mthca_dbg(dev
, "P_Key index (%u) too large. max is %d\n",
585 attr
->pkey_index
, dev
->limits
.pkey_table_len
-1);
589 if ((attr_mask
& IB_QP_PORT
) &&
590 (attr
->port_num
== 0 || attr
->port_num
> dev
->limits
.num_ports
)) {
591 mthca_dbg(dev
, "Port number (%u) is invalid\n", attr
->port_num
);
595 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
596 attr
->max_rd_atomic
> dev
->limits
.max_qp_init_rdma
) {
597 mthca_dbg(dev
, "Max rdma_atomic as initiator %u too large (max is %d)\n",
598 attr
->max_rd_atomic
, dev
->limits
.max_qp_init_rdma
);
602 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
603 attr
->max_dest_rd_atomic
> 1 << dev
->qp_table
.rdb_shift
) {
604 mthca_dbg(dev
, "Max rdma_atomic as responder %u too large (max %d)\n",
605 attr
->max_dest_rd_atomic
, 1 << dev
->qp_table
.rdb_shift
);
609 mailbox
= mthca_alloc_mailbox(dev
, GFP_KERNEL
);
610 if (IS_ERR(mailbox
)) {
611 err
= PTR_ERR(mailbox
);
614 qp_param
= mailbox
->buf
;
615 qp_context
= &qp_param
->context
;
616 memset(qp_param
, 0, sizeof *qp_param
);
618 qp_context
->flags
= cpu_to_be32((to_mthca_state(new_state
) << 28) |
619 (to_mthca_st(qp
->transport
) << 16));
620 qp_context
->flags
|= cpu_to_be32(MTHCA_QP_BIT_DE
);
621 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
))
622 qp_context
->flags
|= cpu_to_be32(MTHCA_QP_PM_MIGRATED
<< 11);
624 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE
);
625 switch (attr
->path_mig_state
) {
626 case IB_MIG_MIGRATED
:
627 qp_context
->flags
|= cpu_to_be32(MTHCA_QP_PM_MIGRATED
<< 11);
630 qp_context
->flags
|= cpu_to_be32(MTHCA_QP_PM_REARM
<< 11);
633 qp_context
->flags
|= cpu_to_be32(MTHCA_QP_PM_ARMED
<< 11);
638 /* leave tavor_sched_queue as 0 */
640 if (qp
->transport
== MLX
|| qp
->transport
== UD
)
641 qp_context
->mtu_msgmax
= (IB_MTU_2048
<< 5) | 11;
642 else if (attr_mask
& IB_QP_PATH_MTU
) {
643 if (attr
->path_mtu
< IB_MTU_256
|| attr
->path_mtu
> IB_MTU_2048
) {
644 mthca_dbg(dev
, "path MTU (%u) is invalid\n",
648 qp_context
->mtu_msgmax
= (attr
->path_mtu
<< 5) | 31;
651 if (mthca_is_memfree(dev
)) {
653 qp_context
->rq_size_stride
= ilog2(qp
->rq
.max
) << 3;
654 qp_context
->rq_size_stride
|= qp
->rq
.wqe_shift
- 4;
657 qp_context
->sq_size_stride
= ilog2(qp
->sq
.max
) << 3;
658 qp_context
->sq_size_stride
|= qp
->sq
.wqe_shift
- 4;
661 /* leave arbel_sched_queue as 0 */
663 if (qp
->ibqp
.uobject
)
664 qp_context
->usr_page
=
665 cpu_to_be32(to_mucontext(qp
->ibqp
.uobject
->context
)->uar
.index
);
667 qp_context
->usr_page
= cpu_to_be32(dev
->driver_uar
.index
);
668 qp_context
->local_qpn
= cpu_to_be32(qp
->qpn
);
669 if (attr_mask
& IB_QP_DEST_QPN
) {
670 qp_context
->remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
673 if (qp
->transport
== MLX
)
674 qp_context
->pri_path
.port_pkey
|=
675 cpu_to_be32(qp
->port
<< 24);
677 if (attr_mask
& IB_QP_PORT
) {
678 qp_context
->pri_path
.port_pkey
|=
679 cpu_to_be32(attr
->port_num
<< 24);
680 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM
);
684 if (attr_mask
& IB_QP_PKEY_INDEX
) {
685 qp_context
->pri_path
.port_pkey
|=
686 cpu_to_be32(attr
->pkey_index
);
687 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX
);
690 if (attr_mask
& IB_QP_RNR_RETRY
) {
691 qp_context
->alt_path
.rnr_retry
= qp_context
->pri_path
.rnr_retry
=
692 attr
->rnr_retry
<< 5;
693 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY
|
694 MTHCA_QP_OPTPAR_ALT_RNR_RETRY
);
697 if (attr_mask
& IB_QP_AV
) {
698 if (mthca_path_set(dev
, &attr
->ah_attr
, &qp_context
->pri_path
,
699 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
))
702 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH
);
705 if (ibqp
->qp_type
== IB_QPT_RC
&&
706 cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
707 u8 sched_queue
= ibqp
->uobject
? 0x2 : 0x1;
709 if (mthca_is_memfree(dev
))
710 qp_context
->rlkey_arbel_sched_queue
|= sched_queue
;
712 qp_context
->tavor_sched_queue
|= cpu_to_be32(sched_queue
);
714 qp_param
->opt_param_mask
|=
715 cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE
);
718 if (attr_mask
& IB_QP_TIMEOUT
) {
719 qp_context
->pri_path
.ackto
= attr
->timeout
<< 3;
720 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT
);
723 if (attr_mask
& IB_QP_ALT_PATH
) {
724 if (attr
->alt_pkey_index
>= dev
->limits
.pkey_table_len
) {
725 mthca_dbg(dev
, "Alternate P_Key index (%u) too large. max is %d\n",
726 attr
->alt_pkey_index
, dev
->limits
.pkey_table_len
-1);
730 if (attr
->alt_port_num
== 0 || attr
->alt_port_num
> dev
->limits
.num_ports
) {
731 mthca_dbg(dev
, "Alternate port number (%u) is invalid\n",
736 if (mthca_path_set(dev
, &attr
->alt_ah_attr
, &qp_context
->alt_path
,
737 attr
->alt_ah_attr
.port_num
))
740 qp_context
->alt_path
.port_pkey
|= cpu_to_be32(attr
->alt_pkey_index
|
741 attr
->alt_port_num
<< 24);
742 qp_context
->alt_path
.ackto
= attr
->alt_timeout
<< 3;
743 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH
);
747 qp_context
->pd
= cpu_to_be32(to_mpd(ibqp
->pd
)->pd_num
);
748 /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
749 qp_context
->wqe_lkey
= cpu_to_be32(qp
->mr
.ibmr
.lkey
);
750 qp_context
->params1
= cpu_to_be32((MTHCA_ACK_REQ_FREQ
<< 28) |
751 (MTHCA_FLIGHT_LIMIT
<< 24) |
753 if (qp
->sq_policy
== IB_SIGNAL_ALL_WR
)
754 qp_context
->params1
|= cpu_to_be32(MTHCA_QP_BIT_SSC
);
755 if (attr_mask
& IB_QP_RETRY_CNT
) {
756 qp_context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
757 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT
);
760 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
761 if (attr
->max_rd_atomic
) {
762 qp_context
->params1
|=
763 cpu_to_be32(MTHCA_QP_BIT_SRE
|
765 qp_context
->params1
|=
766 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
768 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX
);
771 if (attr_mask
& IB_QP_SQ_PSN
)
772 qp_context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
773 qp_context
->cqn_snd
= cpu_to_be32(to_mcq(ibqp
->send_cq
)->cqn
);
775 if (mthca_is_memfree(dev
)) {
776 qp_context
->snd_wqe_base_l
= cpu_to_be32(qp
->send_wqe_offset
);
777 qp_context
->snd_db_index
= cpu_to_be32(qp
->sq
.db_index
);
780 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
781 if (attr
->max_dest_rd_atomic
)
782 qp_context
->params2
|=
783 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
785 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX
);
788 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
789 qp_context
->params2
|= get_hw_access_flags(qp
, attr
, attr_mask
);
790 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_RWE
|
791 MTHCA_QP_OPTPAR_RRE
|
792 MTHCA_QP_OPTPAR_RAE
);
795 qp_context
->params2
|= cpu_to_be32(MTHCA_QP_BIT_RSC
);
798 qp_context
->params2
|= cpu_to_be32(MTHCA_QP_BIT_RIC
);
800 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
801 qp_context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
802 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT
);
804 if (attr_mask
& IB_QP_RQ_PSN
)
805 qp_context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
807 qp_context
->ra_buff_indx
=
808 cpu_to_be32(dev
->qp_table
.rdb_base
+
809 ((qp
->qpn
& (dev
->limits
.num_qps
- 1)) * MTHCA_RDB_ENTRY_SIZE
<<
810 dev
->qp_table
.rdb_shift
));
812 qp_context
->cqn_rcv
= cpu_to_be32(to_mcq(ibqp
->recv_cq
)->cqn
);
814 if (mthca_is_memfree(dev
))
815 qp_context
->rcv_db_index
= cpu_to_be32(qp
->rq
.db_index
);
817 if (attr_mask
& IB_QP_QKEY
) {
818 qp_context
->qkey
= cpu_to_be32(attr
->qkey
);
819 qp_param
->opt_param_mask
|= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY
);
823 qp_context
->srqn
= cpu_to_be32(1 << 24 |
824 to_msrq(ibqp
->srq
)->srqn
);
826 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
827 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&&
828 attr
->en_sqd_async_notify
)
831 err
= mthca_MODIFY_QP(dev
, cur_state
, new_state
, qp
->qpn
, 0,
832 mailbox
, sqd_event
, &status
);
836 mthca_warn(dev
, "modify QP %d->%d returned status %02x.\n",
837 cur_state
, new_state
, status
);
842 qp
->state
= new_state
;
843 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
844 qp
->atomic_rd_en
= attr
->qp_access_flags
;
845 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
846 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
847 if (attr_mask
& IB_QP_PORT
)
848 qp
->port
= attr
->port_num
;
849 if (attr_mask
& IB_QP_ALT_PATH
)
850 qp
->alt_port
= attr
->alt_port_num
;
853 store_attrs(to_msqp(qp
), attr
, attr_mask
);
856 * If we moved QP0 to RTR, bring the IB link up; if we moved
857 * QP0 to RESET or ERROR, bring the link back down.
859 if (is_qp0(dev
, qp
)) {
860 if (cur_state
!= IB_QPS_RTR
&&
861 new_state
== IB_QPS_RTR
)
862 init_port(dev
, qp
->port
);
864 if (cur_state
!= IB_QPS_RESET
&&
865 cur_state
!= IB_QPS_ERR
&&
866 (new_state
== IB_QPS_RESET
||
867 new_state
== IB_QPS_ERR
))
868 mthca_CLOSE_IB(dev
, qp
->port
, &status
);
872 * If we moved a kernel QP to RESET, clean up all old CQ
873 * entries and reinitialize the QP.
875 if (new_state
== IB_QPS_RESET
&& !qp
->ibqp
.uobject
) {
876 mthca_cq_clean(dev
, to_mcq(qp
->ibqp
.recv_cq
), qp
->qpn
,
877 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
878 if (qp
->ibqp
.send_cq
!= qp
->ibqp
.recv_cq
)
879 mthca_cq_clean(dev
, to_mcq(qp
->ibqp
.send_cq
), qp
->qpn
, NULL
);
881 mthca_wq_reset(&qp
->sq
);
882 qp
->sq
.last
= get_send_wqe(qp
, qp
->sq
.max
- 1);
884 mthca_wq_reset(&qp
->rq
);
885 qp
->rq
.last
= get_recv_wqe(qp
, qp
->rq
.max
- 1);
887 if (mthca_is_memfree(dev
)) {
894 mthca_free_mailbox(dev
, mailbox
);
897 mutex_unlock(&qp
->mutex
);
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        return max_data_size;
}
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
        /* We don't support inline data for kernel QPs (yet). */
        return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp,
                                                min(dev->limits.max_desc_sz,
                                                    1 << qp->sq.wqe_shift));

        qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
                              max_data_size / sizeof (struct mthca_data_seg));
        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
                               sizeof (struct mthca_next_seg)) /
                              sizeof (struct mthca_data_seg));
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
960 static int mthca_alloc_wqe_buf(struct mthca_dev
*dev
,
967 size
= sizeof (struct mthca_next_seg
) +
968 qp
->rq
.max_gs
* sizeof (struct mthca_data_seg
);
970 if (size
> dev
->limits
.max_desc_sz
)
973 for (qp
->rq
.wqe_shift
= 6; 1 << qp
->rq
.wqe_shift
< size
;
977 size
= qp
->sq
.max_gs
* sizeof (struct mthca_data_seg
);
978 switch (qp
->transport
) {
980 size
+= 2 * sizeof (struct mthca_data_seg
);
984 size
+= mthca_is_memfree(dev
) ?
985 sizeof (struct mthca_arbel_ud_seg
) :
986 sizeof (struct mthca_tavor_ud_seg
);
990 size
+= sizeof (struct mthca_raddr_seg
);
994 size
+= sizeof (struct mthca_raddr_seg
);
996 * An atomic op will require an atomic segment, a
997 * remote address segment and one scatter entry.
999 size
= max_t(int, size
,
1000 sizeof (struct mthca_atomic_seg
) +
1001 sizeof (struct mthca_raddr_seg
) +
1002 sizeof (struct mthca_data_seg
));
1009 /* Make sure that we have enough space for a bind request */
1010 size
= max_t(int, size
, sizeof (struct mthca_bind_seg
));
1012 size
+= sizeof (struct mthca_next_seg
);
1014 if (size
> dev
->limits
.max_desc_sz
)
1017 for (qp
->sq
.wqe_shift
= 6; 1 << qp
->sq
.wqe_shift
< size
;
1021 qp
->send_wqe_offset
= ALIGN(qp
->rq
.max
<< qp
->rq
.wqe_shift
,
1022 1 << qp
->sq
.wqe_shift
);
1025 * If this is a userspace QP, we don't actually have to
1026 * allocate anything. All we need is to calculate the WQE
1027 * sizes and the send_wqe_offset, so we're done now.
1029 if (pd
->ibpd
.uobject
)
1032 size
= PAGE_ALIGN(qp
->send_wqe_offset
+
1033 (qp
->sq
.max
<< qp
->sq
.wqe_shift
));
1035 qp
->wrid
= kmalloc((qp
->rq
.max
+ qp
->sq
.max
) * sizeof (u64
),
1040 err
= mthca_buf_alloc(dev
, size
, MTHCA_MAX_DIRECT_QP_SIZE
,
1041 &qp
->queue
, &qp
->is_direct
, pd
, 0, &qp
->mr
);
static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
                                       (qp->sq.max << qp->sq.wqe_shift)),
                       &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
}
1061 static int mthca_map_memfree(struct mthca_dev
*dev
,
1062 struct mthca_qp
*qp
)
1066 if (mthca_is_memfree(dev
)) {
1067 ret
= mthca_table_get(dev
, dev
->qp_table
.qp_table
, qp
->qpn
);
1071 ret
= mthca_table_get(dev
, dev
->qp_table
.eqp_table
, qp
->qpn
);
1075 ret
= mthca_table_get(dev
, dev
->qp_table
.rdb_table
,
1076 qp
->qpn
<< dev
->qp_table
.rdb_shift
);
1085 mthca_table_put(dev
, dev
->qp_table
.eqp_table
, qp
->qpn
);
1088 mthca_table_put(dev
, dev
->qp_table
.qp_table
, qp
->qpn
);
static void mthca_unmap_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
{
        mthca_table_put(dev, dev->qp_table.rdb_table,
                        qp->qpn << dev->qp_table.rdb_shift);
        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
1102 static int mthca_alloc_memfree(struct mthca_dev
*dev
,
1103 struct mthca_qp
*qp
)
1105 if (mthca_is_memfree(dev
)) {
1106 qp
->rq
.db_index
= mthca_alloc_db(dev
, MTHCA_DB_TYPE_RQ
,
1107 qp
->qpn
, &qp
->rq
.db
);
1108 if (qp
->rq
.db_index
< 0)
1111 qp
->sq
.db_index
= mthca_alloc_db(dev
, MTHCA_DB_TYPE_SQ
,
1112 qp
->qpn
, &qp
->sq
.db
);
1113 if (qp
->sq
.db_index
< 0) {
1114 mthca_free_db(dev
, MTHCA_DB_TYPE_RQ
, qp
->rq
.db_index
);
static void mthca_free_memfree(struct mthca_dev *dev,
                               struct mthca_qp *qp)
{
        if (mthca_is_memfree(dev)) {
                mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
        }
}
1131 static int mthca_alloc_qp_common(struct mthca_dev
*dev
,
1132 struct mthca_pd
*pd
,
1133 struct mthca_cq
*send_cq
,
1134 struct mthca_cq
*recv_cq
,
1135 enum ib_sig_type send_policy
,
1136 struct mthca_qp
*qp
)
1142 init_waitqueue_head(&qp
->wait
);
1143 mutex_init(&qp
->mutex
);
1144 qp
->state
= IB_QPS_RESET
;
1145 qp
->atomic_rd_en
= 0;
1147 qp
->sq_policy
= send_policy
;
1148 mthca_wq_reset(&qp
->sq
);
1149 mthca_wq_reset(&qp
->rq
);
1151 spin_lock_init(&qp
->sq
.lock
);
1152 spin_lock_init(&qp
->rq
.lock
);
1154 ret
= mthca_map_memfree(dev
, qp
);
1158 ret
= mthca_alloc_wqe_buf(dev
, pd
, qp
);
1160 mthca_unmap_memfree(dev
, qp
);
1164 mthca_adjust_qp_caps(dev
, pd
, qp
);
1167 * If this is a userspace QP, we're done now. The doorbells
1168 * will be allocated and buffers will be initialized in
1171 if (pd
->ibpd
.uobject
)
1174 ret
= mthca_alloc_memfree(dev
, qp
);
1176 mthca_free_wqe_buf(dev
, qp
);
1177 mthca_unmap_memfree(dev
, qp
);
1181 if (mthca_is_memfree(dev
)) {
1182 struct mthca_next_seg
*next
;
1183 struct mthca_data_seg
*scatter
;
1184 int size
= (sizeof (struct mthca_next_seg
) +
1185 qp
->rq
.max_gs
* sizeof (struct mthca_data_seg
)) / 16;
1187 for (i
= 0; i
< qp
->rq
.max
; ++i
) {
1188 next
= get_recv_wqe(qp
, i
);
1189 next
->nda_op
= cpu_to_be32(((i
+ 1) & (qp
->rq
.max
- 1)) <<
1191 next
->ee_nds
= cpu_to_be32(size
);
1193 for (scatter
= (void *) (next
+ 1);
1194 (void *) scatter
< (void *) next
+ (1 << qp
->rq
.wqe_shift
);
1196 scatter
->lkey
= cpu_to_be32(MTHCA_INVAL_LKEY
);
1199 for (i
= 0; i
< qp
->sq
.max
; ++i
) {
1200 next
= get_send_wqe(qp
, i
);
1201 next
->nda_op
= cpu_to_be32((((i
+ 1) & (qp
->sq
.max
- 1)) <<
1203 qp
->send_wqe_offset
);
1207 qp
->sq
.last
= get_send_wqe(qp
, qp
->sq
.max
- 1);
1208 qp
->rq
.last
= get_recv_wqe(qp
, qp
->rq
.max
- 1);
1213 static int mthca_set_qp_size(struct mthca_dev
*dev
, struct ib_qp_cap
*cap
,
1214 struct mthca_pd
*pd
, struct mthca_qp
*qp
)
1216 int max_data_size
= mthca_max_data_size(dev
, qp
, dev
->limits
.max_desc_sz
);
1218 /* Sanity check QP size before proceeding */
1219 if (cap
->max_send_wr
> dev
->limits
.max_wqes
||
1220 cap
->max_recv_wr
> dev
->limits
.max_wqes
||
1221 cap
->max_send_sge
> dev
->limits
.max_sg
||
1222 cap
->max_recv_sge
> dev
->limits
.max_sg
||
1223 cap
->max_inline_data
> mthca_max_inline_data(pd
, max_data_size
))
1227 * For MLX transport we need 2 extra S/G entries:
1228 * one for the header and one for the checksum at the end
1230 if (qp
->transport
== MLX
&& cap
->max_recv_sge
+ 2 > dev
->limits
.max_sg
)
1233 if (mthca_is_memfree(dev
)) {
1234 qp
->rq
.max
= cap
->max_recv_wr
?
1235 roundup_pow_of_two(cap
->max_recv_wr
) : 0;
1236 qp
->sq
.max
= cap
->max_send_wr
?
1237 roundup_pow_of_two(cap
->max_send_wr
) : 0;
1239 qp
->rq
.max
= cap
->max_recv_wr
;
1240 qp
->sq
.max
= cap
->max_send_wr
;
1243 qp
->rq
.max_gs
= cap
->max_recv_sge
;
1244 qp
->sq
.max_gs
= max_t(int, cap
->max_send_sge
,
1245 ALIGN(cap
->max_inline_data
+ MTHCA_INLINE_HEADER_SIZE
,
1246 MTHCA_INLINE_CHUNK_SIZE
) /
1247 sizeof (struct mthca_data_seg
));
1252 int mthca_alloc_qp(struct mthca_dev
*dev
,
1253 struct mthca_pd
*pd
,
1254 struct mthca_cq
*send_cq
,
1255 struct mthca_cq
*recv_cq
,
1256 enum ib_qp_type type
,
1257 enum ib_sig_type send_policy
,
1258 struct ib_qp_cap
*cap
,
1259 struct mthca_qp
*qp
)
1264 case IB_QPT_RC
: qp
->transport
= RC
; break;
1265 case IB_QPT_UC
: qp
->transport
= UC
; break;
1266 case IB_QPT_UD
: qp
->transport
= UD
; break;
1267 default: return -EINVAL
;
1270 err
= mthca_set_qp_size(dev
, cap
, pd
, qp
);
1274 qp
->qpn
= mthca_alloc(&dev
->qp_table
.alloc
);
1278 /* initialize port to zero for error-catching. */
1281 err
= mthca_alloc_qp_common(dev
, pd
, send_cq
, recv_cq
,
1284 mthca_free(&dev
->qp_table
.alloc
, qp
->qpn
);
1288 spin_lock_irq(&dev
->qp_table
.lock
);
1289 mthca_array_set(&dev
->qp_table
.qp
,
1290 qp
->qpn
& (dev
->limits
.num_qps
- 1), qp
);
1291 spin_unlock_irq(&dev
->qp_table
.lock
);
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
        if (send_cq == recv_cq)
                spin_lock_irq(&send_cq->lock);
        else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
        if (send_cq == recv_cq)
                spin_unlock_irq(&send_cq->lock);
        else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}
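/*
 * mthca_lock_cqs()/mthca_unlock_cqs() above always take the two CQ
 * locks in a fixed order (lower CQN first) so that two QPs attached to
 * the same pair of CQs cannot deadlock against each other.
 */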
1322 int mthca_alloc_sqp(struct mthca_dev
*dev
,
1323 struct mthca_pd
*pd
,
1324 struct mthca_cq
*send_cq
,
1325 struct mthca_cq
*recv_cq
,
1326 enum ib_sig_type send_policy
,
1327 struct ib_qp_cap
*cap
,
1330 struct mthca_sqp
*sqp
)
1332 u32 mqpn
= qpn
* 2 + dev
->qp_table
.sqp_start
+ port
- 1;
1335 sqp
->qp
.transport
= MLX
;
1336 err
= mthca_set_qp_size(dev
, cap
, pd
, &sqp
->qp
);
1340 sqp
->header_buf_size
= sqp
->qp
.sq
.max
* MTHCA_UD_HEADER_SIZE
;
1341 sqp
->header_buf
= dma_alloc_coherent(&dev
->pdev
->dev
, sqp
->header_buf_size
,
1342 &sqp
->header_dma
, GFP_KERNEL
);
1343 if (!sqp
->header_buf
)
1346 spin_lock_irq(&dev
->qp_table
.lock
);
1347 if (mthca_array_get(&dev
->qp_table
.qp
, mqpn
))
1350 mthca_array_set(&dev
->qp_table
.qp
, mqpn
, sqp
);
1351 spin_unlock_irq(&dev
->qp_table
.lock
);
1356 sqp
->qp
.port
= port
;
1358 sqp
->qp
.transport
= MLX
;
1360 err
= mthca_alloc_qp_common(dev
, pd
, send_cq
, recv_cq
,
1361 send_policy
, &sqp
->qp
);
1365 atomic_inc(&pd
->sqp_count
);
1371 * Lock CQs here, so that CQ polling code can do QP lookup
1372 * without taking a lock.
1374 mthca_lock_cqs(send_cq
, recv_cq
);
1376 spin_lock(&dev
->qp_table
.lock
);
1377 mthca_array_clear(&dev
->qp_table
.qp
, mqpn
);
1378 spin_unlock(&dev
->qp_table
.lock
);
1380 mthca_unlock_cqs(send_cq
, recv_cq
);
1383 dma_free_coherent(&dev
->pdev
->dev
, sqp
->header_buf_size
,
1384 sqp
->header_buf
, sqp
->header_dma
);
static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
        int c;

        spin_lock_irq(&dev->qp_table.lock);
        c = qp->refcount;
        spin_unlock_irq(&dev->qp_table.lock);

        return c;
}
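/*
 * QP teardown protocol: mthca_free_qp() below first unhooks the QP from
 * the qp_table array (under the CQ locks, so CQ polling can no longer
 * find it) and then waits for the reference count read by
 * get_qp_refcount() to drop to zero before freeing any resources.
 */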
1400 void mthca_free_qp(struct mthca_dev
*dev
,
1401 struct mthca_qp
*qp
)
1404 struct mthca_cq
*send_cq
;
1405 struct mthca_cq
*recv_cq
;
1407 send_cq
= to_mcq(qp
->ibqp
.send_cq
);
1408 recv_cq
= to_mcq(qp
->ibqp
.recv_cq
);
1411 * Lock CQs here, so that CQ polling code can do QP lookup
1412 * without taking a lock.
1414 mthca_lock_cqs(send_cq
, recv_cq
);
1416 spin_lock(&dev
->qp_table
.lock
);
1417 mthca_array_clear(&dev
->qp_table
.qp
,
1418 qp
->qpn
& (dev
->limits
.num_qps
- 1));
1420 spin_unlock(&dev
->qp_table
.lock
);
1422 mthca_unlock_cqs(send_cq
, recv_cq
);
1424 wait_event(qp
->wait
, !get_qp_refcount(dev
, qp
));
1426 if (qp
->state
!= IB_QPS_RESET
)
1427 mthca_MODIFY_QP(dev
, qp
->state
, IB_QPS_RESET
, qp
->qpn
, 0,
1431 * If this is a userspace QP, the buffers, MR, CQs and so on
1432 * will be cleaned up in userspace, so all we have to do is
1433 * unref the mem-free tables and free the QPN in our table.
1435 if (!qp
->ibqp
.uobject
) {
1436 mthca_cq_clean(dev
, recv_cq
, qp
->qpn
,
1437 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
1438 if (send_cq
!= recv_cq
)
1439 mthca_cq_clean(dev
, send_cq
, qp
->qpn
, NULL
);
1441 mthca_free_memfree(dev
, qp
);
1442 mthca_free_wqe_buf(dev
, qp
);
1445 mthca_unmap_memfree(dev
, qp
);
1447 if (is_sqp(dev
, qp
)) {
1448 atomic_dec(&(to_mpd(qp
->ibqp
.pd
)->sqp_count
));
1449 dma_free_coherent(&dev
->pdev
->dev
,
1450 to_msqp(qp
)->header_buf_size
,
1451 to_msqp(qp
)->header_buf
,
1452 to_msqp(qp
)->header_dma
);
1454 mthca_free(&dev
->qp_table
.alloc
, qp
->qpn
);
1457 /* Create UD header for an MLX send and build a data segment for it */
1458 static int build_mlx_header(struct mthca_dev
*dev
, struct mthca_sqp
*sqp
,
1459 int ind
, struct ib_send_wr
*wr
,
1460 struct mthca_mlx_seg
*mlx
,
1461 struct mthca_data_seg
*data
)
1467 ib_ud_header_init(256, /* assume a MAD */
1468 mthca_ah_grh_present(to_mah(wr
->wr
.ud
.ah
)),
1471 err
= mthca_read_ah(dev
, to_mah(wr
->wr
.ud
.ah
), &sqp
->ud_header
);
1474 mlx
->flags
&= ~cpu_to_be32(MTHCA_NEXT_SOLICIT
| 1);
1475 mlx
->flags
|= cpu_to_be32((!sqp
->qp
.ibqp
.qp_num
? MTHCA_MLX_VL15
: 0) |
1476 (sqp
->ud_header
.lrh
.destination_lid
==
1477 IB_LID_PERMISSIVE
? MTHCA_MLX_SLR
: 0) |
1478 (sqp
->ud_header
.lrh
.service_level
<< 8));
1479 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
1482 switch (wr
->opcode
) {
1484 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
1485 sqp
->ud_header
.immediate_present
= 0;
1487 case IB_WR_SEND_WITH_IMM
:
1488 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE
;
1489 sqp
->ud_header
.immediate_present
= 1;
1490 sqp
->ud_header
.immediate_data
= wr
->imm_data
;
1496 sqp
->ud_header
.lrh
.virtual_lane
= !sqp
->qp
.ibqp
.qp_num
? 15 : 0;
1497 if (sqp
->ud_header
.lrh
.destination_lid
== IB_LID_PERMISSIVE
)
1498 sqp
->ud_header
.lrh
.source_lid
= IB_LID_PERMISSIVE
;
1499 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->send_flags
& IB_SEND_SOLICITED
);
1500 if (!sqp
->qp
.ibqp
.qp_num
)
1501 ib_get_cached_pkey(&dev
->ib_dev
, sqp
->qp
.port
,
1502 sqp
->pkey_index
, &pkey
);
1504 ib_get_cached_pkey(&dev
->ib_dev
, sqp
->qp
.port
,
1505 wr
->wr
.ud
.pkey_index
, &pkey
);
1506 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
1507 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->wr
.ud
.remote_qpn
);
1508 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
1509 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(wr
->wr
.ud
.remote_qkey
& 0x80000000 ?
1510 sqp
->qkey
: wr
->wr
.ud
.remote_qkey
);
1511 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(sqp
->qp
.ibqp
.qp_num
);
1513 header_size
= ib_ud_header_pack(&sqp
->ud_header
,
1515 ind
* MTHCA_UD_HEADER_SIZE
);
1517 data
->byte_count
= cpu_to_be32(header_size
);
1518 data
->lkey
= cpu_to_be32(to_mpd(sqp
->qp
.ibqp
.pd
)->ntmr
.ibmr
.lkey
);
1519 data
->addr
= cpu_to_be64(sqp
->header_dma
+
1520 ind
* MTHCA_UD_HEADER_SIZE
);
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
                                    struct ib_cq *ib_cq)
{
        unsigned cur;
        struct mthca_cq *cq;

        cur = wq->head - wq->tail;
        if (likely(cur + nreq < wq->max))
                return 0;

        cq = to_mcq(ib_cq);
        spin_lock(&cq->lock);
        cur = wq->head - wq->tail;
        spin_unlock(&cq->lock);

        return cur + nreq >= wq->max;
}
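/*
 * mthca_wq_overflow() compares free-running head/tail counters.  The
 * fast path avoids locking; only when the queue looks full do we take
 * the CQ lock to resynchronize with completion processing before
 * deciding whether the post really overflows the work queue.
 */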
1543 int mthca_tavor_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
1544 struct ib_send_wr
**bad_wr
)
1546 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
1547 struct mthca_qp
*qp
= to_mqp(ibqp
);
1550 unsigned long flags
;
1560 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
1562 /* XXX check that state is OK to post send */
1564 ind
= qp
->sq
.next_ind
;
1566 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
1567 if (mthca_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)) {
1568 mthca_err(dev
, "SQ %06x full (%u head, %u tail,"
1569 " %d max, %d nreq)\n", qp
->qpn
,
1570 qp
->sq
.head
, qp
->sq
.tail
,
1577 wqe
= get_send_wqe(qp
, ind
);
1578 prev_wqe
= qp
->sq
.last
;
1581 ((struct mthca_next_seg
*) wqe
)->nda_op
= 0;
1582 ((struct mthca_next_seg
*) wqe
)->ee_nds
= 0;
1583 ((struct mthca_next_seg
*) wqe
)->flags
=
1584 ((wr
->send_flags
& IB_SEND_SIGNALED
) ?
1585 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE
) : 0) |
1586 ((wr
->send_flags
& IB_SEND_SOLICITED
) ?
1587 cpu_to_be32(MTHCA_NEXT_SOLICIT
) : 0) |
1589 if (wr
->opcode
== IB_WR_SEND_WITH_IMM
||
1590 wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
)
1591 ((struct mthca_next_seg
*) wqe
)->imm
= wr
->imm_data
;
1593 wqe
+= sizeof (struct mthca_next_seg
);
1594 size
= sizeof (struct mthca_next_seg
) / 16;
1596 switch (qp
->transport
) {
1598 switch (wr
->opcode
) {
1599 case IB_WR_ATOMIC_CMP_AND_SWP
:
1600 case IB_WR_ATOMIC_FETCH_AND_ADD
:
1601 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
1602 cpu_to_be64(wr
->wr
.atomic
.remote_addr
);
1603 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
1604 cpu_to_be32(wr
->wr
.atomic
.rkey
);
1605 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
1607 wqe
+= sizeof (struct mthca_raddr_seg
);
1609 if (wr
->opcode
== IB_WR_ATOMIC_CMP_AND_SWP
) {
1610 ((struct mthca_atomic_seg
*) wqe
)->swap_add
=
1611 cpu_to_be64(wr
->wr
.atomic
.swap
);
1612 ((struct mthca_atomic_seg
*) wqe
)->compare
=
1613 cpu_to_be64(wr
->wr
.atomic
.compare_add
);
1615 ((struct mthca_atomic_seg
*) wqe
)->swap_add
=
1616 cpu_to_be64(wr
->wr
.atomic
.compare_add
);
1617 ((struct mthca_atomic_seg
*) wqe
)->compare
= 0;
1620 wqe
+= sizeof (struct mthca_atomic_seg
);
1621 size
+= (sizeof (struct mthca_raddr_seg
) +
1622 sizeof (struct mthca_atomic_seg
)) / 16;
1625 case IB_WR_RDMA_WRITE
:
1626 case IB_WR_RDMA_WRITE_WITH_IMM
:
1627 case IB_WR_RDMA_READ
:
1628 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
1629 cpu_to_be64(wr
->wr
.rdma
.remote_addr
);
1630 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
1631 cpu_to_be32(wr
->wr
.rdma
.rkey
);
1632 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
1633 wqe
+= sizeof (struct mthca_raddr_seg
);
1634 size
+= sizeof (struct mthca_raddr_seg
) / 16;
1638 /* No extra segments required for sends */
1645 switch (wr
->opcode
) {
1646 case IB_WR_RDMA_WRITE
:
1647 case IB_WR_RDMA_WRITE_WITH_IMM
:
1648 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
1649 cpu_to_be64(wr
->wr
.rdma
.remote_addr
);
1650 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
1651 cpu_to_be32(wr
->wr
.rdma
.rkey
);
1652 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
1653 wqe
+= sizeof (struct mthca_raddr_seg
);
1654 size
+= sizeof (struct mthca_raddr_seg
) / 16;
1658 /* No extra segments required for sends */
1665 ((struct mthca_tavor_ud_seg
*) wqe
)->lkey
=
1666 cpu_to_be32(to_mah(wr
->wr
.ud
.ah
)->key
);
1667 ((struct mthca_tavor_ud_seg
*) wqe
)->av_addr
=
1668 cpu_to_be64(to_mah(wr
->wr
.ud
.ah
)->avdma
);
1669 ((struct mthca_tavor_ud_seg
*) wqe
)->dqpn
=
1670 cpu_to_be32(wr
->wr
.ud
.remote_qpn
);
1671 ((struct mthca_tavor_ud_seg
*) wqe
)->qkey
=
1672 cpu_to_be32(wr
->wr
.ud
.remote_qkey
);
1674 wqe
+= sizeof (struct mthca_tavor_ud_seg
);
1675 size
+= sizeof (struct mthca_tavor_ud_seg
) / 16;
1679 err
= build_mlx_header(dev
, to_msqp(qp
), ind
, wr
,
1680 wqe
- sizeof (struct mthca_next_seg
),
1686 wqe
+= sizeof (struct mthca_data_seg
);
1687 size
+= sizeof (struct mthca_data_seg
) / 16;
1691 if (wr
->num_sge
> qp
->sq
.max_gs
) {
1692 mthca_err(dev
, "too many gathers\n");
1698 for (i
= 0; i
< wr
->num_sge
; ++i
) {
1699 ((struct mthca_data_seg
*) wqe
)->byte_count
=
1700 cpu_to_be32(wr
->sg_list
[i
].length
);
1701 ((struct mthca_data_seg
*) wqe
)->lkey
=
1702 cpu_to_be32(wr
->sg_list
[i
].lkey
);
1703 ((struct mthca_data_seg
*) wqe
)->addr
=
1704 cpu_to_be64(wr
->sg_list
[i
].addr
);
1705 wqe
+= sizeof (struct mthca_data_seg
);
1706 size
+= sizeof (struct mthca_data_seg
) / 16;
1709 /* Add one more inline data segment for ICRC */
1710 if (qp
->transport
== MLX
) {
1711 ((struct mthca_data_seg
*) wqe
)->byte_count
=
1712 cpu_to_be32((1 << 31) | 4);
1713 ((u32
*) wqe
)[1] = 0;
1714 wqe
+= sizeof (struct mthca_data_seg
);
1715 size
+= sizeof (struct mthca_data_seg
) / 16;
1718 qp
->wrid
[ind
+ qp
->rq
.max
] = wr
->wr_id
;
1720 if (wr
->opcode
>= ARRAY_SIZE(mthca_opcode
)) {
1721 mthca_err(dev
, "opcode invalid\n");
1727 ((struct mthca_next_seg
*) prev_wqe
)->nda_op
=
1728 cpu_to_be32(((ind
<< qp
->sq
.wqe_shift
) +
1729 qp
->send_wqe_offset
) |
1730 mthca_opcode
[wr
->opcode
]);
1732 ((struct mthca_next_seg
*) prev_wqe
)->ee_nds
=
1733 cpu_to_be32((size0
? 0 : MTHCA_NEXT_DBD
) | size
|
1734 ((wr
->send_flags
& IB_SEND_FENCE
) ?
1735 MTHCA_NEXT_FENCE
: 0));
1739 op0
= mthca_opcode
[wr
->opcode
];
1740 f0
= wr
->send_flags
& IB_SEND_FENCE
?
1741 MTHCA_SEND_DOORBELL_FENCE
: 0;
1745 if (unlikely(ind
>= qp
->sq
.max
))
1753 doorbell
[0] = cpu_to_be32(((qp
->sq
.next_ind
<< qp
->sq
.wqe_shift
) +
1754 qp
->send_wqe_offset
) | f0
| op0
);
1755 doorbell
[1] = cpu_to_be32((qp
->qpn
<< 8) | size0
);
1759 mthca_write64(doorbell
,
1760 dev
->kar
+ MTHCA_SEND_DOORBELL
,
1761 MTHCA_GET_DOORBELL_LOCK(&dev
->doorbell_lock
));
1763 * Make sure doorbells don't leak out of SQ spinlock
1764 * and reach the HCA out of order:
1769 qp
->sq
.next_ind
= ind
;
1770 qp
->sq
.head
+= nreq
;
1772 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
1776 int mthca_tavor_post_receive(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
1777 struct ib_recv_wr
**bad_wr
)
1779 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
1780 struct mthca_qp
*qp
= to_mqp(ibqp
);
1782 unsigned long flags
;
1792 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
1794 /* XXX check that state is OK to post receive */
1796 ind
= qp
->rq
.next_ind
;
1798 for (nreq
= 0; wr
; wr
= wr
->next
) {
1799 if (mthca_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
1800 mthca_err(dev
, "RQ %06x full (%u head, %u tail,"
1801 " %d max, %d nreq)\n", qp
->qpn
,
1802 qp
->rq
.head
, qp
->rq
.tail
,
1809 wqe
= get_recv_wqe(qp
, ind
);
1810 prev_wqe
= qp
->rq
.last
;
1813 ((struct mthca_next_seg
*) wqe
)->nda_op
= 0;
1814 ((struct mthca_next_seg
*) wqe
)->ee_nds
=
1815 cpu_to_be32(MTHCA_NEXT_DBD
);
1816 ((struct mthca_next_seg
*) wqe
)->flags
= 0;
1818 wqe
+= sizeof (struct mthca_next_seg
);
1819 size
= sizeof (struct mthca_next_seg
) / 16;
1821 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
1827 for (i
= 0; i
< wr
->num_sge
; ++i
) {
1828 ((struct mthca_data_seg
*) wqe
)->byte_count
=
1829 cpu_to_be32(wr
->sg_list
[i
].length
);
1830 ((struct mthca_data_seg
*) wqe
)->lkey
=
1831 cpu_to_be32(wr
->sg_list
[i
].lkey
);
1832 ((struct mthca_data_seg
*) wqe
)->addr
=
1833 cpu_to_be64(wr
->sg_list
[i
].addr
);
1834 wqe
+= sizeof (struct mthca_data_seg
);
1835 size
+= sizeof (struct mthca_data_seg
) / 16;
1838 qp
->wrid
[ind
] = wr
->wr_id
;
1840 ((struct mthca_next_seg
*) prev_wqe
)->nda_op
=
1841 cpu_to_be32((ind
<< qp
->rq
.wqe_shift
) | 1);
1843 ((struct mthca_next_seg
*) prev_wqe
)->ee_nds
=
1844 cpu_to_be32(MTHCA_NEXT_DBD
| size
);
1850 if (unlikely(ind
>= qp
->rq
.max
))
1854 if (unlikely(nreq
== MTHCA_TAVOR_MAX_WQES_PER_RECV_DB
)) {
1857 doorbell
[0] = cpu_to_be32((qp
->rq
.next_ind
<< qp
->rq
.wqe_shift
) | size0
);
1858 doorbell
[1] = cpu_to_be32(qp
->qpn
<< 8);
1862 mthca_write64(doorbell
,
1863 dev
->kar
+ MTHCA_RECEIVE_DOORBELL
,
1864 MTHCA_GET_DOORBELL_LOCK(&dev
->doorbell_lock
));
1866 qp
->rq
.next_ind
= ind
;
1867 qp
->rq
.head
+= MTHCA_TAVOR_MAX_WQES_PER_RECV_DB
;
1874 doorbell
[0] = cpu_to_be32((qp
->rq
.next_ind
<< qp
->rq
.wqe_shift
) | size0
);
1875 doorbell
[1] = cpu_to_be32((qp
->qpn
<< 8) | nreq
);
1879 mthca_write64(doorbell
,
1880 dev
->kar
+ MTHCA_RECEIVE_DOORBELL
,
1881 MTHCA_GET_DOORBELL_LOCK(&dev
->doorbell_lock
));
1884 qp
->rq
.next_ind
= ind
;
1885 qp
->rq
.head
+= nreq
;
1888 * Make sure doorbells don't leak out of RQ spinlock and reach
1889 * the HCA out of order:
1893 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
1897 int mthca_arbel_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
1898 struct ib_send_wr
**bad_wr
)
1900 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
1901 struct mthca_qp
*qp
= to_mqp(ibqp
);
1905 unsigned long flags
;
1915 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
1917 /* XXX check that state is OK to post send */
1919 ind
= qp
->sq
.head
& (qp
->sq
.max
- 1);
1921 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
1922 if (unlikely(nreq
== MTHCA_ARBEL_MAX_WQES_PER_SEND_DB
)) {
1925 doorbell
[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB
<< 24) |
1926 ((qp
->sq
.head
& 0xffff) << 8) |
1928 doorbell
[1] = cpu_to_be32((qp
->qpn
<< 8) | size0
);
1930 qp
->sq
.head
+= MTHCA_ARBEL_MAX_WQES_PER_SEND_DB
;
1934 * Make sure that descriptors are written before
1938 *qp
->sq
.db
= cpu_to_be32(qp
->sq
.head
& 0xffff);
1941 * Make sure doorbell record is written before we
1942 * write MMIO send doorbell.
1945 mthca_write64(doorbell
,
1946 dev
->kar
+ MTHCA_SEND_DOORBELL
,
1947 MTHCA_GET_DOORBELL_LOCK(&dev
->doorbell_lock
));
1950 if (mthca_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)) {
1951 mthca_err(dev
, "SQ %06x full (%u head, %u tail,"
1952 " %d max, %d nreq)\n", qp
->qpn
,
1953 qp
->sq
.head
, qp
->sq
.tail
,
1960 wqe
= get_send_wqe(qp
, ind
);
1961 prev_wqe
= qp
->sq
.last
;
1964 ((struct mthca_next_seg
*) wqe
)->flags
=
1965 ((wr
->send_flags
& IB_SEND_SIGNALED
) ?
1966 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE
) : 0) |
1967 ((wr
->send_flags
& IB_SEND_SOLICITED
) ?
1968 cpu_to_be32(MTHCA_NEXT_SOLICIT
) : 0) |
1970 if (wr
->opcode
== IB_WR_SEND_WITH_IMM
||
1971 wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
)
1972 ((struct mthca_next_seg
*) wqe
)->imm
= wr
->imm_data
;
1974 wqe
+= sizeof (struct mthca_next_seg
);
1975 size
= sizeof (struct mthca_next_seg
) / 16;
1977 switch (qp
->transport
) {
1979 switch (wr
->opcode
) {
1980 case IB_WR_ATOMIC_CMP_AND_SWP
:
1981 case IB_WR_ATOMIC_FETCH_AND_ADD
:
1982 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
1983 cpu_to_be64(wr
->wr
.atomic
.remote_addr
);
1984 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
1985 cpu_to_be32(wr
->wr
.atomic
.rkey
);
1986 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
1988 wqe
+= sizeof (struct mthca_raddr_seg
);
1990 if (wr
->opcode
== IB_WR_ATOMIC_CMP_AND_SWP
) {
1991 ((struct mthca_atomic_seg
*) wqe
)->swap_add
=
1992 cpu_to_be64(wr
->wr
.atomic
.swap
);
1993 ((struct mthca_atomic_seg
*) wqe
)->compare
=
1994 cpu_to_be64(wr
->wr
.atomic
.compare_add
);
1996 ((struct mthca_atomic_seg
*) wqe
)->swap_add
=
1997 cpu_to_be64(wr
->wr
.atomic
.compare_add
);
1998 ((struct mthca_atomic_seg
*) wqe
)->compare
= 0;
2001 wqe
+= sizeof (struct mthca_atomic_seg
);
2002 size
+= (sizeof (struct mthca_raddr_seg
) +
2003 sizeof (struct mthca_atomic_seg
)) / 16;
2006 case IB_WR_RDMA_READ
:
2007 case IB_WR_RDMA_WRITE
:
2008 case IB_WR_RDMA_WRITE_WITH_IMM
:
2009 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
2010 cpu_to_be64(wr
->wr
.rdma
.remote_addr
);
2011 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
2012 cpu_to_be32(wr
->wr
.rdma
.rkey
);
2013 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
2014 wqe
+= sizeof (struct mthca_raddr_seg
);
2015 size
+= sizeof (struct mthca_raddr_seg
) / 16;
2019 /* No extra segments required for sends */
2026 switch (wr
->opcode
) {
2027 case IB_WR_RDMA_WRITE
:
2028 case IB_WR_RDMA_WRITE_WITH_IMM
:
2029 ((struct mthca_raddr_seg
*) wqe
)->raddr
=
2030 cpu_to_be64(wr
->wr
.rdma
.remote_addr
);
2031 ((struct mthca_raddr_seg
*) wqe
)->rkey
=
2032 cpu_to_be32(wr
->wr
.rdma
.rkey
);
2033 ((struct mthca_raddr_seg
*) wqe
)->reserved
= 0;
2034 wqe
+= sizeof (struct mthca_raddr_seg
);
2035 size
+= sizeof (struct mthca_raddr_seg
) / 16;
2039 /* No extra segments required for sends */
2046 memcpy(((struct mthca_arbel_ud_seg
*) wqe
)->av
,
2047 to_mah(wr
->wr
.ud
.ah
)->av
, MTHCA_AV_SIZE
);
2048 ((struct mthca_arbel_ud_seg
*) wqe
)->dqpn
=
2049 cpu_to_be32(wr
->wr
.ud
.remote_qpn
);
2050 ((struct mthca_arbel_ud_seg
*) wqe
)->qkey
=
2051 cpu_to_be32(wr
->wr
.ud
.remote_qkey
);
2053 wqe
+= sizeof (struct mthca_arbel_ud_seg
);
2054 size
+= sizeof (struct mthca_arbel_ud_seg
) / 16;
2058 err
= build_mlx_header(dev
, to_msqp(qp
), ind
, wr
,
2059 wqe
- sizeof (struct mthca_next_seg
),
2065 wqe
+= sizeof (struct mthca_data_seg
);
2066 size
+= sizeof (struct mthca_data_seg
) / 16;
2070 if (wr
->num_sge
> qp
->sq
.max_gs
) {
2071 mthca_err(dev
, "too many gathers\n");
2077 for (i
= 0; i
< wr
->num_sge
; ++i
) {
2078 ((struct mthca_data_seg
*) wqe
)->byte_count
=
2079 cpu_to_be32(wr
->sg_list
[i
].length
);
2080 ((struct mthca_data_seg
*) wqe
)->lkey
=
2081 cpu_to_be32(wr
->sg_list
[i
].lkey
);
2082 ((struct mthca_data_seg
*) wqe
)->addr
=
2083 cpu_to_be64(wr
->sg_list
[i
].addr
);
2084 wqe
+= sizeof (struct mthca_data_seg
);
2085 size
+= sizeof (struct mthca_data_seg
) / 16;
2088 /* Add one more inline data segment for ICRC */
2089 if (qp
->transport
== MLX
) {
2090 ((struct mthca_data_seg
*) wqe
)->byte_count
=
2091 cpu_to_be32((1 << 31) | 4);
2092 ((u32
*) wqe
)[1] = 0;
2093 wqe
+= sizeof (struct mthca_data_seg
);
2094 size
+= sizeof (struct mthca_data_seg
) / 16;
2097 qp
->wrid
[ind
+ qp
->rq
.max
] = wr
->wr_id
;
2099 if (wr
->opcode
>= ARRAY_SIZE(mthca_opcode
)) {
2100 mthca_err(dev
, "opcode invalid\n");
2106 ((struct mthca_next_seg
*) prev_wqe
)->nda_op
=
2107 cpu_to_be32(((ind
<< qp
->sq
.wqe_shift
) +
2108 qp
->send_wqe_offset
) |
2109 mthca_opcode
[wr
->opcode
]);
2111 ((struct mthca_next_seg
*) prev_wqe
)->ee_nds
=
2112 cpu_to_be32(MTHCA_NEXT_DBD
| size
|
2113 ((wr
->send_flags
& IB_SEND_FENCE
) ?
2114 MTHCA_NEXT_FENCE
: 0));
2118 op0
= mthca_opcode
[wr
->opcode
];
2119 f0
= wr
->send_flags
& IB_SEND_FENCE
?
2120 MTHCA_SEND_DOORBELL_FENCE
: 0;
2124 if (unlikely(ind
>= qp
->sq
.max
))
2130 doorbell
[0] = cpu_to_be32((nreq
<< 24) |
2131 ((qp
->sq
.head
& 0xffff) << 8) |
2133 doorbell
[1] = cpu_to_be32((qp
->qpn
<< 8) | size0
);
2135 qp
->sq
.head
+= nreq
;
2138 * Make sure that descriptors are written before
2142 *qp
->sq
.db
= cpu_to_be32(qp
->sq
.head
& 0xffff);
2145 * Make sure doorbell record is written before we
2146 * write MMIO send doorbell.
2149 mthca_write64(doorbell
,
2150 dev
->kar
+ MTHCA_SEND_DOORBELL
,
2151 MTHCA_GET_DOORBELL_LOCK(&dev
->doorbell_lock
));
2155 * Make sure doorbells don't leak out of SQ spinlock and reach
2156 * the HCA out of order:
2160 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
2164 int mthca_arbel_post_receive(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
2165 struct ib_recv_wr
**bad_wr
)
2167 struct mthca_dev
*dev
= to_mdev(ibqp
->device
);
2168 struct mthca_qp
*qp
= to_mqp(ibqp
);
2169 unsigned long flags
;
2176 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
2178 /* XXX check that state is OK to post receive */
2180 ind
= qp
->rq
.head
& (qp
->rq
.max
- 1);
2182 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
2183 if (mthca_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
2184 mthca_err(dev
, "RQ %06x full (%u head, %u tail,"
2185 " %d max, %d nreq)\n", qp
->qpn
,
2186 qp
->rq
.head
, qp
->rq
.tail
,
2193 wqe
= get_recv_wqe(qp
, ind
);
2195 ((struct mthca_next_seg
*) wqe
)->flags
= 0;
2197 wqe
+= sizeof (struct mthca_next_seg
);
2199 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
2205 for (i
= 0; i
< wr
->num_sge
; ++i
) {
2206 ((struct mthca_data_seg
*) wqe
)->byte_count
=
2207 cpu_to_be32(wr
->sg_list
[i
].length
);
2208 ((struct mthca_data_seg
*) wqe
)->lkey
=
2209 cpu_to_be32(wr
->sg_list
[i
].lkey
);
2210 ((struct mthca_data_seg
*) wqe
)->addr
=
2211 cpu_to_be64(wr
->sg_list
[i
].addr
);
2212 wqe
+= sizeof (struct mthca_data_seg
);
2215 if (i
< qp
->rq
.max_gs
) {
2216 ((struct mthca_data_seg
*) wqe
)->byte_count
= 0;
2217 ((struct mthca_data_seg
*) wqe
)->lkey
= cpu_to_be32(MTHCA_INVAL_LKEY
);
2218 ((struct mthca_data_seg
*) wqe
)->addr
= 0;
2221 qp
->wrid
[ind
] = wr
->wr_id
;
2224 if (unlikely(ind
>= qp
->rq
.max
))
2229 qp
->rq
.head
+= nreq
;
2232 * Make sure that descriptors are written before
2236 *qp
->rq
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
2239 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
2243 void mthca_free_err_wqe(struct mthca_dev
*dev
, struct mthca_qp
*qp
, int is_send
,
2244 int index
, int *dbd
, __be32
*new_wqe
)
2246 struct mthca_next_seg
*next
;
2249 * For SRQs, all WQEs generate a CQE, so we're always at the
2250 * end of the doorbell chain.
2258 next
= get_send_wqe(qp
, index
);
2260 next
= get_recv_wqe(qp
, index
);
2262 *dbd
= !!(next
->ee_nds
& cpu_to_be32(MTHCA_NEXT_DBD
));
2263 if (next
->ee_nds
& cpu_to_be32(0x3f))
2264 *new_wqe
= (next
->nda_op
& cpu_to_be32(~0x3f)) |
2265 (next
->ee_nds
& cpu_to_be32(0x3f));
2270 int mthca_init_qp_table(struct mthca_dev
*dev
)
2276 spin_lock_init(&dev
->qp_table
.lock
);
2279 * We reserve 2 extra QPs per port for the special QPs. The
2280 * special QP for port 1 has to be even, so round up.
2282 dev
->qp_table
.sqp_start
= (dev
->limits
.reserved_qps
+ 1) & ~1UL;
2283 err
= mthca_alloc_init(&dev
->qp_table
.alloc
,
2284 dev
->limits
.num_qps
,
2286 dev
->qp_table
.sqp_start
+
2287 MTHCA_MAX_PORTS
* 2);
2291 err
= mthca_array_init(&dev
->qp_table
.qp
,
2292 dev
->limits
.num_qps
);
2294 mthca_alloc_cleanup(&dev
->qp_table
.alloc
);
2298 for (i
= 0; i
< 2; ++i
) {
2299 err
= mthca_CONF_SPECIAL_QP(dev
, i
? IB_QPT_GSI
: IB_QPT_SMI
,
2300 dev
->qp_table
.sqp_start
+ i
* 2,
2305 mthca_warn(dev
, "CONF_SPECIAL_QP returned "
2306 "status %02x, aborting.\n",
2315 for (i
= 0; i
< 2; ++i
)
2316 mthca_CONF_SPECIAL_QP(dev
, i
, 0, &status
);
2318 mthca_array_cleanup(&dev
->qp_table
.qp
, dev
->limits
.num_qps
);
2319 mthca_alloc_cleanup(&dev
->qp_table
.alloc
);
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
        int i;
        u8 status;

        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

        mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
        mthca_alloc_cleanup(&dev->qp_table.alloc);
}