/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include "iw_cxgb4.h"
static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}
static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}
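
/*
 * The SQ can live either in on-chip adapter memory (OCQP) or in host DMA
 * memory.  alloc_sq() tries the on-chip pool first for user QPs and falls
 * back to a coherent host allocation.
 */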
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;

	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;

	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;

	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))
		return NULL;

	return rdev->bar2_kva + bar2_qoffset;
}
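
/*
 * create_qp() allocates the SQ/RQ memory and the RQT, then posts a single
 * FW_RI_RES_WR carrying two resource records (one for the SQ egress queue,
 * one for the RQ) and waits for the firmware reply before the queues are
 * considered usable.
 */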
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		 __func__, wq->sq.queue,
		 (unsigned long long)virt_to_phys(wq->sq.queue),
		 wq->rq.queue,
		 (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->rq.bar2_qid,
					 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(2) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		FW_RI_RES_WR_IQID_V(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(3) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 __func__, wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
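
/*
 * Copy inline data from the sg list directly into the work request,
 * wrapping at the end of the SQ and padding the result to a 16-byte
 * multiple.
 */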
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->nsge = cpu_to_be16(num_sge);
	if (plenp)
		*plenp = plen;
	return 0;
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;

	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
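
/*
 * Fast-register support: build_tpte_memreg() writes the TPT entry inline
 * in a FW_RI_FR_NSMR_TPTE_WR (small page lists on adapters that support
 * it), while build_memreg() posts a FW_RI_FR_NSMR_WR whose PBL is carried
 * either as immediate data or via a DSGL, depending on its size and the
 * max_fr_immd threshold.
 */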
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
			      struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			      u8 *len16)
{
	__be64 *p = (__be64 *)fr->pbl;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
		FW_RI_TPTE_STAGSTATE_V(1) |
		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
	fr->tpte.locread_to_qpid = cpu_to_be32(
		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	fr->tpte.len_hi = cpu_to_be32(0);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
}
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
			bool dsgl_supported)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
					0xffffffff);

	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}
static void free_qp_work(struct work_struct *work)
{
	struct c4iw_ucontext *ucontext;
	struct c4iw_qp *qhp;
	struct c4iw_dev *rhp;

	qhp = container_of(work, struct c4iw_qp, free_work);
	ucontext = qhp->ucontext;
	rhp = qhp->rhp;

	pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	if (ucontext)
		c4iw_put_ucontext(ucontext);
	kfree(qhp);
}

static void queue_qp_free(struct kref *kref)
{
	struct c4iw_qp *qhp;

	qhp = container_of(kref, struct c4iw_qp, kref);
	pr_debug("%s qhp %p\n", __func__, qhp);
	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	kref_get(&to_c4iw_qp(qp)->kref);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}
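
/*
 * When the device is in doorbell flow-control mode (db_state != NORMAL),
 * kernel QPs do not ring the doorbell directly; the QP is queued on the
 * db_fc_list and the pending pidx increment is accumulated until normal
 * operation resumes.
 */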
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}
static int ib_to_fw_opcode(int ib_opcode)
{
	int opcode;

	switch (ib_opcode) {
	case IB_WR_SEND_WITH_INV:
		opcode = FW_RI_SEND_WITH_INV;
		break;
	case IB_WR_SEND:
		opcode = FW_RI_SEND;
		break;
	case IB_WR_RDMA_WRITE:
		opcode = FW_RI_RDMA_WRITE;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		opcode = FW_RI_READ_REQ;
		break;
	case IB_WR_REG_MR:
		opcode = FW_RI_FAST_REGISTER;
		break;
	case IB_WR_LOCAL_INV:
		opcode = FW_RI_LOCAL_INV;
		break;
	default:
		opcode = -EINVAL;
	}
	return opcode;
}
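
/*
 * Once a QP has been flushed, posted work requests can no longer reach the
 * hardware.  The drain helpers below complete such WRs in software by
 * inserting a T4_ERR_SWFLUSH CQE into the software CQ and invoking the CQ
 * completion handler.
 */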
static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;
	int opcode;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	opcode = ib_to_fw_opcode(wr->opcode);
	if (opcode < 0)
		return opcode;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (t4_clear_cq_armed(&schp->cq)) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq,
					   schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
	return 0;
}

static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
				 struct ib_send_wr **bad_wr)
{
	int ret = 0;

	while (wr) {
		ret = complete_sq_drain_wr(qhp, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return ret;
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	if (t4_clear_cq_armed(&rchp->cq)) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
					   rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}
}

static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
	while (wr) {
		complete_rq_drain_wr(qhp, wr);
		wr = wr->next;
	}
}
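
/*
 * c4iw_post_send() translates each ib_send_wr into the corresponding
 * FW_RI_* work request, records it in the software SQ, and rings the SQ
 * doorbell either directly or via the kernel doorbell flow-control path.
 */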
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u16 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				c4iw_invalidate_mr(qhp->rhp,
						   wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			} else {
				fw_flags = 0;
			}
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_REG_MR: {
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
						  mhp, &len16);
			} else {
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
						   mhp, &len16,
						   qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
				if (err)
					break;
			}
			mhp->attr.state = 1;
			break;
		}
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
			break;
		default:
			pr_debug("%s post of type=%d TBD!\n", __func__,
				 wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(&swsqe->host_ts);
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
			 __func__,
			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
			 swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wrs(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(
				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		pr_debug("%s cookie 0x%llx pidx %u\n",
			 __func__,
			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}
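
/*
 * Map a CQE error status onto the iWARP TERMINATE layer/etype and error
 * code carried in the terminate_message sent to the peer.
 */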
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
		 qhp->ep->hwtid);

	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cqs lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	if (schp != rchp)
		spin_lock(&schp->lock);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		if (schp != rchp)
			spin_unlock(&schp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;
	t4_set_wq_in_error(&qhp->wq);

	c4iw_flush_hw_cq(rchp, qhp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);

	if (schp != rchp)
		c4iw_flush_hw_cq(schp, qhp);
	sq_flushed = c4iw_flush_sq(qhp);

	spin_unlock(&qhp->lock);
	if (schp != rchp)
		spin_unlock(&schp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	if (schp == rchp) {
		if ((rq_flushed || sq_flushed) &&
		    t4_clear_cq_armed(&rchp->cq)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
		 ep->hwtid);

	skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!skb))
		return -ENOMEM;

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (uintptr_t)&ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	pr_debug("%s ret %d\n", __func__, ret);
	return ret;
}
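
/*
 * Build the zero-length RDMA WRITE or READ REQ used as the MPA
 * "ready-to-receive" (RTR) message for the negotiated p2p connection
 * setup type.
 */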
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
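
/*
 * rdma_init() posts the FW_RI_TYPE_INIT work request that moves the
 * firmware QP into RDMA mode, carrying the negotiated MPA attributes,
 * queue ids, RQT location and IRD/ORD limits.
 */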
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		kfree_skb(skb);
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto err1;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	if (!ret)
		goto out;
err1:
	free_ird(rhp, qhp->attr.max_ird);
out:
	pr_debug("%s ret %d\n", __func__, ret);
	return ret;
}
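
/*
 * c4iw_modify_qp() drives the iWARP QP state machine (IDLE, RTS, CLOSING,
 * TERMINATE, ERROR).  Leaving RTS posts a FINI to the firmware, moving to
 * ERROR flushes both CQs, and endpoint references are taken or dropped as
 * the QP is bound to or torn away from its LLP connection.
 */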
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
		 __func__,
		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
				  C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
		 qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
							 GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);

	spin_lock_irq(&rhp->lock);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	spin_unlock_irq(&rhp->lock);
	free_ird(rhp, qhp->attr.max_ird);

	c4iw_qp_rem_ref(ib_qp);

	pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	return 0;
}
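
/*
 * c4iw_create_qp() sizes the SQ/RQ from the requested capabilities,
 * creates the hardware queues, and for user QPs returns mmap keys for the
 * queue memory and the BAR2 doorbell/GTS pages in the create response.
 */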
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.rq.queue);

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err_free_qhp;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	kref_init(&qhp->kref);
	INIT_WORK(&qhp->free_work, free_qp_work);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err_destroy_qp;

	if (udata) {
		sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
		if (!sq_key_mm) {
			ret = -ENOMEM;
			goto err_remove_handle;
		}
		rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
		if (!rq_key_mm) {
			ret = -ENOMEM;
			goto err_free_sq_key;
		}
		sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
		if (!sq_db_key_mm) {
			ret = -ENOMEM;
			goto err_free_rq_key;
		}
		rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
		if (!rq_db_key_mm) {
			ret = -ENOMEM;
			goto err_free_sq_db_key;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
						 GFP_KERNEL);
			if (!ma_sync_key_mm) {
				ret = -ENOMEM;
				goto err_free_rq_db_key;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (ma_sync_key_mm) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err_free_ma_sync_key;
		sq_key_mm->key = uresp.sq_key;
		sq_key_mm->addr = qhp->wq.sq.phys_addr;
		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, sq_key_mm);
		rq_key_mm->key = uresp.rq_key;
		rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
		rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, rq_key_mm);
		sq_db_key_mm->key = uresp.sq_db_gts_key;
		sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
		sq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, sq_db_key_mm);
		rq_db_key_mm->key = uresp.rq_db_gts_key;
		rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
		rq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, rq_db_key_mm);
		if (ma_sync_key_mm) {
			ma_sync_key_mm->key = uresp.ma_sync_key;
			ma_sync_key_mm->addr =
				(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
				 PCIE_MA_SYNC_A) & PAGE_MASK;
			ma_sync_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, ma_sync_key_mm);
		}

		c4iw_get_ucontext(ucontext);
		qhp->ucontext = ucontext;
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
		 __func__,
		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err_free_ma_sync_key:
	kfree(ma_sync_key_mm);
err_free_rq_db_key:
	kfree(rq_db_key_mm);
err_free_sq_db_key:
	kfree(sq_db_key_mm);
err_free_rq_key:
	kfree(rq_key_mm);
err_free_sq_key:
	kfree(sq_key_mm);
err_remove_handle:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err_destroy_qp:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err_free_qhp:
	kfree(qhp);
	return ERR_PTR(ret);
}
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}