From 6c2ab0b857d1b674c5f710d2cbf06a0f3ac52313 Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn
Date: Thu, 4 Feb 2016 11:03:19 -0800
Subject: [PATCH] staging/rdma/hfi1: Insure last cursor is updated prior to complete

This patch is a prerequisite for adding a separate lock
for post send.

The timing of updating s_last needs to be before returning
any send completion to avoid a race between a poll cq seeing
a completion and the post send checking for a full queue.

Reviewed-by: Dennis Dalessandro
Signed-off-by: Mike Marciniszyn
Signed-off-by: Doug Ledford
---
 drivers/staging/rdma/hfi1/rc.c  | 20 ++++++++++++++++----
 drivers/staging/rdma/hfi1/ruc.c | 12 +++++++-----
 2 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
index 2c46491746bb..e54e0b4bb5e5 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -1123,10 +1123,18 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
 		hfi1_add_retry_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
+		u32 s_last;
+
 		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
 		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -1143,8 +1151,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	}
 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1184,11 +1190,19 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	 */
 	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
 	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+		u32 s_last;
+
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
 			rvt_put_mr(sge->mr);
 		}
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1200,8 +1214,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index 6aeea6c4b236..66449acac76d 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -921,6 +921,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
 		return;
 
+	last = qp->s_last;
+	old_last = last;
+	if (++last >= qp->s_size)
+		last = 0;
+	qp->s_last = last;
+	/* See post_send() */
+	barrier();
 	for (i = 0; i < wqe->wr.num_sge; i++) {
 		struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -948,11 +955,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			     status != IB_WC_SUCCESS);
 	}
 
-	last = qp->s_last;
-	old_last = last;
-	if (++last >= qp->s_size)
-		last = 0;
-	qp->s_last = last;
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
 	if (qp->s_cur == old_last)
-- 
2.20.1
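
For readers unfamiliar with the race the commit message describes, below is a
minimal, single-threaded C sketch; it is not driver code, and the toy_qp,
toy_cq_enter, toy_queue_full, and toy_send_complete names are made up for
illustration. It shows the ordering the patch enforces: retire the send-queue
slot by advancing s_last, keep that store ahead of the completion with a
compiler barrier, and only then make the completion visible, so a full-queue
check triggered by polling the completion never sees a stale s_last. In the
real driver the QP locks provide the cross-CPU ordering; barrier() only stops
the compiler from reordering the s_last store past the completion.

/*
 * Hypothetical sketch of the ordering enforced by this patch.
 * Build: gcc -Wall -o toy toy.c
 */
#include <stdio.h>

#define QP_SIZE 4

/* Compiler barrier, same idea as the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

struct toy_qp {
	unsigned int s_head;	/* next free send-queue slot */
	unsigned int s_last;	/* oldest slot not yet completed */
	unsigned int s_size;	/* ring size */
};

/* Stand-in for rvt_cq_enter(): once this returns, a poller may react. */
static void toy_cq_enter(struct toy_qp *qp)
{
	printf("completion visible: s_last=%u\n", qp->s_last);
}

/* Full check as a post-send path would make it against s_last. */
static int toy_queue_full(struct toy_qp *qp)
{
	unsigned int next = qp->s_head + 1;

	if (next >= qp->s_size)
		next = 0;
	return next == qp->s_last;
}

/* Completion path with the patched ordering. */
static void toy_send_complete(struct toy_qp *qp)
{
	unsigned int s_last = qp->s_last;

	/* 1. Retire the WQE slot first ... */
	if (++s_last >= qp->s_size)
		s_last = 0;
	qp->s_last = s_last;
	/* ... keeping the store ahead of the CQE, as in the patch. */
	barrier();

	/* 2. Only now publish the completion. */
	toy_cq_enter(qp);
}

int main(void)
{
	struct toy_qp qp = { .s_head = 3, .s_last = 0, .s_size = QP_SIZE };

	/* Ring currently full: head + 1 wraps to last. */
	printf("full before completion? %d\n", toy_queue_full(&qp));

	toy_send_complete(&qp);

	/* A post triggered by the completion now sees the freed slot. */
	printf("full after completion?  %d\n", toy_queue_full(&qp));
	return 0;
}

With the pre-patch ordering (completion posted before s_last advanced), the
second check in this sketch would still report a full queue, which is exactly
the window the separate post-send lock work needs to close.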