From: Amrani, Ram
Date: Thu, 27 Apr 2017 10:35:34 +0000 (+0300)
Subject: RDMA/qedr: destroy CQ only after HW releases it
X-Git-Tag: MMI-PSA29.97-13-9~5649^3~89
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=4dd72636c9e5a17b0d788ab1d8cc10ae25c69ea3;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

RDMA/qedr: destroy CQ only after HW releases it

Wait for all relevant CNQ interrupts before freeing the CQ.
Don't invoke completion handlers for a destroyed CQ.

Signed-off-by: Ram Amrani
Signed-off-by: Doug Ledford
---

diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index c64dabe8ae6e..ef11e770f822 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -438,14 +438,21 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
 
 		cq->arm_flags = 0;
 
-		if (cq->ibcq.comp_handler)
+		if (!cq->destroyed && cq->ibcq.comp_handler)
 			(*cq->ibcq.comp_handler)
 				(&cq->ibcq, cq->ibcq.cq_context);
 
+		/* The CQ's CNQ notification counter is checked before
+		 * destroying the CQ in a busy-wait loop that waits for all of
+		 * the CQ's CNQ interrupts to be processed. It is increased
+		 * here, only after the completion handler, to ensure that the
+		 * handler is not running when the CQ is destroyed.
+		 */
+		cq->cnq_notif++;
+
 		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
 
 		cnq->n_comp++;
-
 	}
 
 	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 5cb9195513bd..fc9e2797ac5a 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -272,6 +272,8 @@ struct qedr_cq {
 	u32 cq_cons;
 
 	struct qedr_userq q;
+	u8 destroyed;
+	u16 cnq_notif;
 };
 
 struct qedr_pd {
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6ae481926c91..9f76c46b058e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -822,6 +822,17 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
 	unsigned long sflags;
+	struct qedr_dev *dev;
+
+	dev = get_qedr_dev(ibcq->device);
+
+	if (cq->destroyed) {
+		DP_ERR(dev,
+		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
+		       cq, cq->icid);
+		return -EINVAL;
+	}
+
 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
 		return 0;
 
@@ -987,16 +998,22 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
 	return 0;
 }
 
+#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
+#define QEDR_DESTROY_CQ_ITER_DURATION		(10)
+
 int qedr_destroy_cq(struct ib_cq *ibcq)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qed_rdma_destroy_cq_out_params oparams;
 	struct qed_rdma_destroy_cq_in_params iparams;
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
+	int iter;
 	int rc;
 
 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
 
+	cq->destroyed = 1;
+
 	/* GSIs CQs are handled by driver, so they don't exist in the FW */
 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
 		goto done;
@@ -1013,10 +1030,50 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
 		ib_umem_release(cq->q.umem);
 	}
 
+	/* We don't want the IRQ handler to handle a non-existing CQ so we
+	 * wait until all CNQ interrupts, if any, are received. This will always
+	 * happen and will always happen very fast. If not, then a serious error
+	 * has occurred. That is why we can use a long delay.
+	 * We spin for a short time so we don't lose time on context switching
+	 * in case all the completions are handled in that span. Otherwise
+	 * we sleep for a while and check again. Since the CNQ may be
+	 * associated with (only) the current CPU we use msleep to allow the
+	 * current CPU to be freed.
+	 * The CNQ notification is increased in qedr_irq_handler().
+	 */
+	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
+		iter--;
+	}
+
+	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
+		iter--;
+	}
+
+	if (oparams.num_cq_notif != cq->cnq_notif)
+		goto err;
+
+	/* Note that we don't need to have explicit code to wait for the
+	 * completion of the event handler because it is invoked from the EQ.
+	 * Since the destroy CQ ramrod has also been received on the EQ we can
+	 * be certain that there's no event handler in process.
+	 */
 done:
+	cq->sig = ~cq->sig;
+
 	kfree(cq);
 
 	return 0;
+
+err:
+	DP_ERR(dev,
+	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
+	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
+
+	return -EINVAL;
 }
 
 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
@@ -3419,6 +3476,13 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	int update = 0;
 	int done = 0;
 
+	if (cq->destroyed) {
+		DP_ERR(dev,
+		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
+		       cq, cq->icid);
+		return 0;
+	}
+
 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
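
Not part of the patch: below is a minimal, self-contained userspace C sketch of the bounded
spin-then-sleep wait that the qedr_destroy_cq() hunk above implements. The
wait_for_notifications() helper, the atomic_uint counter, and the usleep()-based delays are
illustrative stand-ins (assumptions, not driver code) for cq->cnq_notif, the CNQ interrupt
path, and the kernel's udelay()/msleep(); the bounds mirror QEDR_DESTROY_CQ_MAX_ITERATIONS
and QEDR_DESTROY_CQ_ITER_DURATION.

/* Illustrative sketch only -- not kernel code. Models the two-phase wait:
 * spin briefly (cheap if the last notifications are already in flight),
 * then sleep between checks so the CPU that services the CNQ can run.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_ITERATIONS		10
#define ITER_DURATION_US	10	/* busy-wait step, microseconds */
#define ITER_DURATION_MS	10	/* sleep step, milliseconds */

static atomic_uint cnq_notif;		/* stand-in for cq->cnq_notif */

/* Returns true once 'expected' notifications have been observed. */
static bool wait_for_notifications(unsigned int expected)
{
	int iter;

	/* Phase 1: short busy-wait. */
	for (iter = 0; iter < MAX_ITERATIONS; iter++) {
		if (atomic_load(&cnq_notif) == expected)
			return true;
		usleep(ITER_DURATION_US);
	}

	/* Phase 2: sleep so the (possibly same) CPU handling the CNQ is freed. */
	for (iter = 0; iter < MAX_ITERATIONS; iter++) {
		if (atomic_load(&cnq_notif) == expected)
			return true;
		usleep(ITER_DURATION_MS * 1000);
	}

	return atomic_load(&cnq_notif) == expected;
}

int main(void)
{
	/* Pretend the "IRQ handler" already counted three notifications. */
	atomic_store(&cnq_notif, 3);

	printf("all notifications seen: %s\n",
	       wait_for_notifications(3) ? "yes" : "no");
	return 0;
}

In the driver itself the counter is only incremented by qedr_irq_handler() after the
completion handler has run, which is what lets qedr_destroy_cq() conclude that no handler
can still be executing once the expected count is reached.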