iw_cxgb4: only call the cq comp_handler when the cq is armed
author		Steve Wise <swise@opengridcomputing.com>
		Thu, 9 Nov 2017 15:14:43 +0000 (07:14 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Wed, 17 Jan 2018 08:45:19 +0000 (09:45 +0100)
commit cbb40fadd31c6bbc59104e58ac95c6ef492d038b upstream.

The ULP's completion handler should only be called if the CQ is
armed for notification. Gate the comp_handler calls in
post_qp_event(), complete_sq_drain_wr() and complete_rq_drain_wr()
with t4_clear_cq_armed() so a notification is only delivered when
the consumer has actually requested one.
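
For context, t4_arm_cq() sets the CQ's armed flag when the ULP
requests notification via ib_req_notify_cq(), and
t4_clear_cq_armed() atomically tests and clears that flag, so at
most one caller delivers a notification per arm. A simplified
sketch of the helper, after drivers/infiniband/hw/cxgb4/t4.h (not
a verbatim copy of the tree):

	static inline int t4_clear_cq_armed(struct t4_cq *cq)
	{
		/* Returns non-zero only for the caller that saw the
		 * CQ armed; that caller must invoke comp_handler.
		 */
		return test_and_clear_bit(CQ_ARMED, &cq->flags);
	}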

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/qp.c

diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 8f963df0bffceb5c180dd508ab5a6b8631acf929..9d25298d96faf0765861e63a507f14aa1f55d937 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-       spin_lock_irqsave(&chp->comp_handler_lock, flag);
-       (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-       spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&chp->cq)) {
+               spin_lock_irqsave(&chp->comp_handler_lock, flag);
+               (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+               spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+       }
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e69453665a174a00390f43a10da2ab336c86630d..282046ebaeb2802b6bbdac103de28438431d0988 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -817,10 +817,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
        t4_swcq_produce(cq);
        spin_unlock_irqrestore(&schp->lock, flag);
 
-       spin_lock_irqsave(&schp->comp_handler_lock, flag);
-       (*schp->ibcq.comp_handler)(&schp->ibcq,
-                                  schp->ibcq.cq_context);
-       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&schp->cq)) {
+               spin_lock_irqsave(&schp->comp_handler_lock, flag);
+               (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                          schp->ibcq.cq_context);
+               spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+       }
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -846,10 +848,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
        t4_swcq_produce(cq);
        spin_unlock_irqrestore(&rchp->lock, flag);
 
-       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
-                                  rchp->ibcq.cq_context);
-       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&rchp->cq)) {
+               spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+               (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                          rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+       }
 }
 
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
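
The gating pattern in miniature: a standalone C11 model (names are
hypothetical, not kernel code) showing that a completion
notification fires at most once per arm and never without one:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for struct t4_cq: just the armed flag. */
	struct demo_cq {
		atomic_bool armed;
	};

	/* ib_req_notify_cq() analogue: request exactly one callback. */
	static void arm_cq(struct demo_cq *cq)
	{
		atomic_store(&cq->armed, true);
	}

	/* Event path: only the caller that clears the flag delivers
	 * the notification, as t4_clear_cq_armed() does above.
	 */
	static void post_event(struct demo_cq *cq)
	{
		if (atomic_exchange(&cq->armed, false))
			printf("comp_handler invoked\n");
		else
			printf("CQ not armed: notification suppressed\n");
	}

	int main(void)
	{
		struct demo_cq cq;

		atomic_init(&cq.armed, false);
		post_event(&cq);	/* suppressed: never armed */
		arm_cq(&cq);
		post_event(&cq);	/* delivered, clears the flag */
		post_event(&cq);	/* suppressed until re-armed */
		return 0;
	}

Since ib_req_notify_cq() notifications are one-shot, the consumer
must re-arm after each callback; that is why the error and drain
paths above honour the armed check rather than calling comp_handler
unconditionally.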