IB/qib, IB/hfi1: Fix up UD loopback use of irq flags
author Mike Marciniszyn <mike.marciniszyn@intel.com>
Tue, 12 Apr 2016 17:46:10 +0000 (10:46 -0700)
committer Doug Ledford <dledford@redhat.com>
Thu, 28 Apr 2016 20:32:26 +0000 (16:32 -0400)
The dual lock patch moved locking around and missed an issue
with handling irq flags when processing UD loopback
packets.  This issue was revealed by smatch.

Fix for both qib and hfi1 to pass the saved flags to the UD request
builder and handle the changes correctly.

Fixes: 46a80d62e6e0 ("IB/qib, staging/rdma/hfi1: add s_hlock for use in post send")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_uc.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/hw/qib/qib_verbs.h
drivers/staging/rdma/hfi1/ruc.c
drivers/staging/rdma/hfi1/ud.c
drivers/staging/rdma/hfi1/verbs.h

index 9088e26d3ac8b0aef86424d244c0465c4f33b87e..444028a3582a3898f2ddcedcdb69614904ab6f0d 100644 (file)
@@ -230,7 +230,7 @@ bail:
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_rc_req(struct rvt_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 {
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
index a5f07a64b228d2e8f5e1796f8892ddeda6aa37fd..b6777925629704d3a2c3c7bc51a5475a725ca2c3 100644 (file)
@@ -739,7 +739,7 @@ void qib_do_send(struct rvt_qp *qp)
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-       int (*make_req)(struct rvt_qp *qp);
+       int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
        unsigned long flags;
 
        if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -781,7 +781,7 @@ void qib_do_send(struct rvt_qp *qp)
                        qp->s_hdrwords = 0;
                        spin_lock_irqsave(&qp->s_lock, flags);
                }
-       } while (make_req(qp));
+       } while (make_req(qp, &flags));
 
        spin_unlock_irqrestore(&qp->s_lock, flags);
 }
index 7bdbc79ceaa3bbd7a25a87ea107b06f1460dd7ad..1d61bd04f4494bf12a6c020bad53a6fe1c6f8e81 100644 (file)
@@ -45,7 +45,7 @@
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_uc_req(struct rvt_qp *qp)
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 {
        struct qib_qp_priv *priv = qp->priv;
        struct qib_other_headers *ohdr;
index d9502137de626b80966cfd200e3806c65c36b21a..846e6c726df7c67204f1dc1d5f1bfb066c36e149 100644 (file)
@@ -238,7 +238,7 @@ drop:
  *
  * Return 1 if constructed; otherwise, return 0.
  */
-int qib_make_ud_req(struct rvt_qp *qp)
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 {
        struct qib_qp_priv *priv = qp->priv;
        struct qib_other_headers *ohdr;
@@ -294,7 +294,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
                this_cpu_inc(ibp->pmastats->n_unicast_xmit);
                lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
                if (unlikely(lid == ppd->lid)) {
-                       unsigned long flags;
+                       unsigned long tflags = *flags;
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
@@ -307,10 +307,10 @@ int qib_make_ud_req(struct rvt_qp *qp)
                                goto bail;
                        }
                        qp->s_cur = next_cur;
-                       local_irq_save(flags);
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
+                       spin_unlock_irqrestore(&qp->s_lock, tflags);
                        qib_ud_loopback(qp, wqe);
-                       spin_lock_irqsave(&qp->s_lock, flags);
+                       spin_lock_irqsave(&qp->s_lock, tflags);
+                       *flags = tflags;
                        qib_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
index 4b76a8d593371b3929faf86437890ff8191ecf1c..6888f03c6d61007b6f4d86c1f73b403908e08279 100644 (file)
@@ -430,11 +430,11 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 
 void qib_send_rc_ack(struct rvt_qp *qp);
 
-int qib_make_rc_req(struct rvt_qp *qp);
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
 
-int qib_make_uc_req(struct rvt_qp *qp);
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);
 
-int qib_make_ud_req(struct rvt_qp *qp);
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);
 
 int qib_register_ib_device(struct qib_devdata *);
 
index 08813cdbd475773b73bd9ee38ee9ff9cd40f0100..a659aec3c3c6b95823650c7b6bb3ac800649967a 100644 (file)
@@ -831,7 +831,6 @@ void hfi1_do_send(struct rvt_qp *qp)
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
        int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-       unsigned long flags;
        unsigned long timeout;
        unsigned long timeout_int;
        int cpu;
@@ -866,11 +865,11 @@ void hfi1_do_send(struct rvt_qp *qp)
                timeout_int = SEND_RESCHED_TIMEOUT;
        }
 
-       spin_lock_irqsave(&qp->s_lock, flags);
+       spin_lock_irqsave(&qp->s_lock, ps.flags);
 
        /* Return if we are already busy processing a work request. */
        if (!hfi1_send_ok(qp)) {
-               spin_unlock_irqrestore(&qp->s_lock, flags);
+               spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                return;
        }
 
@@ -884,7 +883,7 @@ void hfi1_do_send(struct rvt_qp *qp)
        do {
                /* Check for a constructed packet to be sent. */
                if (qp->s_hdrwords != 0) {
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
+                       spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                        /*
                         * If the packet cannot be sent now, return and
                         * the send tasklet will be woken up later.
@@ -897,11 +896,14 @@ void hfi1_do_send(struct rvt_qp *qp)
                        if (unlikely(time_after(jiffies, timeout))) {
                                if (workqueue_congested(cpu,
                                                        ps.ppd->hfi1_wq)) {
-                                       spin_lock_irqsave(&qp->s_lock, flags);
+                                       spin_lock_irqsave(
+                                               &qp->s_lock,
+                                               ps.flags);
                                        qp->s_flags &= ~RVT_S_BUSY;
                                        hfi1_schedule_send(qp);
-                                       spin_unlock_irqrestore(&qp->s_lock,
-                                                              flags);
+                                       spin_unlock_irqrestore(
+                                               &qp->s_lock,
+                                               ps.flags);
                                        this_cpu_inc(
                                                *ps.ppd->dd->send_schedule);
                                        return;
@@ -913,11 +915,11 @@ void hfi1_do_send(struct rvt_qp *qp)
                                }
                                timeout = jiffies + (timeout_int) / 8;
                        }
-                       spin_lock_irqsave(&qp->s_lock, flags);
+                       spin_lock_irqsave(&qp->s_lock, ps.flags);
                }
        } while (make_req(qp, &ps));
 
-       spin_unlock_irqrestore(&qp->s_lock, flags);
+       spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 }
 
 /*
index ae8a70f703ebcbb877abf630f501158688c6fb4a..1e503ad0bebb764ee1e05462604538e6324002f8 100644 (file)
@@ -322,7 +322,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                             (lid == ppd->lid ||
                              (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
                              qp->ibqp.qp_type == IB_QPT_GSI)))) {
-                       unsigned long flags;
+                       unsigned long tflags = ps->flags;
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
@@ -335,10 +335,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                                goto bail;
                        }
                        qp->s_cur = next_cur;
-                       local_irq_save(flags);
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
+                       spin_unlock_irqrestore(&qp->s_lock, tflags);
                        ud_loopback(qp, wqe);
-                       spin_lock_irqsave(&qp->s_lock, flags);
+                       spin_lock_irqsave(&qp->s_lock, tflags);
+                       ps->flags = tflags;
                        hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done_free_tx;
                }
index 6c4670fffdbb8bf852a1ecaa6243339807bffd50..2ba1373f4fb4ca7e79d8bdaca4a79150e32212a8 100644 (file)
@@ -215,6 +215,7 @@ struct hfi1_pkt_state {
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct verbs_txreq *s_txreq;
+       unsigned long flags;
 };
 
 #define HFI1_PSN_CREDIT  16