IB/hfi1: Prevent kernel QP post send hard lockups
author Mike Marciniszyn <mike.marciniszyn@intel.com>
Sun, 9 Apr 2017 17:16:35 +0000 (10:16 -0700)
committer Doug Ledford <dledford@redhat.com>
Fri, 28 Apr 2017 17:48:01 +0000 (13:48 -0400)
The driver progress routines can call cond_resched() when
a timeslice is exhausted and irqs are enabled.

If a ULP is holding a spin lock without disabling irqs and its post
send calls the progress routine directly, the cond_resched() can
yield, allowing another thread from the same ULP to deadlock on that
same lock.
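
As an illustrative sketch only (the ULP lock and variable names here
are hypothetical, not code from this patch), the hazardous pattern
looks like:

	/* ULP thread A, on a kernel without preemption */
	spin_lock(&ulp->lock);		/* irqs left enabled */
	ib_post_send(qp, wr, &bad_wr);	/* post send calls directly
					 * into the driver progress
					 * routine, which may
					 * cond_resched() and schedule
					 * away with the lock held */

	/* ULP thread B, now running on the same CPU */
	spin_lock(&ulp->lock);		/* spins forever; thread A
					 * never runs again to unlock */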

Fix this by replacing the current hfi1_do_send() calldown with one
unique to post send, and by adding an argument to hfi1_do_send() to
indicate whether the send engine is running in a thread. If the
routine is not running in a thread, avoid calling cond_resched().
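
The two entry paths after this change (function names as in the patch
below):

	/* rdmavt post-send calldown: caller context unknown, so
	 * cond_resched() must be avoided */
	hfi1_do_send_from_rvt(qp) -> hfi1_do_send(qp, false);

	/* workqueue handler: always in a thread, safe to yield */
	_hfi1_do_send(work) -> hfi1_do_send(qp, true);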

Cc: <stable@vger.kernel.org> # 4.7.x-
Fixes: 831464ce4b74 ("IB/hfi1: Don't call cond_resched in atomic mode when sending packets")
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/hfi1/ruc.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hfi1/verbs.h

diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 879eb9b31954e4c5f7075750fed3d8aef956a6aa..ccf8d8037355f914ce1aea3c19c4fb7b7e8a6963 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -784,23 +784,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
 
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+       hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
        struct iowait *wait = container_of(work, struct iowait, iowork);
        struct rvt_qp *qp = iowait_to_qp(wait);
 
-       hfi1_do_send(qp);
+       hfi1_do_send(qp, true);
 }
 
 /**
  * hfi1_do_send - perform a send on a QP
  * @work: contains a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
@@ -868,8 +874,10 @@ void hfi1_do_send(struct rvt_qp *qp)
                        qp->s_hdrwords = 0;
                        /* allow other tasks to run */
                        if (unlikely(time_after(jiffies, timeout))) {
-                               if (workqueue_congested(cpu,
-                                                       ps.ppd->hfi1_wq)) {
+                               if (!in_thread ||
+                                   workqueue_congested(
+                                               cpu,
+                                               ps.ppd->hfi1_wq)) {
                                        spin_lock_irqsave(
                                                &qp->s_lock,
                                                ps.flags);
@@ -882,11 +890,9 @@ void hfi1_do_send(struct rvt_qp *qp)
                                                *ps.ppd->dd->send_schedule);
                                        return;
                                }
-                               if (!irqs_disabled()) {
-                                       cond_resched();
-                                       this_cpu_inc(
-                                          *ps.ppd->dd->send_schedule);
-                               }
+                               cond_resched();
+                               this_cpu_inc(
+                                       *ps.ppd->dd->send_schedule);
                                timeout = jiffies + (timeout_int) / 8;
                        }
                        spin_lock_irqsave(&qp->s_lock, ps.flags);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 57036e545bdb17ea7c8cf746decb2d4ef8e5ad17..7174a18ebaacdfe8cd822191bb94fd48ba069c22 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -1820,7 +1820,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
        dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
        dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
        dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-       dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+       dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
        dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 6c549e7a25e7926d9835bd6f37ba25fd7f35b03b..46b00ed9f2dc296df81ff0863ee52c4cb321f731 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -355,7 +355,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 
 void _hfi1_do_send(struct work_struct *work);
 
-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                        enum ib_wc_status status);