watchdog/hardlockup/perf: Prevent CPU hotplug deadlock
author Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Sep 2017 19:37:04 +0000 (21:37 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 14 Sep 2017 09:41:05 +0000 (11:41 +0200)
The following deadlock is possible in the watchdog hotplug code:

  cpus_write_lock()
    ...
      takedown_cpu()
        smpboot_park_threads()
          smpboot_park_thread()
            kthread_park()
              ->park() := watchdog_disable()
                watchdog_nmi_disable()
                  perf_event_release_kernel();
                    put_event()
                      _free_event()
                        ->destroy() := hw_perf_event_destroy()
                          x86_release_hardware()
                            release_ds_buffers()
                              get_online_cpus()

when a per-CPU watchdog perf event is destroyed, which drops the last
reference to the PMU hardware. The cleanup code there invokes
get_online_cpus(), which instantly deadlocks because the hotplug percpu
rwsem is already write locked.
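
The failing pattern is read-recursion on a reader/writer lock that the
same context already holds for writing. As a rough userspace analogy
(the pthread rwlock and the names below are stand-ins, not the kernel
API), this sketch shows the shape of the bug:

  #include <errno.h>
  #include <pthread.h>
  #include <stdio.h>

  static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

  int main(void)
  {
          int ret;

          pthread_rwlock_wrlock(&hotplug_lock);   /* cpus_write_lock() role */

          /* get_online_cpus() role, from the same thread: */
          ret = pthread_rwlock_rdlock(&hotplug_lock);
          if (ret == EDEADLK) {
                  /*
                   * glibc detects the self-deadlock and fails the call;
                   * the kernel's percpu rwsem has no such check and
                   * blocks forever instead.
                   */
                  puts("read side unobtainable: the writer is ourselves");
          } else if (ret == 0) {
                  pthread_rwlock_unlock(&hotplug_lock);
          }

          pthread_rwlock_unlock(&hotplug_lock);   /* cpus_write_unlock() role */
          return 0;
  }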

To solve this, add a deferring mechanism:

  cpus_write_lock()
   kthread_park()
    watchdog_nmi_disable(deferred)
      perf_event_disable(event);
      move_event_to_deferred(event);
   ....
  cpus_write_unlock()
  cleanup_deferred_events()
    perf_event_release_kernel()

This is still properly serialized against concurrent hotplug via the
cpu_add_remove_lock, which is held by the task that initiated the hotplug
event.

The same deferring mechanism also handles event destruction when the
watchdog threads are parked by mechanisms other than CPU hotplug.
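
As a rough sketch of the deferral scheme in plain C (stand-in names, a
userspace analogy rather than the kernel implementation): under the
write-held lock an event is only unlinked and parked on a deferred
list; the release that would need the read side of the lock runs after
unlock, still serialized by an outer mutex playing the
cpu_add_remove_lock role.

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct event { struct event *next; };

  static struct event *deferred_events;        /* parked, not yet released */
  static pthread_mutex_t add_remove_lock = PTHREAD_MUTEX_INITIALIZER;

  static void move_event_to_deferred(struct event *ev)
  {
          ev->next = deferred_events;
          deferred_events = ev;
  }

  static void cleanup_deferred_events(void)
  {
          while (deferred_events) {
                  struct event *ev = deferred_events;

                  deferred_events = ev->next;
                  free(ev);               /* perf_event_release_kernel() role */
          }
  }

  static void cpu_down(struct event *ev)
  {
          pthread_mutex_lock(&add_remove_lock);
          /* cpus_write_lock(); ... park watchdog threads ... */
          move_event_to_deferred(ev);     /* no release under the lock */
          /* ... cpus_write_unlock(); */
          cleanup_deferred_events();      /* safe: write side dropped */
          pthread_mutex_unlock(&add_remove_lock);
  }

  int main(void)
  {
          struct event *ev = calloc(1, sizeof(*ev));

          if (ev)
                  cpu_down(ev);
          puts("deferred event released after unlock");
          return 0;
  }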

Analyzed-by: Peter Zijlstra <peterz@infradead.org>
Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194146.884469246@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/nmi.h
kernel/cpu.c
kernel/watchdog.c
kernel/watchdog_hld.c

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 7eefe7abf44b070ee4e864cbbf86b195d1a18a9a..80354e6fa86d688f04d167a237b674103edc1ce1 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
 void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
 #else
 static inline void lockup_detector_init(void) { }
 static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
 #endif
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
@@ -77,9 +79,13 @@ static inline void hardlockup_detector_disable(void) {}
 extern void arch_touch_nmi_watchdog(void);
 extern void hardlockup_detector_perf_stop(void);
 extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_cleanup(void);
 #else
 static inline void hardlockup_detector_perf_stop(void) { }
 static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
 #if !defined(CONFIG_HAVE_NMI_WATCHDOG)
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index acf5308fad51f17706197ca6550ce7ae3c8b5378..a96b348591dfdb6aa7d3f9149971852d1d6dc5bf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -24,6 +24,7 @@
 #include <linux/lockdep.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
@@ -734,6 +735,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
 out:
        cpus_write_unlock();
+       /*
+        * Do post unplug cleanup. This is still protected against
+        * concurrent CPU hotplug via cpu_add_remove_lock.
+        */
+       lockup_detector_cleanup();
        return ret;
 }
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index af000956286c18addf92daeedc563aa26f99541d..dd1fd59683c52dab6e150a7624704c0f979a46cc 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -109,8 +109,10 @@ int __weak watchdog_nmi_enable(unsigned int cpu)
 {
        return 0;
 }
+
 void __weak watchdog_nmi_disable(unsigned int cpu)
 {
+       hardlockup_detector_perf_disable();
 }
 
 /*
@@ -193,6 +195,8 @@ __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
 #endif
 #endif
 
+static void __lockup_detector_cleanup(void);
+
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  * lockups can have false positives under extreme conditions. So we generally
@@ -631,6 +635,24 @@ static void set_sample_period(void)
 }
 #endif /* SOFTLOCKUP */
 
+static void __lockup_detector_cleanup(void)
+{
+       lockdep_assert_held(&watchdog_mutex);
+       hardlockup_detector_perf_cleanup();
+}
+
+/**
+ * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+ *
+ * Caller must not hold the cpu hotplug rwsem.
+ */
+void lockup_detector_cleanup(void)
+{
+       mutex_lock(&watchdog_mutex);
+       __lockup_detector_cleanup();
+       mutex_unlock(&watchdog_mutex);
+}
+
 /**
  * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
  *
@@ -665,6 +687,8 @@ static int proc_watchdog_update(void)
 
        watchdog_nmi_reconfigure();
 
+       __lockup_detector_cleanup();
+
        return err;
 
 }
@@ -837,6 +861,7 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
                }
 
                watchdog_nmi_reconfigure();
+               __lockup_detector_cleanup();
        }
 
        mutex_unlock(&watchdog_mutex);
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 7b602714ea5361dc60bc3080d2e4c6e968948440..94111ccb09b529db9650fbf420a0ed7ab0b9936c 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -21,6 +21,8 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
+static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
 static bool hardlockup_detector_disabled;
@@ -239,16 +241,18 @@ out:
        return 0;
 }
 
-void watchdog_nmi_disable(unsigned int cpu)
+/**
+ * hardlockup_detector_perf_disable - Disable the local event
+ */
+void hardlockup_detector_perf_disable(void)
 {
-       struct perf_event *event = per_cpu(watchdog_ev, cpu);
+       struct perf_event *event = this_cpu_read(watchdog_ev);
 
        if (event) {
                perf_event_disable(event);
-               per_cpu(watchdog_ev, cpu) = NULL;
-
-               /* should be in cleanup, but blocks oprofile */
-               perf_event_release_kernel(event);
+               this_cpu_write(watchdog_ev, NULL);
+               this_cpu_write(dead_event, event);
+               cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
 
                /* watchdog_nmi_enable() expects this to be zero initially. */
                if (atomic_dec_and_test(&watchdog_cpus))
@@ -256,6 +260,24 @@ void watchdog_nmi_disable(unsigned int cpu)
        }
 }
 
+/**
+ * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
+ *
+ * Called from lockup_detector_cleanup(). Serialized by the caller.
+ */
+void hardlockup_detector_perf_cleanup(void)
+{
+       int cpu;
+
+       for_each_cpu(cpu, &dead_events_mask) {
+               struct perf_event *event = per_cpu(dead_event, cpu);
+
+               per_cpu(dead_event, cpu) = NULL;
+               perf_event_release_kernel(event);
+       }
+       cpumask_clear(&dead_events_mask);
+}
+
 /**
  * hardlockup_detector_perf_stop - Globally stop watchdog events
  *