sparc64: Manage NMI watchdog enabling like x86.
author	David S. Miller <davem@davemloft.net>
	Wed, 9 Sep 2009 06:16:06 +0000 (23:16 -0700)
committer	David S. Miller <davem@davemloft.net>
	Wed, 9 Sep 2009 06:16:06 +0000 (23:16 -0700)
Use a per-cpu 'wd_enabled' boolean and a global atomic_t count of
the cpus with the watchdog NMI enabled. The count is set to '-1'
if something is wrong with the watchdog and it cannot be used.

Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/include/asm/nmi.h
arch/sparc/kernel/nmi.c
arch/sparc/oprofile/init.c

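The scheme mirrors x86's nmi_active bookkeeping. As an illustration only, below
is a minimal, self-contained userspace C sketch of the same state machine; the
helper names (start_watchdog_on, stop_watchdog_on, mark_watchdog_broken), the
NR_CPUS array, and the C11 atomics are hypothetical stand-ins for the patch's
DEFINE_PER_CPU(short, wd_enabled) flag and kernel atomic_t, not code from the
patch itself:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_CPUS 4	/* illustrative; stands in for the real cpu count */

	/* Stand-in for DEFINE_PER_CPU(short, wd_enabled). */
	static short wd_enabled[NR_CPUS];

	/* Same contract as the patch's nmi_active:
	 * >0: the NMI watchdog is active, but can be disabled
	 * <0: the NMI watchdog has not been set up, and cannot be enabled
	 *  0: the NMI watchdog is disabled, but can be enabled
	 */
	static atomic_int nmi_active;

	static void start_watchdog_on(int cpu)	/* cf. start_nmi_watchdog() */
	{
		wd_enabled[cpu] = 1;
		atomic_fetch_add(&nmi_active, 1);
	}

	static void stop_watchdog_on(int cpu)	/* cf. stop_nmi_watchdog() */
	{
		wd_enabled[cpu] = 0;
		atomic_fetch_sub(&nmi_active, 1);
	}

	static void mark_watchdog_broken(void)	/* cf. check_nmi_watchdog() failure */
	{
		atomic_store(&nmi_active, -1);	/* sticky: nothing may re-enable it */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			start_watchdog_on(cpu);
		printf("active cpus: %d\n", atomic_load(&nmi_active));	/* 4 */

		stop_watchdog_on(2);	/* a cpu whose counter never ticked */
		printf("active cpus: %d\n", atomic_load(&nmi_active));	/* 3 */

		mark_watchdog_broken();
		/* consumers (e.g. oprofile) require a strictly positive count */
		printf("usable: %s\n", atomic_load(&nmi_active) > 0 ? "yes" : "no");
		return 0;
	}

Keeping the per-cpu flag separate from the global count lets the NMI handler
test a cheap cpu-local variable in the hot path, while slow-path callers
(oprofile, the reboot notifier) consult the single shared counter.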
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index fbd546dd4feb658a88df1d895a2aeb822603b8bb..c7d11e435df934a5d54e2c35b8851f8600777c15 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -5,6 +5,6 @@ extern int __init nmi_init(void);
 extern void perfctr_irq(int irq, struct pt_regs *regs);
 extern void nmi_adjust_hz(unsigned int new_hz);
 
-extern int nmi_usable;
+extern atomic_t nmi_active;
 
 #endif /* __NMI_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 2c0cc72d295b079cfab695197eff38fcbd54d586..d1614e8384ae2d93da5e034a2fac9911a70a6923 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
 static int nmi_watchdog_active;
 static int panic_on_timeout;
 
-int nmi_usable;
-EXPORT_SYMBOL_GPL(nmi_usable);
+/* nmi_active:
+ * >0: the NMI watchdog is active, but can be disabled
+ * <0: the NMI watchdog has not been set up, and cannot be enabled
+ *  0: the NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0);          /* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
 
 static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
 static DEFINE_PER_CPU(local_t, alert_counter);
@@ -110,7 +117,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                __get_cpu_var(last_irq_sum) = sum;
                local_set(&__get_cpu_var(alert_counter), 0);
        }
-       if (nmi_usable) {
+       if (__get_cpu_var(wd_enabled)) {
                write_pic(picl_value(nmi_hz));
                pcr_ops->write(pcr_enable);
        }
@@ -121,8 +128,6 @@ static inline unsigned int get_nmi_count(int cpu)
        return cpu_data(cpu).__nmi_count;
 }
 
-static int endflag __initdata;
-
 static __init void nmi_cpu_busy(void *data)
 {
        local_irq_enable_in_hardirq();
@@ -143,12 +148,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
        printk(KERN_WARNING
                "and attach the output of the 'dmesg' command.\n");
 
-       nmi_usable = 0;
+       per_cpu(wd_enabled, cpu) = 0;
+       atomic_dec(&nmi_active);
 }
 
-static void stop_watchdog(void *unused)
+static void stop_nmi_watchdog(void *unused)
 {
        pcr_ops->write(PCR_PIC_PRIV);
+       __get_cpu_var(wd_enabled) = 0;
+       atomic_dec(&nmi_active);
 }
 
 static int __init check_nmi_watchdog(void)
@@ -156,6 +164,9 @@ static int __init check_nmi_watchdog(void)
        unsigned int *prev_nmi_count;
        int cpu, err;
 
+       if (!atomic_read(&nmi_active))
+               return 0;
+
        prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
        if (!prev_nmi_count) {
                err = -ENOMEM;
@@ -172,12 +183,15 @@ static int __init check_nmi_watchdog(void)
        mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
        for_each_online_cpu(cpu) {
+               if (!per_cpu(wd_enabled, cpu))
+                       continue;
                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
                        report_broken_nmi(cpu, prev_nmi_count);
        }
        endflag = 1;
-       if (!nmi_usable) {
+       if (!atomic_read(&nmi_active)) {
                kfree(prev_nmi_count);
+               atomic_set(&nmi_active, -1);
                err = -ENODEV;
                goto error;
        }
@@ -188,12 +202,26 @@ static int __init check_nmi_watchdog(void)
        kfree(prev_nmi_count);
        return 0;
 error:
-       on_each_cpu(stop_watchdog, NULL, 1);
+       on_each_cpu(stop_nmi_watchdog, NULL, 1);
        return err;
 }
 
-static void start_watchdog(void *unused)
+static void start_nmi_watchdog(void *unused)
+{
+       __get_cpu_var(wd_enabled) = 1;
+       atomic_inc(&nmi_active);
+
+       pcr_ops->write(PCR_PIC_PRIV);
+       write_pic(picl_value(nmi_hz));
+
+       pcr_ops->write(pcr_enable);
+}
+
+static void nmi_adjust_hz_one(void *unused)
 {
+       if (!__get_cpu_var(wd_enabled))
+               return;
+
        pcr_ops->write(PCR_PIC_PRIV);
        write_pic(picl_value(nmi_hz));
 
@@ -203,13 +231,13 @@ static void start_watchdog(void *unused)
 void nmi_adjust_hz(unsigned int new_hz)
 {
        nmi_hz = new_hz;
-       on_each_cpu(start_watchdog, NULL, 1);
+       on_each_cpu(nmi_adjust_hz_one, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(nmi_adjust_hz);
 
 static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
 {
-       on_each_cpu(stop_watchdog, NULL, 1);
+       on_each_cpu(stop_nmi_watchdog, NULL, 1);
        return 0;
 }
 
@@ -221,16 +249,14 @@ int __init nmi_init(void)
 {
        int err;
 
-       nmi_usable = 1;
-
-       on_each_cpu(start_watchdog, NULL, 1);
+       on_each_cpu(start_nmi_watchdog, NULL, 1);
 
        err = check_nmi_watchdog();
        if (!err) {
                err = register_reboot_notifier(&nmi_reboot_notifier);
                if (err) {
-                       nmi_usable = 0;
-                       on_each_cpu(stop_watchdog, NULL, 1);
+                       on_each_cpu(stop_nmi_watchdog, NULL, 1);
+                       atomic_set(&nmi_active, -1);
                }
        }
        return err;
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d172f86439b1b8be67b58c0f2ad1b64deb30068e..9ce34fd294c917606c38d1c9f73d0ecf18d84f3b 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-       if (!nmi_usable)
+       if (atomic_read(&nmi_active) <= 0)
                return -ENODEV;
 
        ops->start = timer_start;
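Note the consumer-side test: oprofile's NMI timer refuses to register unless
the count is strictly positive, so the '<= 0' check covers both the 'disabled'
(0) and the 'broken' (-1) states described in the nmi_active comment above. In
the userspace sketch after the file list, the equivalent check is the
'atomic_load(&nmi_active) > 0' test in main().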