perf_counter: x86: Fix up the amd NMI/INT throttle
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Wed, 13 May 2009 11:21:36 +0000 (13:21 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 15 May 2009 07:47:01 +0000 (09:47 +0200)
perf_counter_unthrottle() restores throttle_ctrl, but it is never set.
Also, we fail to disable all counters when throttling.

[ Impact: fix rare stuck perf-counters when they are throttled ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c

index c19e927b6979b8a0c3d273a106bdc5335682d35e..7601c014f8f6497f5036c51436c53c2c25aa13ea 100644 (file)
@@ -334,6 +334,8 @@ static u64 amd_pmu_save_disable_all(void)
         * right thing.
         */
        barrier();
+       if (!enabled)
+               goto out;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;
@@ -347,6 +349,7 @@ static u64 amd_pmu_save_disable_all(void)
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
 
+out:
        return enabled;
 }
 
@@ -787,32 +790,43 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
        int handled = 0;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
-       int idx;
+       int idx, throttle = 0;
+
+       cpuc->throttle_ctrl = cpuc->enabled;
+       cpuc->enabled = 0;
+       barrier();
+
+       if (cpuc->throttle_ctrl) {
+               if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+                       throttle = 1;
+       }
 
-       ++cpuc->interrupts;
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               int disable = 0;
+
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
+
                counter = cpuc->counters[idx];
                hwc = &counter->hw;
                val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-                       continue;
+                       goto next;
+
                /* counter overflow */
                x86_perf_counter_set_period(counter, hwc, idx);
                handled = 1;
                inc_irq_stat(apic_perf_irqs);
-               if (perf_counter_overflow(counter, nmi, regs, 0))
-                       amd_pmu_disable_counter(hwc, idx);
-               else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
-                       /*
-                        * do not reenable when throttled, but reload
-                        * the register
-                        */
+               disable = perf_counter_overflow(counter, nmi, regs, 0);
+
+next:
+               if (disable || throttle)
                        amd_pmu_disable_counter(hwc, idx);
-               else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-                       amd_pmu_enable_counter(hwc, idx);
        }
+
+       if (cpuc->throttle_ctrl && !throttle)
+               cpuc->enabled = 1;
+
        return handled;
 }
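
For context, here is a minimal sketch of the unthrottle side that consumes the state this patch now sets: amd_pmu_handle_irq() saves the enabled flag into cpuc->throttle_ctrl and clears cpuc->enabled, leaving the counters disabled once PERFMON_MAX_INTERRUPTS is exceeded, and the periodic unthrottle path is then expected to restore that state. Apart from the names taken from the diff (enabled, throttle_ctrl, interrupts, PERFMON_MAX_INTERRUPTS), everything below is an assumption for illustration, not the in-tree code.

/*
 * Sketch only -- not the in-tree perf_counter_unthrottle().  Illustrates
 * how the per-CPU state written in amd_pmu_handle_irq() is meant to be
 * consumed once the NMI storm subsides.
 */
struct cpu_hw_counters_sketch {
	int	enabled;	/* is the PMU globally enabled? */
	int	throttle_ctrl;	/* enabled state saved at NMI time */
	int	interrupts;	/* NMIs seen in the current period */
};

static void unthrottle_sketch(struct cpu_hw_counters_sketch *cpuc)
{
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		/* we throttled: restore the state saved at NMI time */
		cpuc->enabled = cpuc->throttle_ctrl;
	}
	cpuc->interrupts = 0;	/* start a fresh accounting window */
}

Without this patch, throttle_ctrl was never written in the NMI handler, so the restore above was effectively a no-op and a throttled counter could stay disabled.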