x86, perfcounters: read out MSR_CORE_PERF_GLOBAL_STATUS with counters disabled
author Ingo Molnar <mingo@elte.hu>
Mon, 8 Dec 2008 13:20:16 +0000 (14:20 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 8 Dec 2008 14:56:42 +0000 (15:56 +0100)
Impact: make perfcounter NMI and IRQ sequence more robust

Make __smp_perf_counter_interrupt() a bit more conservative: first disable
all counters, then read out the status. Most invocations occur because real
events are pending, so there is no performance impact.

The code flow also becomes a bit simpler this way.
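
For orientation, a minimal sketch of the handler's ordering after this change. The helper names (read_global_status(), disable_counters_globally(), handle_and_ack_overflows(), maybe_reenable_counters()) are illustrative stand-ins for the rdmsrl()/wrmsr()/ack_APIC_irq() calls and the per-bit overflow loop visible in the diff below, not the kernel's actual API:

#include <stdint.h>

typedef uint64_t u64;

/* Hypothetical stand-ins for the real MSR accessors and APIC ack. */
static u64  read_global_status(void)        { return 0; } /* rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, ...) */
static void disable_counters_globally(void) { }           /* wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0)   */
static void ack_apic_irq(void)              { }           /* ack_APIC_irq()                           */
static void handle_and_ack_overflows(u64 s) { (void)s; }  /* the for_each_bit() loop in the diff      */
static void maybe_reenable_counters(void)   { }           /* "Do not reenable when global enable is off" */

/* Ordering after the patch: disable counters first, then read the status. */
static void perf_counter_interrupt_sketch(void)
{
	u64 status;

	disable_counters_globally();
	ack_apic_irq();

	status = read_global_status();
	if (!status)
		goto out;

	do {
		handle_and_ack_overflows(status);
		status = read_global_status();  /* new overflows may have arrived meanwhile */
	} while (status);
out:
	maybe_reenable_counters();
}

int main(void)
{
	perf_counter_interrupt_sketch();
	return 0;
}

The point of the reordering is that the early-exit path ("status == 0") no longer races with counters that are still running: by the time the status MSR is read, the counters are already globally disabled.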

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c

index 82440cbed0e6af389411d3f128c6c86f381d78f0..615e953208ef95e9f0461a47bef5fc3edad3032e 100644 (file)
@@ -383,18 +383,16 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
        struct cpu_hw_counters *cpuc;
        u64 ack, status;
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-       if (!status) {
-               ack_APIC_irq();
-               return;
-       }
-
        /* Disable counters globally */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        ack_APIC_irq();
 
        cpuc = &per_cpu(cpu_hw_counters, cpu);
 
+       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+       if (!status)
+               goto out;
+
 again:
        ack = status;
        for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
@@ -440,7 +438,7 @@ again:
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
-
+out:
        /*
         * Do not reenable when global enable is off:
         */