perf_counter, x86: generic use of cpuc->active
author Robert Richter <robert.richter@amd.com>
Wed, 29 Apr 2009 10:47:16 +0000 (12:47 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 29 Apr 2009 12:51:10 +0000 (14:51 +0200)
cpuc->active will now be used to indicate an enabled counter, which
also implies valid pointers in cpuc->counters[]. In contrast,
cpuc->used only locks the counter, but it can still be uninitialized.
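
As a rough illustration of the intended semantics, here is a standalone
sketch (not kernel code; the struct, helper and bit layout below are made
up for the example): a slot that is merely "used" must not be
dereferenced, while an "active" slot guarantees a valid counters[]
pointer.

#include <stdio.h>
#include <stdbool.h>

#define NUM_COUNTERS 4

struct counter { long value; };

/* Simplified stand-in for struct cpu_hw_counters. */
struct cpu_hw_counters_model {
	unsigned long used;                     /* slot reserved          */
	unsigned long active;                   /* enabled, pointer valid */
	struct counter *counters[NUM_COUNTERS];
};

/*
 * The interrupt/NMI path may only dereference counters[idx] when the
 * active bit is set; a used-but-inactive slot may still hold a stale
 * or NULL pointer.
 */
static bool slot_safe_to_touch(struct cpu_hw_counters_model *cpuc, int idx)
{
	return cpuc->active & (1UL << idx);
}

int main(void)
{
	struct cpu_hw_counters_model cpuc = { 0 };
	struct counter c = { 0 };
	int idx = 0;

	cpuc.used |= 1UL << idx;          /* reserved only           */
	printf("after reserve: safe=%d\n", slot_safe_to_touch(&cpuc, idx));

	cpuc.counters[idx] = &c;          /* publish the pointer ... */
	cpuc.active |= 1UL << idx;        /* ... then mark it active */
	printf("after enable:  safe=%d\n", slot_safe_to_touch(&cpuc, idx));

	return 0;
}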

[ Impact: refactor and generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-20-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c

index 9ec51a662db5dce81b07725e988c05fc936543f7..f7fd4a355159b4c0d6271f846304e8b9f50b2aa2 100644
@@ -424,7 +424,6 @@ static void amd_pmu_enable_counter(int idx, u64 config)
 {
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
-       set_bit(idx, cpuc->active);
        if (cpuc->enabled)
                config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 
@@ -446,9 +445,6 @@ static void intel_pmu_disable_counter(int idx, u64 config)
 
 static void amd_pmu_disable_counter(int idx, u64 config)
 {
-       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-
-       clear_bit(idx, cpuc->active);
        wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
 
 }
@@ -633,10 +629,7 @@ try_generic:
        __x86_pmu_disable(counter, hwc, idx);
 
        cpuc->counters[idx] = counter;
-       /*
-        * Make it visible before enabling the hw:
-        */
-       barrier();
+       set_bit(idx, cpuc->active);
 
        x86_perf_counter_set_period(counter, hwc, idx);
        __x86_pmu_enable(counter, hwc, idx);
@@ -700,10 +693,13 @@ static void x86_pmu_disable(struct perf_counter *counter)
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;
 
+       /*
+        * Must be done before we disable, otherwise the nmi handler
+        * could reenable again:
+        */
+       clear_bit(idx, cpuc->active);
        __x86_pmu_disable(counter, hwc, idx);
 
-       clear_bit(idx, cpuc->used);
-       cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
@@ -715,6 +711,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
+       cpuc->counters[idx] = NULL;
+       clear_bit(idx, cpuc->used);
 }
 
 /*
@@ -763,7 +761,7 @@ again:
                struct perf_counter *counter = cpuc->counters[bit];
 
                clear_bit(bit, (unsigned long *) &status);
-               if (!counter)
+               if (!test_bit(bit, cpuc->active))
                        continue;
 
                intel_pmu_save_and_restart(counter);
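
Putting the hunks together, the handler-side convention is that the
overflow path skips any status bit whose counter is not marked active,
rather than testing the counters[] pointer for NULL. A minimal
standalone sketch of that check (not kernel code; names and the
__builtin_ctzl() loop are illustrative):

#include <stdio.h>

#define NUM_COUNTERS 4

struct counter { long value; };

struct cpu_hw_counters_model {
	unsigned long active;
	struct counter *counters[NUM_COUNTERS];
};

static void handle_overflow(struct cpu_hw_counters_model *cpuc,
			    unsigned long status)
{
	while (status) {
		int bit = __builtin_ctzl(status);  /* lowest pending bit */

		status &= ~(1UL << bit);
		if (!(cpuc->active & (1UL << bit)))
			continue;                  /* being torn down: skip */

		/* In the kernel this is intel_pmu_save_and_restart(). */
		cpuc->counters[bit]->value++;
		printf("serviced overflow on counter %d\n", bit);
	}
}

int main(void)
{
	struct counter c2 = { 0 };
	struct cpu_hw_counters_model cpuc = { .active = 1UL << 2 };

	cpuc.counters[2] = &c2;
	/* Bit 1 is pending but not active: it is skipped, not dereferenced. */
	handle_overflow(&cpuc, (1UL << 2) | (1UL << 1));
	return 0;
}

Clearing the active bit before the hardware counter is disabled (the
x86_pmu_disable() hunk above) is what makes this test sufficient: an NMI
arriving mid-teardown already sees the bit cleared and leaves the slot
alone.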