@@ ... @@
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
 	u64			throttle_ctrl;
-	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	int			enabled;
 };
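The new active field uses the kernel's fixed-size bitmap idiom: BITS_TO_LONGS(X86_PMC_IDX_MAX) rounds the bit count up to whole unsigned longs, one bit per hardware counter slot, and the set_bit()/clear_bit()/test_bit() helpers address individual bits in that array. A minimal userspace sketch of the same pattern; the helper re-implementations below are illustrative stand-ins, not the kernel's atomic versions from <linux/bitops.h>:

#include <stdio.h>
#include <limits.h>

#define X86_PMC_IDX_MAX		64
#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Illustrative, non-atomic stand-ins for the kernel helpers. */
static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	/* One bit per counter slot, exactly as in cpu_hw_counters. */
	unsigned long active[BITS_TO_LONGS(X86_PMC_IDX_MAX)] = { 0 };

	set_bit(2, active);	/* counter 2 comes online */
	set_bit(40, active);	/* lands in the second long on 32-bit */

	for (int idx = 0; idx < X86_PMC_IDX_MAX; idx++)
		if (test_bit(idx, active))
			printf("counter %d active\n", idx);

	clear_bit(2, active);	/* counter 2 torn down */
	return 0;
}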
@@ ... @@
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ ... @@
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
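Both hunks above touch the same loop shape: skip any counter slot whose bit is clear in cpuc->active, read its event-select MSR, and flip ARCH_PERFMON_EVENTSEL0_ENABLE (the first loop clears it to stop counting, the second sets it again). MSR_K7_EVNTSEL0 is the first AMD K7/K8 event-select MSR, so these are the AMD disable-all/restore paths. A rough userspace model of that read-modify-write, with the MSRs replaced by a plain array since rdmsrl()/wrmsrl() touch real hardware; NUM_COUNTERS and the two function names here are assumptions for the sketch, not the kernel's:

#include <stdint.h>

#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)	/* the EN bit of the event-select MSR */
#define NUM_COUNTERS			4		/* assumption; the kernel knows the real count */

static uint64_t evntsel[NUM_COUNTERS];	/* stand-in for MSR_K7_EVNTSEL0 + idx */
static unsigned long active;		/* stand-in for cpuc->active */

/* Mirror of the first loop: stop every counter marked active. */
static void save_disable_all(void)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(active & (1UL << idx)))	/* test_bit(idx, cpuc->active) */
			continue;
		uint64_t val = evntsel[idx];	/* rdmsrl() */
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		evntsel[idx] = val;		/* wrmsrl() */
	}
}

/* Mirror of the second loop: re-arm every active counter. */
static void restore_all(void)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(active & (1UL << idx)))
			continue;
		uint64_t val = evntsel[idx];
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		evntsel[idx] = val;
	}
}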
@@ ... @@
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	set_bit(idx, cpuc->active_mask);
+	set_bit(idx, cpuc->active);
 	if (cpuc->enabled)
 		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ ... @@
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	clear_bit(idx, cpuc->active_mask);
+	clear_bit(idx, cpuc->active);
 	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
 }
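The last two hunks show where the bitmap is maintained: the counter-enable path sets the slot's bit before the event-select MSR is written (honouring cpuc->enabled when deciding whether to set the ENABLE flag), and the disable path clears it before the final wrmsrl(), so the disable-all/restore loops above never touch a slot that was never armed. The rename itself changes no behavior; it only brings the field's name in line with the neighbouring used bitmap, which likewise carries no _mask suffix. Continuing the userspace sketch above, a hypothetical lifecycle:

/* Hypothetical driver for the model above; bit 0 plays the role of one
 * armed counter. In the kernel these steps happen in the enable/disable
 * paths shown in the last two hunks. */
int main(void)
{
	active |= 1UL << 0;				/* enable path: set_bit(0, cpuc->active) */
	evntsel[0] |= ARCH_PERFMON_EVENTSEL0_ENABLE;	/* wrmsrl() with ENABLE set */

	save_disable_all();	/* counter 0 stops counting   */
	restore_all();		/* and is re-armed afterwards */

	active &= ~(1UL << 0);				/* disable path: clear_bit(0, cpuc->active) */
	return 0;
}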