 	int        (*handle_irq)(struct pt_regs *, int);
 	u64        (*save_disable_all)(void);
 	void       (*restore_all)(u64);
-	void       (*ack_status)(u64);
 	void       (*enable)(int, u64);
 	void       (*disable)(int, u64);
 	unsigned   eventsel;
 	return status;
 }
 
-static void intel_pmu_ack_status(u64 ack)
+static inline void intel_pmu_ack_status(u64 ack)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
-static void amd_pmu_ack_status(u64 ack)
-{
-}
-
-static void hw_perf_ack_status(u64 ack)
-{
-	if (unlikely(!perf_counters_initialized))
-		return;
-
-	x86_pmu->ack_status(ack);
-}
-
 static void intel_pmu_enable_counter(int idx, u64 config)
 {
 	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
 			config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 		__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
-	hw_perf_ack_status(ack);
+	intel_pmu_ack_status(ack);
 
 	/*
 	 * Repeat if there is more work to be done:
 	.handle_irq        = intel_pmu_handle_irq,
 	.save_disable_all  = intel_pmu_save_disable_all,
 	.restore_all       = intel_pmu_restore_all,
-	.ack_status        = intel_pmu_ack_status,
 	.enable            = intel_pmu_enable_counter,
 	.disable           = intel_pmu_disable_counter,
 	.eventsel          = MSR_ARCH_PERFMON_EVENTSEL0,
 	.handle_irq        = amd_pmu_handle_irq,
 	.save_disable_all  = amd_pmu_save_disable_all,
 	.restore_all       = amd_pmu_restore_all,
-	.ack_status        = amd_pmu_ack_status,
 	.enable            = amd_pmu_enable_counter,
 	.disable           = amd_pmu_disable_counter,
 	.eventsel          = MSR_K7_EVNTSEL0,
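
The net effect, sketched below as a minimal standalone C program (not the kernel code itself): the empty amd_pmu_ack_status() stub and the hw_perf_ack_status() wrapper disappear, struct x86_pmu loses its ack_status pointer, and the Intel interrupt path acks overflowed counters directly with a single write to MSR_CORE_PERF_GLOBAL_OVF_CTRL. The AMD callback could go because, as the stub shows, it did nothing; there is no global overflow-control MSR to write on that PMU. In the sketch, mock_wrmsrl(), struct x86_pmu_sketch and the main() driver are hypothetical stand-ins; only the MSR constant and the ack semantics mirror the patch.

#include <stdio.h>
#include <stdint.h>

#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390

/* Hypothetical stand-in for the kernel's wrmsrl(). */
static void mock_wrmsrl(uint32_t msr, uint64_t val)
{
	printf("wrmsrl(0x%x, 0x%llx)\n", msr, (unsigned long long)val);
}

/* After the patch: Intel-only, called directly, no pointer indirection. */
static inline void intel_pmu_ack_status(uint64_t ack)
{
	mock_wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

/* The slimmed-down ops table: no ack_status member any more. */
struct x86_pmu_sketch {
	uint64_t (*save_disable_all)(void);
	void (*restore_all)(uint64_t);
	void (*enable)(int, uint64_t);
	void (*disable)(int, uint64_t);
	unsigned eventsel;
};

int main(void)
{
	/* Pretend counters 0 and 2 overflowed; ack both in one MSR write. */
	uint64_t status = (1ULL << 0) | (1ULL << 2);

	intel_pmu_ack_status(status);
	return 0;
}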