struct cpu_hw_counters {
struct perf_counter *counters[X86_PMC_IDX_MAX];
unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
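+ /*
+ * Interrupt throttling state: sched_clock() timestamp of the previous
+ * PMU interrupt, the saved MSR_CORE_PERF_GLOBAL_CTRL value, and a flag
+ * noting that the NMI handler left the PMU disabled:
+ */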
+ u64 last_interrupt;
+ u64 global_enable;
+ int throttled;
};
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
int bit, cpu = smp_processor_id();
- u64 ack, status, saved_global;
- struct cpu_hw_counters *cpuc;
+ u64 ack, status, now;
+ struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
/* Disable counters globally */
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
ack_APIC_irq();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
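+ /*
+ * Throttle detection: if the previous PMU interrupt on this CPU fired
+ * less than PERFMON_MIN_PERIOD_NS ago, leave the PMU disabled on the
+ * way out and let perf_counter_unthrottle() re-enable it later:
+ */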
+ now = sched_clock();
+ if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
+ cpuc->throttled = 1;
+ cpuc->last_interrupt = now;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
if (!status)
goto out;
out:
/*
- * Restore - do not reenable when global enable is off:
+ * Restore - do not reenable when global enable is off or throttled:
*/
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+ if (!cpuc->throttled)
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+}
+
+void perf_counter_unthrottle(void)
+{
+ struct cpu_hw_counters *cpuc;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ return;
+
+ if (unlikely(!perf_counters_initialized))
+ return;
+
+ cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
+ if (cpuc->throttled) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+ cpuc->throttled = 0;
+ }
}
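A minimal sketch of the intended call site (an assumption, the hook-up is not
part of this diff): perf_counter_unthrottle() is presumably driven once per
timer tick on every CPU, e.g. from update_process_times() in kernel/timer.c:

	void update_process_times(int user_tick)
	{
		struct task_struct *p = current;

		account_process_tick(p, user_tick);
		run_local_timers();
		scheduler_tick();
		/* re-enable the PMU if the NMI handler throttled it: */
		perf_counter_unthrottle();
		run_posix_cpu_timers(p);
	}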
void smp_perf_counter_interrupt(struct pt_regs *regs)
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
+extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
return !counter->hw_event.raw && counter->hw_event.type < 0;
}
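+ /*
+ * PMU interrupts arriving closer together than this (10 usecs, i.e. a
+ * sustained rate above ~100 KHz) cause the PMU to be throttled:
+ */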
+#define PERFMON_MIN_PERIOD_NS 10000
+
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void perf_counter_exit_task(struct task_struct *child) { }
static inline void perf_counter_notify(struct pt_regs *regs) { }
static inline void perf_counter_print_debug(void) { }
+static inline void perf_counter_unthrottle(void) { }
static inline void hw_perf_restore(u64 ctrl) { }
static inline u64 hw_perf_save_disable(void) { return 0; }
static inline int perf_counter_task_disable(void) { return -EINVAL; }