void *bhrb_context;
struct perf_branch_stack bhrb_stack;
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
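+ /* SPRN_IC value latched when the event is added (POWER9 DD1 workaround) */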
+ u64 ic_init;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
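+/* POWER9 DD1 is 64-bit only, so the IC workaround never applies here. */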
+static bool use_ic(u64 event)
+{
+ return false;
+}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
mtspr(SPRN_PMC5, pmcs[4]);
mtspr(SPRN_PMC6, pmcs[5]);
}
+
+static bool use_ic(u64 event)
+{
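+ /*
+ * On POWER9 DD1 the PMC value cannot be trusted for these
+ * instruction counting events; use the Instruction Counter
+ * SPR for them instead.
+ */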
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
+ (event == 0x200f2 || event == 0x300f2))
+ return true;
+
+ return false;
+}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (event->hw.state & PERF_HES_STOPPED)
return;
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
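+ /*
+ * POWER9 DD1 workaround: override the PMC value with the
+ * Instruction Counter SPR, expressed relative to the value
+ * latched when the event was added; the else arm handles
+ * the IC wrapping past the latched value.
+ */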
+ if (use_ic(event->attr.config)) {
+ val = mfspr(SPRN_IC);
+ if (val > cpuhw->ic_init)
+ val = val - cpuhw->ic_init;
+ else
+ val = val + (0 - cpuhw->ic_init);
+ }
local64_set(&event->hw.prev_count, val);
return;
}
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
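+ /* Same DD1 workaround as above: report IC relative to ic_init. */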
+ if (use_ic(event->attr.config)) {
+ val = mfspr(SPRN_IC);
+ if (val > cpuhw->ic_init)
+ val = val - cpuhw->ic_init;
+ else
+ val = val + (0 - cpuhw->ic_init);
+ }
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
event->attr.branch_sample_type);
}
+ /*
+ * Workaround for POWER9 DD1: latch the current Instruction
+ * Counter (IC) SPR value so that power_pmu_read() can report
+ * instruction counts as a delta from the point the event
+ * was added.
+ */
+ if (use_ic(event->attr.config))
+ cpuhw->ic_init = mfspr(SPRN_IC);
+
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;