From: Robert Richter
Date: Mon, 2 Apr 2012 18:19:11 +0000 (+0200)
Subject: perf/x86-ibs: Take instruction pointer from ibs sample
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=d47e8238cd76f1ffa7c8cd30e08b8e9074fd597e;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

perf/x86-ibs: Take instruction pointer from ibs sample

Each IBS sample contains a linear address of the instruction that
caused the sample to trigger. This address is more precise than the
rip that was taken from the interrupt handler's stack. Update the rip
with that address.

We use this in the next patch to implement precise-event sampling on
AMD systems using IBS.

Signed-off-by: Robert Richter
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1333390758-10893-6-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8a3c75d824b7..4e40a64315c9 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -158,6 +158,7 @@ struct x86_pmu_capability {
 #define IBS_CAPS_OPCNT		(1U<<4)
 #define IBS_CAPS_BRNTRGT	(1U<<5)
 #define IBS_CAPS_OPCNTEXT	(1U<<6)
+#define IBS_CAPS_RIPINVALIDCHK	(1U<<7)
 
 #define IBS_CAPS_DEFAULT	(IBS_CAPS_AVAIL		\
 				 | IBS_CAPS_FETCHSAM	\
@@ -170,14 +171,14 @@ struct x86_pmu_capability {
 #define IBSCTL_LVT_OFFSET_VALID	(1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK	0x0F
 
-/* IbsFetchCtl bits/masks */
+/* ibs fetch bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
 
-/* IbsOpCtl bits */
+/* ibs op bits/masks */
 /* lower 4 bits of the current count are ignored: */
 #define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
 #define IBS_OP_CNT_CTL		(1ULL<<19)
@@ -185,6 +186,7 @@ struct x86_pmu_capability {
 #define IBS_OP_ENABLE		(1ULL<<17)
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
 #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL /* not a register bit mask */
+#define IBS_RIP_INVALID		(1ULL<<38)
 
 extern u32 get_ibs_caps(void);
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index bc401bd9f14a..cc1f3293d6c2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -9,6 +9,7 @@
 #include <linux/perf_event.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/ptrace.h>
 
 #include <asm/apic.h>
 
@@ -382,7 +383,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	struct perf_raw_record raw;
 	struct pt_regs regs;
 	struct perf_ibs_data ibs_data;
-	int offset, size, overflow, reenable;
+	int offset, size, check_rip, offset_max, throttle = 0;
 	unsigned int msr;
 	u64 *buf, config;
 
@@ -413,28 +414,41 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	perf_ibs_event_update(perf_ibs, event, config);
 	perf_sample_data_init(&data, 0, hwc->last_period);
+	if (!perf_ibs_set_period(perf_ibs, hwc, &config))
+		goto out;	/* no sw counter overflow */
+
+	ibs_data.caps = ibs_caps;
+	size = 1;
+	offset = 1;
+	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
+	if (event->attr.sample_type & PERF_SAMPLE_RAW)
+		offset_max = perf_ibs->offset_max;
+	else if (check_rip)
+		offset_max = 2;
+	else
+		offset_max = 1;
+	do {
+		rdmsrl(msr + offset, *buf++);
+		size++;
+		offset = find_next_bit(perf_ibs->offset_mask,
+				       perf_ibs->offset_max,
+				       offset + 1);
+	} while (offset < offset_max);
+	ibs_data.size = sizeof(u64) * size;
+
+	regs = *iregs;
+	if (!check_rip || !(ibs_data.regs[2] & IBS_RIP_INVALID))
+		instruction_pointer_set(&regs, ibs_data.regs[1]);
 
 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
-		ibs_data.caps = ibs_caps;
-		size = 1;
-		offset = 1;
-		do {
-			rdmsrl(msr + offset, *buf++);
-			size++;
-			offset = find_next_bit(perf_ibs->offset_mask,
-					       perf_ibs->offset_max,
-					       offset + 1);
-		} while (offset < perf_ibs->offset_max);
-		raw.size = sizeof(u32) + sizeof(u64) * size;
+		raw.size = sizeof(u32) + ibs_data.size;
 		raw.data = ibs_data.data;
 		data.raw = &raw;
 	}
 
-	regs = *iregs; /* XXX: update ip from ibs sample */
-
-	overflow = perf_ibs_set_period(perf_ibs, hwc, &config);
-	reenable = !(overflow && perf_event_overflow(event, &data, &regs));
-	config = (config >> 4) | (reenable ? perf_ibs->enable_mask : 0);
+	throttle = perf_event_overflow(event, &data, &regs);
+out:
+	config = (config >> 4) | (throttle ? 0 : perf_ibs->enable_mask);
 	perf_ibs_enable_event(hwc, config);
 
 	perf_event_update_userpage(event);
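
For reference, a minimal sketch (plain C, not the kernel code itself) of the rip-selection rule the hunk above introduces. It assumes a hypothetical helper ibs_sample_ip() and that regs[1] and regs[2] hold the sequentially read IbsOpRip and IbsOpData MSR values, matching the layout of ibs_data.regs in the handler:

#include <stdbool.h>
#include <stdint.h>

/* IbsOpData bit, mirroring the IBS_RIP_INVALID define added above. */
#define IBS_RIP_INVALID (1ULL << 38)

/*
 * Prefer the precise linear address captured by the IBS hardware
 * (IbsOpRip) over the rip taken from the interrupt handler's stack,
 * unless the CPU supports the rip-invalid check (IBS_CAPS_RIPINVALIDCHK)
 * and flags this particular sample's rip as invalid.
 */
static uint64_t ibs_sample_ip(const uint64_t *regs, uint64_t stack_rip,
			      bool check_rip)
{
	if (check_rip && (regs[2] & IBS_RIP_INVALID))
		return stack_rip;	/* keep the rip from iregs */
	return regs[1];			/* IbsOpRip from the sample */
}

The fetch PMU has no rip-invalid flag, so check_rip is only set for op sampling on CPUs advertising the capability; without it the sampled address is trusted as-is, which is what the kernel hunk does as well.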