perf/x86/intel: add support for PERF_SAMPLE_BRANCH_IND_JUMP
author Stephane Eranian <eranian@google.com>
Thu, 14 May 2015 21:09:59 +0000 (23:09 +0200)
committer Ingo Molnar <mingo@kernel.org>
Sun, 7 Jun 2015 14:08:27 +0000 (16:08 +0200)
This patch adds support for a branch sampling filter
for indirect jumps (IND_JUMP). It enables hardware LBR
IND_JMP filtering where available, and falls back to
software filtering where it is not.
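
For illustration (not part of the patch): a minimal sketch of how a
consumer could request the new filter through perf_event_open(2) on a
kernel with this change applied. The attr fields follow the uapi
definitions; error handling and ring-buffer consumption are abbreviated.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        /* the filter added by this patch: indirect jumps, user level */
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_JUMP |
                                  PERF_SAMPLE_BRANCH_USER;

        /* measure the calling thread on any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* ... mmap the ring buffer and read PERF_RECORD_SAMPLE records ... */
        close(fd);
        return 0;
}

The tooling side of this series exposes the same filter as
"perf record -j ind_jmp".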

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@redhat.com
Cc: dsahern@gmail.com
Cc: jolsa@redhat.com
Cc: kan.liang@intel.com
Cc: namhyung@kernel.org
Link: http://lkml.kernel.org/r/1431637800-31061-3-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event_intel_lbr.c

index 94e5b506caa6d13206956095e646bfacbf558fb1..201e16f6655a9b6fd099ff0a7c6449ca2fb75c62 100644
@@ -96,6 +96,7 @@ enum {
        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
+       X86_BR_IND_JMP          = 1 << 17,/* indirect jump */
 };
 
 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -113,6 +114,7 @@ enum {
         X86_BR_IRQ      |\
         X86_BR_ABORT    |\
         X86_BR_IND_CALL |\
+        X86_BR_IND_JMP  |\
         X86_BR_ZERO_CALL)
 
 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
@@ -523,6 +525,9 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
                        X86_BR_CALL_STACK;
        }
 
+       if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
+               mask |= X86_BR_IND_JMP;
+
        /*
         * stash actual user request into reg, it may
         * be used by fixup code for some CPU
@@ -736,7 +741,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
                        break;
                case 4:
                case 5:
-                       ret = X86_BR_JMP;
+                       ret = X86_BR_IND_JMP;
                        break;
                }
                break;
@@ -844,6 +849,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
         */
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
 };
 
 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -856,6 +862,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
 };
 
 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -870,6 +877,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
 };
 
 /* core */
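
For context (not in the hunks above): the software filter mentioned in
the changelog works by classifying every captured branch with
branch_type() and intersecting the result with the mask built by
intel_pmu_setup_sw_lbr_filter(). A simplified schematic of that
decision, paraphrased (not verbatim) from intel_pmu_lbr_filter() in
this file:

        /* schematic only, simplified from intel_pmu_lbr_filter() */
        type = branch_type(from, to, abort);
        if (type != X86_BR_NONE && br_sel != X86_BR_ALL) {
                if (!(br_sel & type)) {
                        /* entry does not match the request: drop it */
                        cpuc->lbr_entries[i].from = 0;
                        compress = true;
                }
        }

With this patch, an indirect jmp (opcode 0xff with ModRM reg field 4
or 5) classifies as X86_BR_IND_JMP instead of the generic X86_BR_JMP,
so the new PERF_SAMPLE_BRANCH_IND_JUMP mask bit can match it here.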