x86/perf: Exclude the debug stack from the callchains
author    Frederic Weisbecker <fweisbec@gmail.com>    Sat, 5 Dec 2009 11:01:17 +0000 (12:01 +0100)
committer Frederic Weisbecker <fweisbec@gmail.com>    Sun, 6 Dec 2009 07:27:21 +0000 (08:27 +0100)
Dumping the callchains from breakpoint events with perf gives strange
results:

3.75%             perf  [kernel]           [k] _raw_read_unlock
                       |
                       --- _raw_read_unlock
                           perf_callchain
                           perf_prepare_sample
                           __perf_event_overflow
                           perf_swevent_overflow
                           perf_swevent_add
                           perf_bp_event
                           hw_breakpoint_exceptions_notify
                           notifier_call_chain
                           __atomic_notifier_call_chain
                           atomic_notifier_call_chain
                           notify_die
                           do_debug
                           debug
                           munmap

The callchain is polluted by the whole debug stack. Like the NMI stack, the
debug stack is unwanted here: it is part of the profiling path itself and not
helpful to the user.

Ignore it.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: "K. Prasad" <prasad@linux.vnet.ibm.com>
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c1bbed1021d96c63e96593f1687d50bc7716b22e..d35f26076ae52160f88782a19f77a0b08684ded0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2287,7 +2287,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_nmi_frame);
+static DEFINE_PER_CPU(int, in_ignored_frame);
 
 
 static void
@@ -2303,8 +2303,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-       per_cpu(in_nmi_frame, smp_processor_id()) =
-                       x86_is_stack_id(NMI_STACK, name);
+       per_cpu(in_ignored_frame, smp_processor_id()) =
+                       x86_is_stack_id(NMI_STACK, name) ||
+                       x86_is_stack_id(DEBUG_STACK, name);
 
        return 0;
 }
@@ -2313,7 +2314,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry *entry = data;
 
-       if (per_cpu(in_nmi_frame, smp_processor_id()))
+       if (per_cpu(in_ignored_frame, smp_processor_id()))
                return;
 
        if (reliable)
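
For illustration, here is a minimal, self-contained C sketch of the pattern the
patch relies on: a stack-change callback records whether the walker is currently
on an ignored (NMI or debug) stack, and the per-address callback drops entries
while that flag is set. Everything below is a userspace stand-in, not kernel
code: the frame array, the stack names and the plain global that replaces the
per-CPU variable are all hypothetical.

/* stack_filter_sketch.c: standalone model of the "ignored frame" filtering. */
#include <stdio.h>
#include <string.h>

/* Hypothetical frame descriptor: the stack it lives on and its address. */
struct frame {
	const char *stack_name;	/* e.g. "NMI", "DEBUG", or NULL for the task stack */
	unsigned long addr;
};

static int in_ignored_frame;	/* stand-in for the per-CPU in_ignored_frame */

/* Called whenever the walker crosses onto a differently named stack. */
static void on_stack(const char *name)
{
	in_ignored_frame = name &&
		(strcmp(name, "NMI") == 0 || strcmp(name, "DEBUG") == 0);
}

/* Called once per frame; addresses found on ignored stacks are dropped. */
static void on_address(unsigned long addr)
{
	if (in_ignored_frame)
		return;
	printf("  callchain entry: %#lx\n", addr);
}

int main(void)
{
	/* A fabricated trace: two frames on the debug stack, two on the task stack. */
	const struct frame trace[] = {
		{ "DEBUG", 0xc0de1000UL },	/* breakpoint handling */
		{ "DEBUG", 0xc0de1100UL },	/* breakpoint handling */
		{ NULL,    0x00401200UL },	/* interrupted user/task frame */
		{ NULL,    0x00401300UL },	/* interrupted user/task frame */
	};
	const char *cur = "";

	for (size_t i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		const char *name = trace[i].stack_name ? trace[i].stack_name : "";

		/* Notify the "stack" callback only when the stack changes. */
		if (strcmp(name, cur) != 0) {
			cur = name;
			on_stack(trace[i].stack_name);
		}
		on_address(trace[i].addr);
	}
	return 0;
}

In the patch above, backtrace_stack() and backtrace_address() play these two
roles, with the flag kept in the per-CPU in_ignored_frame variable so that
concurrent callchain walks on different CPUs do not interfere.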