From fc18822510721fe694d273c5211c71ea52796d76 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Fri, 1 Jul 2016 23:02:05 -0500
Subject: [PATCH] perf/x86: Fix 32-bit perf user callgraph collection

A basic perf callgraph record operation causes an immediate panic on a
32-bit kernel compiled with CONFIG_CC_STACKPROTECTOR=y:

  $ perf record -g ls

  Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: c0404fbd

  CPU: 0 PID: 998 Comm: ls Not tainted 4.7.0-rc5+ #1
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.9.1-1.fc24 04/01/2014
   c0dd5967 ff7afe1c 00000086 f41dbc2c c07445a0 464c457f f41dbca8 f41dbc44
   c05646f4 f41dbca8 464c457f f41dbca8 464c457f f41dbc54 c04625be c0ce56fc
   c0404fbd f41dbc88 c0404fbd b74668f0 f41dc000 00000000 c0000000 00000000
  Call Trace:
   [] dump_stack+0x58/0x78
   [] panic+0x8e/0x1c6
   [] __stack_chk_fail+0x1e/0x30
   [] ? perf_callchain_user+0x22d/0x230
   [] perf_callchain_user+0x22d/0x230
   [] get_perf_callchain+0x1ff/0x270
   [] perf_callchain+0x78/0x90
   [] perf_prepare_sample+0x24b/0x370
   [] perf_event_output_forward+0x24/0x70
   [] __perf_event_overflow+0xa0/0x210
   [] ? cpu_clock_event_read+0x43/0x50
   [] perf_swevent_hrtimer+0x101/0x180
   [] ? kmap_atomic_prot+0x35/0x140
   [] ? get_page_from_freelist+0x279/0x950
   [] ? vma_interval_tree_remove+0x158/0x230
   [] ? wp_page_copy.isra.82+0x2f4/0x630
   [] ? page_add_file_rmap+0x1d/0x50
   [] ? unlock_page+0x61/0x80
   [] ? filemap_map_pages+0x305/0x320
   [] ? handle_mm_fault+0xb7f/0x1560
   [] ? timerqueue_del+0x1b/0x70
   [] ? __remove_hrtimer+0x2e/0x60
   [] __hrtimer_run_queues+0xcb/0x2a0
   [] ? __perf_event_overflow+0x210/0x210
   [] hrtimer_interrupt+0x8a/0x180
   [] local_apic_timer_interrupt+0x32/0x60
   [] smp_apic_timer_interrupt+0x33/0x50
   [] apic_timer_interrupt+0x34/0x3c
  Kernel Offset: disabled
  ---[ end Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: c0404fbd

The panic is caused by the fact that perf_callchain_user() mistakenly
assumes it's 64-bit only and ends up corrupting the stack.
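
To make the corruption concrete, here is a minimal user-space sketch (not
kernel code; the struct only mirrors the shape of the kernel's struct
stack_frame in arch/x86/include/asm/stacktrace.h) of what the old
hard-coded 8-byte copies do on a 32-bit build:

  #include <stdio.h>
  #include <stddef.h>

  /* Mirrors the kernel's two-word frame record: saved frame pointer
   * followed by a return address. */
  struct stack_frame {
  	void *next_frame;
  	unsigned long return_address;
  };

  int main(void)
  {
  	/*
  	 * On a 64-bit build each field is 8 bytes, so copying 8 bytes per
  	 * field is exact.  On a 32-bit build each field is only 4 bytes:
  	 * the first 8-byte copy into &frame.next_frame already spills into
  	 * return_address, and the second 8-byte copy into
  	 * &frame.return_address runs 4 bytes past the end of the struct --
  	 * into the on-stack canary that CONFIG_CC_STACKPROTECTOR placed
  	 * after the locals, which is what __stack_chk_fail() panics about.
  	 */
  	printf("sizeof(struct stack_frame)    = %zu\n",
  	       sizeof(struct stack_frame));
  	printf("offsetof(return_address)      = %zu\n",
  	       offsetof(struct stack_frame, return_address));
  	printf("bytes written past the struct = %zu\n",
  	       (size_t)8 - sizeof(unsigned long));
  	return 0;
  }

On a 32-bit build this reports a 4-byte overrun; on a 64-bit build it
reports none, which is why the bug only bites 32-bit kernels.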
Signed-off-by: Josh Poimboeuf
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Cc: stable@vger.kernel.org # v4.5+
Fixes: 75925e1ad7f5 ("perf/x86: Optimize stack walk user accesses")
Link: http://lkml.kernel.org/r/1a547f5077ec30f75f9b57074837c3c80df86e5e.1467432113.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/core.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 33787ee817f0..26ced536005a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2319,7 +2319,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
-	const void __user *fp;
+	const unsigned long __user *fp;
 
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
 		return;
 
-	fp = (void __user *)regs->bp;
+	fp = (unsigned long __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
+
 		frame.next_frame = NULL;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, 16))
+		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
 			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
 		if (bytes != 0)
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
 		if (bytes != 0)
 			break;
--
2.20.1
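
The fix sizes everything by the pointed-to type: sizeof(*fp) is 4 bytes on
a 32-bit kernel and 8 on a 64-bit one, and fp + 1 steps by one word instead
of a hard-coded 8 bytes.  A minimal user-space sketch of that walk follows
(illustration only -- copy_words() and fake_stack[] are made-up stand-ins
for __copy_from_user_nmi() and the real user stack):

  #include <stdio.h>
  #include <string.h>

  /* Same two-word frame layout the kernel walks. */
  struct stack_frame {
  	void *next_frame;
  	unsigned long return_address;
  };

  /* Stand-in for __copy_from_user_nmi(); returns 0 on success like the
   * real helper. */
  static unsigned long copy_words(void *dst, const unsigned long *src, size_t len)
  {
  	memcpy(dst, src, len);
  	return 0;
  }

  int main(void)
  {
  	/* Fake "user stack": frame 1 at [0..1] (NULL-terminated), frame 2 at [2..3]. */
  	unsigned long fake_stack[4] = { 0, 0x1111, 0, 0x2222 };
  	const unsigned long *fp = &fake_stack[2];	/* start at the innermost frame */
  	struct stack_frame frame;

  	fake_stack[2] = (unsigned long)&fake_stack[0];	/* link frame 2 -> frame 1 */

  	while (fp) {
  		frame.next_frame = NULL;
  		frame.return_address = 0;

  		/* Two native words per frame: 4 bytes each on 32-bit, 8 on 64-bit. */
  		if (copy_words(&frame.next_frame, fp, sizeof(*fp)))
  			break;
  		if (copy_words(&frame.return_address, fp + 1, sizeof(*fp)))
  			break;

  		printf("return address: %#lx\n", frame.return_address);
  		fp = frame.next_frame;
  	}
  	return 0;
  }

Because every copy is exactly one word, the struct is never overrun on
either word size, so the stack-protector canary stays intact.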