From b59a1bfcc2406ea75346977ad016cfe909a762ac Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Sat, 30 Jul 2011 10:53:20 +0000
Subject: [PATCH] powerpc/perf: Disable pagefaults during callchain stack read

Panic observed on an older kernel when collecting call chains for
the context-switch software event:

  []rb_erase+0x1b4/0x3e8
  []__dequeue_entity+0x50/0xe8
  []set_next_entity+0x178/0x1bc
  []pick_next_task_fair+0xb0/0x118
  []schedule+0x500/0x614
  []rwsem_down_failed_common+0xf0/0x264
  []rwsem_down_read_failed+0x34/0x54
  []down_read+0x3c/0x54
  []do_page_fault+0x114/0x5e8
  []handle_page_fault+0xc/0x80
  []perf_callchain+0x224/0x31c
  []perf_prepare_sample+0x240/0x2fc
  []__perf_event_overflow+0x280/0x398
  []perf_swevent_overflow+0x9c/0x10c
  []perf_swevent_ctx_event+0x1d0/0x230
  []do_perf_sw_event+0x84/0xe4
  []perf_sw_event_context_switch+0x150/0x1b4
  []perf_event_task_sched_out+0x44/0x2d4
  []schedule+0x2c0/0x614
  []__cond_resched+0x34/0x90
  []_cond_resched+0x4c/0x68
  []move_page_tables+0xb0/0x418
  []setup_arg_pages+0x184/0x2a0
  []load_elf_binary+0x394/0x1208
  []search_binary_handler+0xe0/0x2c4
  []do_execve+0x1bc/0x268
  []sys_execve+0x84/0xc8
  []ret_from_syscall+0x0/0x3c

A page fault occurred while walking the callchain to create a perf
sample for the context-switch event. Handling the page fault requires
the mmap_sem, but it is already held by setup_arg_pages: setup_arg_pages
calls shift_arg_pages with the mmap_sem held, and shift_arg_pages in
turn calls move_page_tables, which has a cond_resched at the top of its
for loop - hitting that cond_resched is what caused the context switch.

This is an extension of Anton's proposed patch:

  https://lkml.org/lkml/2011/7/24/151

adding a case for 32-bit ppc.

Tested on the system that first generated the panic, and then again
with the latest kernel using a PPC VM. I am not able to test the
64-bit path - I do not have hardware for it, and 64-bit PPC VMs
(qemu on Intel) are horribly slow.

Signed-off-by: David Ahern
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/perf_callchain.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index d05ae4204bbf..564c1d8bdb5c 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	    ((unsigned long)ptr & 7))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 8);
 }
@@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 4);
 }
@@ -294,11 +302,17 @@ static inline int current_is_64bit(void)
  */
 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 {
+	int rc;
+
 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	return __get_user_inatomic(*ret, ptr);
+	pagefault_disable();
+	rc = __get_user_inatomic(*ret, ptr);
+	pagefault_enable();
+
+	return rc;
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
-- 
2.20.1
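
Background on the pattern the patch applies: with page faults disabled
via the kernel's pagefault_disable()/pagefault_enable() pair, a user
access that would fault makes __get_user_inatomic() fail immediately
with a non-zero return instead of entering do_page_fault(), which can
sleep on the mmap_sem. A minimal sketch of that shape is below; it is
illustrative only, and read_user_word() is a hypothetical name, not a
function from this patch or the kernel tree.

/*
 * Sketch only: try a fast, non-faulting read of one word from a user
 * stack.  With page faults disabled, __get_user_inatomic() returns
 * non-zero right away if the page is not mapped, rather than taking
 * the mmap_sem, which the task being sampled may already hold.
 */
#include <linux/uaccess.h>

static int read_user_word(unsigned long __user *ptr, unsigned long *val)
{
	int rc;

	pagefault_disable();			/* faulting access now fails fast */
	rc = __get_user_inatomic(*val, ptr);
	pagefault_enable();

	return rc;				/* 0 on success, -EFAULT on fault */
}

In the patch itself, the 64-bit build's helpers fall back to
read_user_stack_slow() when the atomic read fails, while the
32-bit-only helper simply returns the error to its caller.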