From: Oleg Nesterov
Date: Sun, 6 Apr 2014 15:16:10 +0000 (+0200)
Subject: uprobes/x86: Introduce sizeof_long(), cleanup adjust_ret_addr() and arch_uretprobe_hijack_return_addr()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=8faaed1b9f500d6cf32702716733a645c9b0727a;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

uprobes/x86: Introduce sizeof_long(), cleanup adjust_ret_addr() and arch_uretprobe_hijack_return_addr()

1. Add the trivial sizeof_long() helper and change other callers of
   is_ia32_task() to use it.

   TODO: is_ia32_task() is not what we actually want: TS_COMPAT does not
   necessarily mean 32-bit. Fortunately, syscall-like insns can't be
   probed, so it actually works, but it would be better to rename it and
   use is_ia32_frame().

2. As Jim pointed out, "ncopied" in arch_uretprobe_hijack_return_addr()
   and adjust_ret_addr() should be named "nleft": copy_from_user() and
   copy_to_user() return the number of bytes left uncopied. And in fact
   only the last copy_to_user() in arch_uretprobe_hijack_return_addr()
   actually needs to inspect the exact non-zero value (see the sketch of
   the convention after the diff).

   TODO: adjust_ret_addr() should die. We can always calculate the value
   we need to write into *regs->sp; UPROBE_FIX_CALL just needs to record
   insn->length (a rough sketch of that direction also follows the diff).

Signed-off-by: Oleg Nesterov
Reviewed-by: Jim Keniston
---

diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index cdd6909affcb..aecc22054384 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -408,6 +408,11 @@ struct uprobe_xol_ops {
 	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
 };
 
+static inline int sizeof_long(void)
+{
+	return is_ia32_task() ? 4 : 8;
+}
+
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
@@ -419,21 +424,14 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
  */
 static int adjust_ret_addr(unsigned long sp, long correction)
 {
-	int rasize, ncopied;
-	long ra = 0;
-
-	if (is_ia32_task())
-		rasize = 4;
-	else
-		rasize = 8;
+	int rasize = sizeof_long();
+	long ra;
 
-	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
-	if (unlikely(ncopied))
+	if (copy_from_user(&ra, (void __user *)sp, rasize))
 		return -EFAULT;
 
 	ra += correction;
-	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
-	if (unlikely(ncopied))
+	if (copy_to_user((void __user *)sp, &ra, rasize))
 		return -EFAULT;
 
 	return 0;
@@ -450,10 +448,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 	if (auprobe->fixups & UPROBE_FIX_CALL) {
 		if (adjust_ret_addr(regs->sp, correction)) {
-			if (is_ia32_task())
-				regs->sp += 4;
-			else
-				regs->sp += 8;
+			regs->sp += sizeof_long();
 			return -ERESTART;
 		}
 	}
@@ -714,23 +709,21 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-	int rasize, ncopied;
+	int rasize = sizeof_long(), nleft;
 	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
-	rasize = is_ia32_task() ? 4 : 8;
-	ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
-	if (unlikely(ncopied))
+	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
 		return -1;
 
 	/* check whether address has been already hijacked */
 	if (orig_ret_vaddr == trampoline_vaddr)
 		return orig_ret_vaddr;
 
-	ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
-	if (likely(!ncopied))
+	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
+	if (likely(!nleft))
 		return orig_ret_vaddr;
 
-	if (ncopied != rasize) {
+	if (nleft != rasize) {
 		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
 			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
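
Note on the "nleft" naming: copy_from_user() and copy_to_user() return the
number of bytes they could not copy (0 on success), so the value is "what is
left", not "what was copied". A minimal sketch of the convention; peek_user_long()
is only an illustrative name, not something in uprobes.c:

#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/errno.h>

/*
 * Illustrative only: read one stack slot of @rasize bytes from a user
 * stack.  copy_from_user() returns the number of bytes left uncopied
 * (0 on success), so any non-zero result means the access faulted and
 * we report -EFAULT.  The exact count only matters when a partial copy
 * must be distinguished from a complete failure, as in the last
 * copy_to_user() of arch_uretprobe_hijack_return_addr() above.
 */
static int peek_user_long(unsigned long sp, long *val, int rasize)
{
	return copy_from_user(val, (void __user *)sp, rasize) ? -EFAULT : 0;
}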
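
As for the second TODO: if UPROBE_FIX_CALL recorded the length of the original
call instruction, the post-xol fixup could compute the return address outright
(probed address + instruction length) and write it into *regs->sp, instead of
reading the old value back and adding a correction. A rough sketch of that
direction, assuming a hypothetical auprobe->ilen field filled from insn->length
at analyze time (not something this patch adds):

/*
 * Hypothetical follow-up, not part of this patch: with the original
 * instruction length recorded in a (made-up) auprobe->ilen field, the
 * return address pushed by the copied call is simply the address of
 * the next original instruction, so it can be written directly and
 * adjust_ret_addr() is no longer needed.
 */
static int fixup_call_ret_addr(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ret = current->utask->vaddr + auprobe->ilen;

	if (copy_to_user((void __user *)regs->sp, &new_ret, sizeof_long()))
		return -EFAULT;

	return 0;
}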