From: Michael Ellerman
Date: Mon, 24 Apr 2017 14:24:04 +0000 (+1000)
Subject: Merge branch 'topic/kprobes' into next
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=9fc849144c80091252551a4897782ed5321d654a;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

Merge branch 'topic/kprobes' into next

Although most of these kprobes patches are powerpc specific, there's a
couple that touch generic code (with Acks). At the moment there's one
conflict with acme's tree, but it's not too bad. Still just in case
some other conflicts show up, we've put these in a topic branch so
another tree could merge some or all of it if necessary.
---

9fc849144c80091252551a4897782ed5321d654a
diff --cc arch/powerpc/kernel/kprobes.c
index fa3cfd90c83a,ca040e1be892..160ae0fa7d0d
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@@ -43,15 -42,78 +43,86 @@@ DEFINE_PER_CPU(struct kprobe_ctlblk, kp
  
  struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
  
 +bool arch_within_kprobe_blacklist(unsigned long addr)
 +{
 +	return  (addr >= (unsigned long)__kprobes_text_start &&
 +		 addr < (unsigned long)__kprobes_text_end) ||
 +		(addr >= (unsigned long)_stext &&
 +		 addr < (unsigned long)__head_end);
 +}
 +
+ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
+ {
+ 	kprobe_opcode_t *addr;
+ 
+ #ifdef PPC64_ELF_ABI_v2
+ 	/* PPC64 ABIv2 needs local entry point */
+ 	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ 	if (addr && !offset) {
+ #ifdef CONFIG_KPROBES_ON_FTRACE
+ 		unsigned long faddr;
+ 		/*
+ 		 * Per livepatch.h, ftrace location is always within the first
+ 		 * 16 bytes of a function on powerpc with -mprofile-kernel.
+ 		 */
+ 		faddr = ftrace_location_range((unsigned long)addr,
+ 					      (unsigned long)addr + 16);
+ 		if (faddr)
+ 			addr = (kprobe_opcode_t *)faddr;
+ 		else
+ #endif
+ 			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
+ 	}
+ #elif defined(PPC64_ELF_ABI_v1)
+ 	/*
+ 	 * 64bit powerpc ABIv1 uses function descriptors:
+ 	 * - Check for the dot variant of the symbol first.
+ 	 * - If that fails, try looking up the symbol provided.
+ 	 *
+ 	 * This ensures we always get to the actual symbol and not
+ 	 * the descriptor.
+ 	 *
+ 	 * Also handle <module:symbol> format.
+ 	 */
+ 	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
+ 	const char *modsym;
+ 	bool dot_appended = false;
+ 	if ((modsym = strchr(name, ':')) != NULL) {
+ 		modsym++;
+ 		if (*modsym != '\0' && *modsym != '.') {
+ 			/* Convert to <module:.symbol> */
+ 			strncpy(dot_name, name, modsym - name);
+ 			dot_name[modsym - name] = '.';
+ 			dot_name[modsym - name + 1] = '\0';
+ 			strncat(dot_name, modsym,
+ 				sizeof(dot_name) - (modsym - name) - 2);
+ 			dot_appended = true;
+ 		} else {
+ 			dot_name[0] = '\0';
+ 			strncat(dot_name, name, sizeof(dot_name) - 1);
+ 		}
+ 	} else if (name[0] != '.') {
+ 		dot_name[0] = '.';
+ 		dot_name[1] = '\0';
+ 		strncat(dot_name, name, KSYM_NAME_LEN - 2);
+ 		dot_appended = true;
+ 	} else {
+ 		dot_name[0] = '\0';
+ 		strncat(dot_name, name, KSYM_NAME_LEN - 1);
+ 	}
+ 	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
+ 	if (!addr && dot_appended) {
+ 		/* Let's try the original non-dot symbol lookup */
+ 		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ 	}
+ #else
+ 	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ #endif
+ 
+ 	return addr;
+ }
+ 
 -int __kprobes arch_prepare_kprobe(struct kprobe *p)
 +int arch_prepare_kprobe(struct kprobe *p)
  {
  	int ret = 0;
  	kprobe_opcode_t insn = *p->addr;
@@@ -144,16 -202,58 +215,59 @@@ static nokprobe_inline void set_current
  	kcb->kprobe_saved_msr = regs->msr;
  }
  
+ bool arch_function_offset_within_entry(unsigned long offset)
+ {
+ #ifdef PPC64_ELF_ABI_v2
+ #ifdef CONFIG_KPROBES_ON_FTRACE
+ 	return offset <= 16;
+ #else
+ 	return offset <= 8;
+ #endif
+ #else
+ 	return !offset;
+ #endif
+ }
+ 
 -void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 -				      struct pt_regs *regs)
 +void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
  	ri->ret_addr = (kprobe_opcode_t *)regs->link;
  
  	/* Replace the return addr with trampoline addr */
  	regs->link = (unsigned long)kretprobe_trampoline;
  }
 +NOKPROBE_SYMBOL(arch_prepare_kretprobe);
  
 -int __kprobes try_to_emulate(struct kprobe *p, struct pt_regs *regs)
++int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
+ {
+ 	int ret;
+ 	unsigned int insn = *p->ainsn.insn;
+ 
+ 	/* regs->nip is also adjusted if emulate_step returns 1 */
+ 	ret = emulate_step(regs, insn);
+ 	if (ret > 0) {
+ 		/*
+ 		 * Once this instruction has been boosted
+ 		 * successfully, set the boostable flag
+ 		 */
+ 		if (unlikely(p->ainsn.boostable == 0))
+ 			p->ainsn.boostable = 1;
+ 	} else if (ret < 0) {
+ 		/*
+ 		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
+ 		 * So, we should never get here... but, its still
+ 		 * good to catch them, just in case...
+ 		 */
+ 		printk("Can't step on instruction %x\n", insn);
+ 		BUG();
+ 	} else if (ret == 0)
+ 		/* This instruction can't be boosted */
+ 		p->ainsn.boostable = -1;
+ 
+ 	return ret;
+ }
++NOKPROBE_SYMBOL(try_to_emulate);
+ 
 -int __kprobes kprobe_handler(struct pt_regs *regs)
 +int kprobe_handler(struct pt_regs *regs)
  {
  	struct kprobe *p;
  	int ret = 0;
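For context, the path exercised by these changes is the ordinary register_kprobe()
interface: a probe registered by symbol name is resolved through the
kprobe_lookup_name() implementation above (the local entry point on ABIv2, or the
dot symbol on ABIv1). Below is a minimal, illustrative module sketch. It is not
part of this commit; the probed symbol "kmem_cache_alloc", the module name, and
the function names are arbitrary placeholders.

/*
 * Illustrative sketch only; not part of this commit. Registers a kprobe
 * by symbol name, which is the path that goes through kprobe_lookup_name().
 * The probed symbol and names below are placeholders.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit: %s+0x%x at %p\n",
		p->symbol_name, p->offset, p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "kmem_cache_alloc",	/* placeholder symbol */
	.offset		= 0,			/* 0 probes the (local) entry point */
	.pre_handler	= handler_pre,
};

static int __init kprobe_sketch_init(void)
{
	/* register_kprobe() resolves kp.symbol_name via kprobe_lookup_name() */
	int ret = register_kprobe(&kp);

	if (ret < 0) {
		pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}
	pr_info("kprobe registered at %p\n", kp.addr);
	return 0;
}

static void __exit kprobe_sketch_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_sketch_init);
module_exit(kprobe_sketch_exit);
MODULE_LICENSE("GPL");

Built as an out-of-tree module and loaded with insmod, the pre-handler should
fire on the probed symbol, with kp.addr reflecting the ABI-specific resolution
done by the code in this diff.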