From: Keshavamurthy Anil S
Date: Tue, 10 Jan 2006 04:52:44 +0000 (-0800)
Subject: [PATCH] kprobes-changed-from-using-spinlock-to-mutex fix
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f709b122343fb9a010b6cf2d5559641f1820f7c9;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

[PATCH] kprobes-changed-from-using-spinlock-to-mutex fix

Based on some feedback from Oleg Nesterov, I have made a few changes to
the previously posted patch.

Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 331e169e8629..2cd32dd6898b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -35,7 +35,6 @@
 #include
 #include
 
-static DECLARE_MUTEX(kprobe_mutex);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -54,9 +53,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	/* insn must be on a special executable page on ppc64 */
 	if (!ret) {
-		down(&kprobe_mutex);
 		p->ainsn.insn = get_insn_slot();
-		up(&kprobe_mutex);
 		if (!p->ainsn.insn)
 			ret = -ENOMEM;
 	}
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 8b8943bfb89e..128e18190f99 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -43,7 +43,7 @@
 #include
 
 void jprobe_return_end(void);
-void __kprobes arch_copy_kprobe(struct kprobe *p);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -180,7 +180,7 @@ static inline s32 *is_riprel(u8 *insn)
 	return NULL;
 }
 
-void __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	s32 *ripdisp;
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f14ccd35e9b6..f1c0e61a2cb4 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -431,7 +431,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
 	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kcalloc(1, sizeof(struct kprobe), GFP_KERNEL);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
@@ -491,7 +491,8 @@ out:
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	struct module *mod;
-	struct kprobe *old_p, *cleanup_p;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
 
 	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
@@ -499,22 +500,25 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		up(&kprobe_mutex);
 		return;
 	}
-
-	if ((old_p->pre_handler == aggr_pre_handler) &&
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
 		(p->list.next == &old_p->list) &&
-		(p->list.prev == &old_p->list)) {
-		/* Only one element in the aggregate list */
+		(p->list.prev == &old_p->list))) {
+		/* Only probe on the hash list */
 		arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = old_p;
-	} else if (old_p == p) {
-		/* Only one kprobe element in the hash list */
-		arch_disarm_kprobe(p);
-		hlist_del_rcu(&p->hlist);
-		cleanup_p = p;
+		cleanup_p = 1;
 	} else {
 		list_del_rcu(&p->list);
-		cleanup_p = NULL;
+		cleanup_p = 0;
 	}
 
 	up(&kprobe_mutex);
@@ -524,7 +528,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		module_put(mod);
 
 	if (cleanup_p) {
-		if (cleanup_p->pre_handler == aggr_pre_handler) {
+		if (p != old_p) {
 			list_del_rcu(&p->list);
 			kfree(old_p);
 		}
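
For readers unfamiliar with this code, below is a minimal, illustrative kprobes
client of the kind whose register/unregister calls run under kprobe_mutex in
the paths touched above. It is not part of this patch: the module, handler,
and variable names are invented for the example, and the probed symbol
("do_fork") plus the use of kallsyms_lookup_name() for address resolution
follow the style of the Documentation/kprobes.txt example of this era; on
some configurations the address may instead have to be taken from System.map.

/* kprobe_example.c -- illustrative sketch only, not part of this patch */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* Called just before the probed instruction executes. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* continue with the original instruction */
}

static struct kprobe example_kp = {
	.pre_handler = example_pre_handler,
};

static int __init example_init(void)
{
	int ret;

	/* Symbol choice is illustrative only. */
	example_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
	if (!example_kp.addr)
		return -EINVAL;

	/* Registration is serialized by kprobe_mutex in kernel/kprobes.c. */
	ret = register_kprobe(&example_kp);
	if (ret < 0)
		printk(KERN_ERR "register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit example_exit(void)
{
	/* Exercises the unregister_kprobe() path reworked in this patch. */
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");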