From 991a51d83a3d9bebfafdd1e692cf310899d60791 Mon Sep 17 00:00:00 2001
From: Ananth N Mavinakayanahalli
Date: Mon, 7 Nov 2005 01:00:14 -0800
Subject: [PATCH] [PATCH] Kprobes: Use RCU for (un)register synchronization - arch changes

Changes to the arch kprobes infrastructure to take advantage of the locking
changes introduced by usage of RCU for synchronization.  All handlers are
now run without any locks held, so they have to be re-entrant or provide
their own synchronization.

Signed-off-by: Ananth N Mavinakayanahalli
Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/i386/kernel/kprobes.c    | 22 +++++++---------------
 arch/ia64/kernel/kprobes.c    | 16 ++++++----------
 arch/ppc64/kernel/kprobes.c   | 24 ++++++------------------
 arch/sparc64/kernel/kprobes.c | 14 ++------------
 arch/x86_64/kernel/kprobes.c  | 25 ++++++------------------
 5 files changed, 27 insertions(+), 74 deletions(-)

diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 99565a66915d..ad469299267a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -31,7 +31,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -123,6 +122,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 		regs->eip = (unsigned long)&p->ainsn.insn;
 }

+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                       struct pt_regs *regs)
 {
@@ -168,15 +168,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 				regs->eflags &= ~TF_MASK;
 				regs->eflags |= kcb->kprobe_saved_eflags;
-				unlock_kprobes();
 				goto no_kprobe;
 			}
 			/* We have reentered the kprobe_handler(), since
@@ -197,14 +194,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 				goto ss_probe;
 			}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}

-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (regs->eflags & VM_MASK) {
 			/* We are in virtual-8086 mode. Return 0 */
 			goto no_kprobe;
@@ -268,9 +262,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);

 	/*
@@ -310,7 +305,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->eip = orig_ret_address;

 	reset_current_kprobe();
-	unlock_kprobes();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();

 	/*
@@ -395,7 +390,7 @@ static void __kprobes resume_execution(struct kprobe *p,

 /*
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
+ * remain disabled thoroughout this function.
  */
 static inline int post_kprobe_handler(struct pt_regs *regs)
 {
@@ -419,7 +414,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 		goto out;
 	}
 	reset_current_kprobe();
-	unlock_kprobes();
 out:
 	preempt_enable_no_resched();

@@ -434,7 +428,6 @@ out:
 	return 1;
 }

-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
@@ -448,7 +441,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		regs->eflags |= kcb->kprobe_old_eflags;

 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -463,7 +455,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;

-	preempt_disable();
+	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -482,7 +474,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	rcu_read_unlock();
 	return ret;
 }

diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 17e70b1b8d79..fddbac32d44a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -26,7 +26,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -343,10 +342,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;

+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);

 	/*
@@ -386,7 +386,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->cr_iip = orig_ret_address;

 	reset_current_kprobe();
-	unlock_kprobes();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();

 	/*
@@ -397,6 +397,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }

+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                       struct pt_regs *regs)
 {
@@ -612,7 +613,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
 			(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
 			ia64_psr(regs)->ss = 0;
-			unlock_kprobes();
 			goto no_kprobe;
 		}
 		/* We have reentered the pre_kprobe_handler(), since
@@ -641,10 +641,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		}
 	}

-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (!is_ia64_break_inst(regs)) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -707,7 +705,6 @@ static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 		goto out;
 	}
 	reset_current_kprobe();
-	unlock_kprobes();

 out:
 	preempt_enable_no_resched();
@@ -728,7 +725,6 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 	if (kcb->kprobe_status & KPROBE_HIT_SS) {
 		resume_execution(cur, regs);
 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}

@@ -741,7 +737,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;

-	preempt_disable();
+	rcu_read_lock();
 	switch(val) {
 	case DIE_BREAK:
 		if (pre_kprobes_handler(args))
@@ -757,7 +753,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	rcu_read_unlock();
 	return ret;
 }

diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 3f89f3e5584a..e0a25b35437f 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -125,6 +124,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	kcb->kprobe_saved_msr = regs->msr;
 }

+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                       struct pt_regs *regs)
 {
@@ -152,8 +152,6 @@ static inline int kprobe_handler(struct pt_regs *regs)

 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			kprobe_opcode_t insn = *p->ainsn.insn;
@@ -161,7 +159,6 @@ static inline int kprobe_handler(struct pt_regs *regs)
 					is_trap(insn)) {
 				regs->msr &= ~MSR_SE;
 				regs->msr |= kcb->kprobe_saved_msr;
-				unlock_kprobes();
 				goto no_kprobe;
 			}
 			/* We have reentered the kprobe_handler(), since
@@ -183,14 +180,11 @@ static inline int kprobe_handler(struct pt_regs *regs)
 				goto ss_probe;
 			}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}

-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * PowerPC has multiple variants of the "trap"
@@ -254,9 +248,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);

 	/*
@@ -296,7 +291,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->nip = orig_ret_address;

 	reset_current_kprobe();
-	unlock_kprobes();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();

 	/*
@@ -348,7 +343,6 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 		goto out;
 	}
 	reset_current_kprobe();
-	unlock_kprobes();
 out:
 	preempt_enable_no_resched();

@@ -363,7 +357,6 @@ out:
 	return 1;
 }

-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
@@ -378,7 +371,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		regs->msr |= kcb->kprobe_saved_msr;

 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -393,11 +385,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;

-	/*
-	 * Interrupts are not disabled here. We need to disable
-	 * preemption, because kprobe_running() uses smp_processor_id().
-	 */
-	preempt_disable();
+	rcu_read_lock();
 	switch (val) {
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
@@ -415,7 +403,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable_no_resched();
+	rcu_read_unlock();
 	return ret;
 }

diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index b95984154dba..58a815e90373 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -116,15 +116,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		 * Disarm the probe we just hit, and ignore it.
-		 */
 		p = get_kprobe(addr);
 		if (p) {
 			if (kcb->kprobe_status == KPROBE_HIT_SS) {
 				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
 					kcb->kprobe_orig_tstate_pil);
-				unlock_kprobes();
 				goto no_kprobe;
 			}
 			/* We have reentered the kprobe_handler(), since
@@ -144,14 +140,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			if (p->break_handler && p->break_handler(p, regs))
 				goto ss_probe;
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}

-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -296,14 +289,12 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 		goto out;
 	}
 	reset_current_kprobe();
-	unlock_kprobes();
 out:
 	preempt_enable_no_resched();

 	return 1;
 }

-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
@@ -316,7 +307,6 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		resume_execution(cur, regs, kcb);

 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -331,7 +321,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;

-	preempt_disable();
+	rcu_read_lock();
 	switch (val) {
 	case DIE_DEBUG:
 		if (kprobe_handler(args->regs))
@@ -350,7 +340,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	rcu_read_unlock();
 	return ret;
 }

diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 6cb40d133b7c..9bef2c8dc12c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -34,7 +34,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -266,6 +265,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }

+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                       struct pt_regs *regs)
 {
@@ -299,15 +299,12 @@ int __kprobes kprobe_handler(struct pt_regs *regs)

 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 				regs->eflags &= ~TF_MASK;
 				regs->eflags |= kcb->kprobe_saved_rflags;
-				unlock_kprobes();
 				goto no_kprobe;
 			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 				/* TODO: Provide re-entrancy from
@@ -340,14 +337,11 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 				goto ss_probe;
 			}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}

-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -406,9 +400,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;

+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);

 	/*
@@ -448,7 +443,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = orig_ret_address;

 	reset_current_kprobe();
-	unlock_kprobes();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();

 	/*
@@ -536,10 +531,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 	}
 }

-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
- */
 int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
@@ -560,8 +551,6 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	if (kcb->kprobe_status == KPROBE_REENTER) {
 		restore_previous_kprobe(kcb);
 		goto out;
-	} else {
-		unlock_kprobes();
 	}
 	reset_current_kprobe();
 out:
@@ -578,7 +567,6 @@ out:
 	return 1;
 }

-/* Interrupts disabled, kprobe_lock held. */
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
@@ -592,7 +580,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		regs->eflags |= kcb->kprobe_old_rflags;

 		reset_current_kprobe();
-		unlock_kprobes();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -607,7 +594,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;

-	preempt_disable();
+	rcu_read_lock();
 	switch (val) {
 	case DIE_INT3:
 		if (kprobe_handler(args->regs))
@@ -626,7 +613,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	rcu_read_unlock();
 	return ret;
 }

-- 
2.20.1
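Illustrative note (not part of the patch above): the commit message requires every
probe handler to be re-entrant or to provide its own synchronization, since handlers
no longer run under the old global kprobe lock. The sketch below shows one way a
kprobes user might cope with that, by guarding its own shared state with a private
spinlock. The module, the hit_lock/hits names and the do_fork probe point are
invented for illustration only; trees that predate the .symbol_name field would set
kp.addr directly instead.

/*
 * Minimal sketch of a kprobes client that brings its own synchronization.
 * Not part of the patch; names and probe point are assumptions.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hit_lock);	/* handler-private lock */
static unsigned long hits;		/* shared state touched from the handler */

/* May run concurrently on several CPUs now that no global lock is held. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&hit_lock, flags);
	hits++;
	spin_unlock_irqrestore(&hit_lock, flags);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example probe point; hypothetical choice */
	.pre_handler = handler_pre,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
	printk(KERN_INFO "probe point hit %lu times\n", hits);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");

A per-CPU counter summed at unload time would work just as well here; the point is
only that any serialization the handler needs must now come from the handler itself.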