kprobes, x86: Disable irqs during optimized callback
author     Jiri Olsa <jolsa@redhat.com>
           Wed, 11 May 2011 11:06:13 +0000 (13:06 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 11 May 2011 11:21:23 +0000 (13:21 +0200)
Disable irqs during the optimized callback, so we don't miss any
in-irq kprobes. With only preemption disabled, an interrupt arriving
while current_kprobe is set causes any kprobe hit in that interrupt
handler to be counted as missed rather than handled.

The following commands:

 # cd /debug/tracing/
 # echo "p mutex_unlock" >> kprobe_events
 # echo "p _raw_spin_lock" >> kprobe_events
 # echo "p smp_apic_timer_interrupt" >> ./kprobe_events
 # echo 1 > events/enable

cause optimized kprobes to be missed. None are missed with the
fix applied.
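
One way to observe the misses is the per-probe hit/miss counters in
kprobe_profile (in the same tracing directory); each line lists the
event name, its hit count, and its missed count:

 # cat kprobe_profile

With the fix applied, the missed column stays at zero for the
probes above.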

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Link: http://lkml.kernel.org/r/20110511110613.GB2390@jolsa.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/kprobes.c

index c969fd9d156651c9d0cc6dae1a606bb091cbcde6..f1a6244d7d93769c35d219b79de6358d8a4096e3 100644
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                                         struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long flags;
 
        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;
 
-       preempt_disable();
+       local_irq_save(flags);
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
-       preempt_enable_no_resched();
+       local_irq_restore(flags);
 }
 
 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
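
For reference, a minimal sketch of the pattern the patch switches to;
sketch_callback is illustrative, not the kernel's optimized_callback,
and the register fixup and pre-handler body are elided:

	#include <linux/irqflags.h>
	#include <linux/kprobes.h>

	/*
	 * Sketch: while current_kprobe is set on this CPU, a kprobe
	 * hit from an interrupt handler sees kprobe_running() and is
	 * merely counted as missed, so the callback runs with irqs
	 * masked rather than with just preemption disabled.
	 */
	static void sketch_callback(struct kprobe *kp, struct pt_regs *regs)
	{
		unsigned long flags;

		local_irq_save(flags);	/* irqs off: no in-irq kprobe can fire */
		if (kprobe_running()) {
			kprobes_inc_nmissed_count(kp);	/* nested hit: missed */
		} else {
			__this_cpu_write(current_kprobe, kp);
			/* ... fix up regs and run the pre-handler ... */
			__this_cpu_write(current_kprobe, NULL);
		}
		local_irq_restore(flags);	/* in-irq kprobes handled again */
	}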