kprobes: Fix a double lock bug of kprobe_mutex
author Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Thu, 18 Apr 2013 09:33:18 +0000 (18:33 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 18 Apr 2013 15:58:38 +0000 (08:58 -0700)
Fix a double-locking bug triggered when debug.kprobes-optimization is
set to 0.  While proc_kprobes_optimization_handler holds kprobe_mutex,
wait_for_kprobe_optimizer tries to acquire the same mutex again; kernel
mutexes are not recursive, so the task deadlocks on itself.
To fix the bug, this patch introduces a separate mutex,
kprobe_sysctl_mutex, to protect the sysctl parameter, and locks that in
proc_kprobes_optimization_handler.  Since kprobe_mutex must still be
held while touching kprobe resources, each of the
*optimize_all_kprobes() functions now takes it internally.

This bug was introduced by commit ad72b3bea744 ("kprobes: fix
wait_for_kprobe_optimizer()").
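
For illustration, here is a minimal userspace sketch of the failure
mode.  It is not kernel code: a PTHREAD_MUTEX_NORMAL pthread mutex
stands in for kprobe_mutex, and the *_like functions are hypothetical
stand-ins for the kernel paths.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m;

    /* Stand-in for wait_for_kprobe_optimizer(): relocks the mutex the
     * caller already holds. */
    static void wait_for_optimizer_like(void)
    {
            pthread_mutex_lock(&m);   /* second acquisition: deadlocks */
            pthread_mutex_unlock(&m);
    }

    int main(void)
    {
            pthread_mutexattr_t a;

            pthread_mutexattr_init(&a);
            /* POSIX guarantees that relocking a NORMAL mutex deadlocks. */
            pthread_mutexattr_settype(&a, PTHREAD_MUTEX_NORMAL);
            pthread_mutex_init(&m, &a);

            pthread_mutex_lock(&m);     /* as the old sysctl handler did */
            wait_for_optimizer_like();  /* never returns */
            puts("unreachable");
            return 0;
    }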

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/kprobes.c

index e35be53f6613c4fd08c093b6865cd6f43e0b9a89..3fed7f0cbcdfe3d2149dd903e6912628ba58da95 100644 (file)
@@ -794,16 +794,16 @@ out:
 }
 
 #ifdef CONFIG_SYSCTL
-/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;
 
+       mutex_lock(&kprobe_mutex);
        /* If optimization is already allowed, just return */
        if (kprobes_allow_optimization)
-               return;
+               goto out;
 
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void)
                                optimize_kprobe(p);
        }
        printk(KERN_INFO "Kprobes globally optimized\n");
+out:
+       mutex_unlock(&kprobe_mutex);
 }
 
-/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;
 
+       mutex_lock(&kprobe_mutex);
        /* If optimization is already prohibited, just return */
-       if (!kprobes_allow_optimization)
+       if (!kprobes_allow_optimization) {
+               mutex_unlock(&kprobe_mutex);
                return;
+       }
 
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void)
                                unoptimize_kprobe(p, false);
                }
        }
+       mutex_unlock(&kprobe_mutex);
+
        /* Wait for unoptimizing completion */
        wait_for_kprobe_optimizer();
        printk(KERN_INFO "Kprobes globally unoptimized\n");
 }
 
+static DEFINE_MUTEX(kprobe_sysctl_mutex);
 int sysctl_kprobes_optimization;
 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
                                      void __user *buffer, size_t *length,
@@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 {
        int ret;
 
-       mutex_lock(&kprobe_mutex);
+       mutex_lock(&kprobe_sysctl_mutex);
        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 
@@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
                optimize_all_kprobes();
        else
                unoptimize_all_kprobes();
-       mutex_unlock(&kprobe_mutex);
+       mutex_unlock(&kprobe_sysctl_mutex);
 
        return ret;
 }
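
With the fix, the two locks nest cleanly: kprobe_sysctl_mutex only
serializes concurrent sysctl writers, while kprobe_mutex is taken and
released inside the table walkers, so wait_for_kprobe_optimizer() is
free to acquire it.  A hypothetical userspace model of that nesting
follows; the names mirror the kernel's, but none of this is kernel
source.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t kprobe_sysctl_mutex = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t kprobe_mutex = PTHREAD_MUTEX_INITIALIZER;        /* inner */
    static bool kprobes_allow_optimization = true;

    /* Stand-in for unoptimize_all_kprobes(): takes kprobe_mutex itself
     * and drops it before waiting, the same shape as the fix. */
    static void unoptimize_all_like(void)
    {
            pthread_mutex_lock(&kprobe_mutex);
            kprobes_allow_optimization = false;
            pthread_mutex_unlock(&kprobe_mutex);  /* dropped before the wait */

            /* Stand-in for wait_for_kprobe_optimizer(): relocking is now safe. */
            pthread_mutex_lock(&kprobe_mutex);
            pthread_mutex_unlock(&kprobe_mutex);
    }

    /* Stand-in for proc_kprobes_optimization_handler(): holds only the
     * sysctl mutex, never kprobe_mutex. */
    static void sysctl_handler_like(void)
    {
            pthread_mutex_lock(&kprobe_sysctl_mutex);
            unoptimize_all_like();
            pthread_mutex_unlock(&kprobe_sysctl_mutex);
    }

    int main(void)
    {
            sysctl_handler_like();  /* completes; no self-deadlock */
            puts("ok");
            return 0;
    }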