workqueue: fix race condition in schedule_on_each_cpu()
author Tejun Heo <tj@kernel.org>
Tue, 17 Nov 2009 22:06:20 +0000 (14:06 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Nov 2009 01:40:33 +0000 (17:40 -0800)
Commit 65a64464349883891e21e74af16c05d6e1eeb4e9 ("HWPOISON: Allow
schedule_on_each_cpu() from keventd"), which allows schedule_on_each_cpu()
to be called from keventd, added a race condition: schedule_on_each_cpu()
may race with cpu hotplug and end up executing the function twice on a
cpu.
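
For illustration, the racy pre-patch ordering, reconstructed from the
removed lines in the hunk below (a sketch, not the complete function):
the direct call ran before get_online_cpus() was taken, so nothing
serialized it against cpu hotplug:

	if (current_is_keventd()) {
		/* no preemption or hotplug protection at this point */
		orig = raw_smp_processor_id();
		INIT_WORK(per_cpu_ptr(works, orig), func);
		func(per_cpu_ptr(works, orig));	/* direct call, outside the lock */
	}

	get_online_cpus();	/* hotplug lock taken only afterwards */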

Fix it by moving direct execution into the section protected with
get/put_online_cpus().  While at it, update the code so that direct
execution happens only after works have been scheduled for all other
cpus, and drop the unnecessary cpu != orig test from the flush loop.
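
For context, schedule_on_each_cpu() takes a work_func_t and runs it once
on each online cpu, waiting for all invocations to finish.  A minimal
caller might look like this; flush_local_state() is a hypothetical
callback, not code from this patch:

	static void flush_local_state(struct work_struct *unused)
	{
		/* runs in keventd context, once on each online cpu */
	}

	/* returns 0 on success, -ENOMEM if per-cpu works can't be allocated */
	int ret = schedule_on_each_cpu(flush_local_state);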

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 12328147132c0e6e560ff2e3de39c2fda2a4f116..67e526b6ae815bbc846fd0424d43399f9fb5a0ba 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
+       get_online_cpus();
+
        /*
-        * when running in keventd don't schedule a work item on itself.
-        * Can just call directly because the work queue is already bound.
-        * This also is faster.
-        * Make this a generic parameter for other workqueues?
+        * When running in keventd don't schedule a work item on
+        * itself.  Can just call directly because the work queue is
+        * already bound.  This also is faster.
         */
-       if (current_is_keventd()) {
+       if (current_is_keventd())
                orig = raw_smp_processor_id();
-               INIT_WORK(per_cpu_ptr(works, orig), func);
-               func(per_cpu_ptr(works, orig));
-       }
 
-       get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
-               if (cpu == orig)
-                       continue;
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
-       }
-       for_each_online_cpu(cpu) {
                if (cpu != orig)
-                       flush_work(per_cpu_ptr(works, cpu));
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
+       for_each_online_cpu(cpu)
+               flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
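
With the patch applied, the function reads roughly as follows (a sketch
assembled from the hunk above; the declarations at the top are
reconstructed from context rather than copied verbatim):

int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

Because sampling the cpu, scheduling, the direct call, and the flush all
happen inside get/put_online_cpus(), the set of online cpus cannot change
underneath them, and func runs exactly once per online cpu.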