sched: for_each_domain() vs RCU
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Thu, 10 Sep 2009 16:18:47 +0000 (18:18 +0200)
committer	Ingo Molnar <mingo@elte.hu>
	Tue, 15 Sep 2009 14:01:06 +0000 (16:01 +0200)
for_each_domain() uses RCU to serialize the sched_domains, except
its caller here doesn't actually take rcu_read_lock() and instead
relies on preemption being disabled -> FAIL. With preemptible RCU a
preempt-disabled region is not a read-side critical section, so the
domain tree can be freed from under the iteration.

XXX: audit other sched_domain code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index eaa00014b4990ac242508cfcdbb60dbcdf17e30a..43dc6d1d9e88588bb0fa1ad43f8494e77f6eeab5 100644
@@ -1331,6 +1331,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                new_cpu = prev_cpu;
        }
 
+       rcu_read_lock();
        for_each_domain(cpu, tmp) {
                /*
                 * If power savings logic is enabled for a domain, see if we
@@ -1369,8 +1370,10 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 
-                       if (wake_affine(tmp, p, sync))
-                               return cpu;
+                       if (wake_affine(tmp, p, sync)) {
+                               new_cpu = cpu;
+                               goto out;
+                       }
 
                        want_affine = 0;
                }
@@ -1416,6 +1419,8 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                /* while loop will break here if sd == NULL */
        }
 
+out:
+       rcu_read_unlock();
        return new_cpu;
 }
 #endif /* CONFIG_SMP */
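
For reference, the pattern the patch enforces is the standard one for RCU-protected
domain walks: take rcu_read_lock() before for_each_domain() and drop it only after
the last dereference of a sched_domain pointer; that is also why the early
"return cpu" above becomes a "goto out", so the unlock is never skipped on the
affine-wakeup path. The sketch below is illustrative only, not code from this
patch; walk_domains() and the SD_LOAD_BALANCE test are made-up placeholders.

/*
 * Illustrative sketch (not from sched_fair.c): any walk of the
 * sched_domain hierarchy must sit inside an RCU read-side critical
 * section, since the domain trees are serialized via RCU.
 */
static int walk_domains(int cpu)
{
	struct sched_domain *sd;
	int found = 0;

	rcu_read_lock();		/* pin the domain hierarchy */
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_LOAD_BALANCE) {
			found = 1;
			break;		/* still under rcu_read_lock() */
		}
	}
	rcu_read_unlock();		/* sd must not be used past this point */

	return found;
}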