From 0ce90475dcdbe90affc218e9688c8401e468e84d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 25 Apr 2012 00:30:36 +0200
Subject: [PATCH] sched/fair: Add some serialization to the sched_domain
 load-balance walk

Since the sched_domain walk is completely unserialized (!SD_SERIALIZE)
it is possible that multiple cpus in the group get elected to do the
next level. Avoid this by adding some serialization.

Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/n/tip-vqh9ai6s0ewmeakjz80w4qz6@git.kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 1 +
 kernel/sched/core.c   | 2 ++
 kernel/sched/fair.c   | 9 +++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4a559bf0622f..3cbfb55bde25 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -927,6 +927,7 @@ struct sched_group_power {
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
 	atomic_t ref;
+	int balance_cpu;
 
 	unsigned int group_weight;
 	struct sched_group_power *sgp;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533a688ce22..6001e5c3b4e4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6060,6 +6060,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
+		sg->balance_cpu = -1;
 
 		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;
@@ -6135,6 +6136,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
+		sg->balance_cpu = -1;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 968ffee24721..cf86f74bcac2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3828,7 +3828,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 */
 	if (local_group) {
 		if (idle != CPU_NEWLY_IDLE) {
-			if (balance_cpu != this_cpu) {
+			if (balance_cpu != this_cpu ||
+			    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
 				*balance = 0;
 				return;
 			}
@@ -4929,7 +4930,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long interval;
-	struct sched_domain *sd;
+	struct sched_domain *sd, *last = NULL;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
@@ -4939,6 +4940,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
+		last = sd;
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
@@ -4983,6 +4985,9 @@ out:
 		if (!balance)
 			break;
 	}
+	for (sd = last; sd; sd = sd->child)
+		(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
+
 	rcu_read_unlock();
 
 	/*
-- 
2.20.1
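
A note on the pattern, for readers unfamiliar with it: the patch elects
one balancer per sched_group by letting each candidate cmpxchg() the
group's balance_cpu from -1 to its own id. Whoever finds a value other
than -1 lost the election and backs off (*balance = 0), and
rebalance_domains() releases the claim on its way out by swapping its
cpu id back to -1 at each level it walked. Below is a minimal,
standalone userspace sketch of that election using C11 atomics; the
pthread harness, the try_balance() helper, and the four-thread setup
are illustrative inventions, not part of the kernel code.

/* balance-election sketch; build with: gcc -pthread -o election election.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int balance_cpu = -1;	/* mirrors sg->balance_cpu */

static void *try_balance(void *arg)
{
	int cpu = (int)(long)arg;
	int expected = -1;

	/*
	 * The election: move balance_cpu from -1 to our own id.  This is
	 * the userspace analogue of
	 *	cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1
	 * in update_sg_lb_stats(); a failed exchange means another cpu
	 * already claimed the group, so we back off.  On failure,
	 * 'expected' holds the observed (winning) id.
	 */
	if (!atomic_compare_exchange_strong(&balance_cpu, &expected, cpu)) {
		printf("cpu %d: group claimed by cpu %d, backing off\n",
		       cpu, expected);
		return NULL;
	}

	printf("cpu %d: elected, load-balancing this group\n", cpu);

	/*
	 * The release, as rebalance_domains() does after the walk:
	 *	(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
	 * Only our own claim is cleared; a later contender may then win,
	 * just as a later balance pass can in the kernel.
	 */
	expected = cpu;
	atomic_compare_exchange_strong(&balance_cpu, &expected, -1);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, try_balance, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Running it a few times typically shows one thread elected and the rest
backing off, though a thread that starts after the winner's release can
claim the slot itself; that is consistent with the patch's intent, which
serializes contenders within a pass rather than forbidding later ones.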