From: Joonsoo Kim
Date: Tue, 23 Apr 2013 08:27:42 +0000 (+0900)
Subject: sched: Prevent to re-select dst-cpu in load_balance()
X-Git-Tag: MMI-PSA29.97-13-9~14514^2~5
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e02e60c109ca70935bad1131976bdbf5160cf576;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

sched: Prevent to re-select dst-cpu in load_balance()

Commit 88b8dac0 made load_balance() consider other cpus in its group,
but it has no code to prevent re-selecting the same dst-cpu, so the
same dst-cpu can be selected over and over.

This patch adds functionality to load_balance() to exclude a cpu once
it has been selected. Re-selection of a dst_cpu is prevented via env's
cpus, so env's cpus is now a candidate mask not only for src_cpus but
also for dst_cpus.

With this patch, lb_iterations and max_lb_iterations can be removed,
because env's cpus now decides whether we can go ahead or not.

Signed-off-by: Joonsoo Kim
Acked-by: Peter Zijlstra
Tested-by: Jason Low
Cc: Srivatsa Vaddagiri
Cc: Davidlohr Bueso
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1366705662-3587-7-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Ingo Molnar
---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5b1e96687b49..acaf567a03d2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3905,7 +3905,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		return 0;
 
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
-		int new_dst_cpu;
+		int cpu;
 
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 
@@ -3920,12 +3920,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
 			return 0;
 
-		new_dst_cpu = cpumask_first_and(env->dst_grpmask,
-						tsk_cpus_allowed(p));
-		if (new_dst_cpu < nr_cpu_ids) {
-			env->flags |= LBF_SOME_PINNED;
-			env->new_dst_cpu = new_dst_cpu;
+		/* Prevent to re-select dst_cpu via env's cpus */
+		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
+			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+				env->flags |= LBF_SOME_PINNED;
+				env->new_dst_cpu = cpu;
+				break;
+			}
 		}
+
 		return 0;
 	}
 
@@ -5008,7 +5011,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			int *balance)
 {
 	int ld_moved, cur_ld_moved, active_balance = 0;
-	int lb_iterations, max_lb_iterations;
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
@@ -5028,15 +5030,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * For NEWLY_IDLE load_balancing, we don't need to consider
 	 * other cpus in our group
 	 */
-	if (idle == CPU_NEWLY_IDLE) {
+	if (idle == CPU_NEWLY_IDLE)
 		env.dst_grpmask = NULL;
-		/*
-		 * we don't care max_lb_iterations in this case,
-		 * in following patch, this will be removed
-		 */
-		max_lb_iterations = 0;
-	} else
-		max_lb_iterations = cpumask_weight(env.dst_grpmask);
 
 	cpumask_copy(cpus, cpu_active_mask);
 
@@ -5064,7 +5059,6 @@ redo:
 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
 	ld_moved = 0;
-	lb_iterations = 1;
 	if (busiest->nr_running > 1) {
 		/*
 		 * Attempt to move tasks. If find_busiest_group has found
@@ -5121,14 +5115,17 @@ more_balance:
 		 * moreover subsequent load balance cycles should correct the
 		 * excess load moved.
 		 */
-		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
-				lb_iterations++ < max_lb_iterations) {
+		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
 
 			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
 			env.dst_cpu	 = env.new_dst_cpu;
 			env.flags	&= ~LBF_SOME_PINNED;
 			env.loop	 = 0;
 			env.loop_break	 = sched_nr_migrate_break;
+
+			/* Prevent to re-select dst_cpu via env's cpus */
+			cpumask_clear_cpu(env.dst_cpu, env.cpus);
+
 			/*
 			 * Go back to "more_balance" rather than "redo" since we
 			 * need to continue with same src_cpu.
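
The termination argument is worth seeing in isolation: clearing each
tried dst_cpu out of env->cpus means the more_balance retry path can
run at most once per candidate cpu, which is exactly the bound the
removed lb_iterations/max_lb_iterations counters used to enforce. The
stand-alone C sketch below models that idea outside the kernel;
balance_env, try_balance_once() and pick_new_dst() are hypothetical
stand-ins for struct lb_env and the load_balance() internals, and an
unsigned long bitmask stands in for struct cpumask.

/*
 * Sketch only, not kernel code: models why clearing each tried
 * dst_cpu from env->cpus bounds the retry loop without the
 * lb_iterations/max_lb_iterations counters this patch removes.
 */
#include <stdio.h>

#define NCPUS 8U

struct balance_env {
	unsigned int dst_cpu;		/* current destination cpu */
	unsigned long cpus;		/* candidates for src *and* dst */
	unsigned long dst_grpmask;	/* cpus in the local group */
};

/* Pretend every task is pinned away from dst_cpu, forcing a retry. */
static int try_balance_once(struct balance_env *env)
{
	printf("balancing towards cpu %u\n", env->dst_cpu);
	return 1;
}

/* First cpu that is both in the group and still set in env->cpus. */
static int pick_new_dst(const struct balance_env *env)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (env->dst_grpmask & env->cpus & (1UL << cpu))
			return (int)cpu;
	return -1;			/* every dst candidate tried */
}

int main(void)
{
	struct balance_env env = {
		.dst_cpu     = 0,
		.cpus        = (1UL << NCPUS) - 1,
		.dst_grpmask = 0x0f,	/* cpus 0-3 form the group */
	};
	int next;

	while (try_balance_once(&env)) {
		/* Prevent re-selecting this dst_cpu: drop it from env.cpus. */
		env.cpus &= ~(1UL << env.dst_cpu);

		next = pick_new_dst(&env);
		if (next < 0)
			break;		/* mask exhausted: loop terminates */
		env.dst_cpu = (unsigned int)next;
	}
	return 0;
}

Run against this 4-cpu group it prints one "balancing towards cpu N"
line per candidate and then stops: exhausting the mask, not an
iteration counter, is what bounds the loop.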