 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
-		sg->balance_cpu = -1;
 
 		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;

 		cpumask_clear(sched_group_cpus(sg));
 		sg->sgp->power = 0;
-		sg->balance_cpu = -1;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
 			int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
-	unsigned int balance_cpu = -1;
-	unsigned long balance_load = ~0UL;
+	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long avg_load_per_task = 0;
 	int i;

 		/* Bias balancing toward cpus of our domain */
 		if (local_group) {
-			load = target_load(i, load_idx);
-			if (load < balance_load || idle_cpu(i)) {
-				balance_load = load;
+			if (idle_cpu(i) && !first_idle_cpu) {
+				first_idle_cpu = 1;
 				balance_cpu = i;
 			}
+
+			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
 			if (load > max_cpu_load) {

 	 */
 	if (local_group) {
 		if (env->idle != CPU_NEWLY_IDLE) {
-			if (balance_cpu != env->dst_cpu ||
-			    cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
+			if (balance_cpu != env->dst_cpu) {
 				*balance = 0;
 				return;
 			}

 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long interval;
-	struct sched_domain *sd, *last = NULL;
+	struct sched_domain *sd;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
-		last = sd;
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;

 		if (!balance)
 			break;
 	}
-	for (sd = last; sd; sd = sd->child)
-		(void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
-
 	rcu_read_unlock();
 
 	/*