walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
}
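+/*
+ * update_shares() acquires each runqueue's lock while redistributing
+ * group shares, so the caller's rq->lock must be dropped across the call
+ * and re-acquired afterwards.
+ */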
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+ spin_unlock(&rq->lock);
+ update_shares(sd);
+ spin_lock(&rq->lock);
+}
+
static void update_h_load(int cpu)
{
walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
static inline void update_shares(struct sched_domain *sd)
{
}
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
#endif
#endif
schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
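+ /* Refresh group shares so find_busiest_group() sees up-to-date group weights. */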
+ update_shares_locked(this_rq, sd);
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
&sd_idle, cpus, NULL);
if (!group) {
} else
sd->nr_balance_failed = 0;
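+ /* Tasks may have been pulled to this cpu; refresh the group shares again. */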
+ update_shares_locked(this_rq, sd);
return ld_moved;
out_balanced: