From: Nick Piggin <nickpiggin@yahoo.com.au>
Track the last waker CPU, and only consider wakeup-balancing if there is a
match between the current waker CPU and the previous waker CPU. This ensures
that there is some correlation between two successive wakeup events before
we move the task. Should help random-wakeup workloads on large SMP systems
by reducing migration attempts by a factor of nr_cpus: a uniformly random
waker matches the previous waker only about once in nr_cpus wakeups.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
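---

As a rough illustration (not part of the patch), here is a minimal userspace
sketch of the filter. All names in it are hypothetical: struct task,
wake_up_task() and consider_wakeup_balance() stand in for task_struct,
try_to_wake_up() and the wake-affine balancing path that the patch gates.

#include <stdio.h>

struct task {
	int last_waker_cpu;	/* CPU that last woke this task up */
};

/* Stand-in for the (expensive) wakeup-balancing decision. */
static void consider_wakeup_balance(struct task *p, int this_cpu)
{
	printf("balancing considered: task may move to CPU %d\n", this_cpu);
}

static void wake_up_task(struct task *p, int this_cpu)
{
	/*
	 * Only look at migrating the task if the same CPU also performed
	 * the previous wakeup; two matching wakeups in a row suggest real
	 * waker/wakee affinity.
	 */
	if (p->last_waker_cpu == this_cpu)
		consider_wakeup_balance(p, this_cpu);

	p->last_waker_cpu = this_cpu;	/* remember for the next wakeup */
}

int main(void)
{
	struct task t = { .last_waker_cpu = 0 };

	wake_up_task(&t, 1);	/* CPU 1: no match with previous waker */
	wake_up_task(&t, 1);	/* CPU 1 again: balancing is considered */
	wake_up_task(&t, 2);	/* random wakeup from CPU 2: filtered out */
	return 0;
}

The state added per task is a single int, so the filter is essentially free
compared with the cross-CPU migration work it avoids.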
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ ... @@
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	int last_waker_cpu;	/* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
+#endif
 #endif
 	int prio, static_prio;
 	struct list_head run_list;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ ... @@
 		}
 	}
 
+	if (p->last_waker_cpu != this_cpu)
+		goto out_set_cpu;
+
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;
@@ ... @@
 		cpu = task_cpu(p);
 	}
 
+	p->last_waker_cpu = this_cpu;
+
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
@@ ... @@
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
+#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;