From 7dbd13ed06513b047216a7ffc718bad9df0660f1 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Mon, 7 Oct 2013 11:29:29 +0100
Subject: [PATCH] sched/numa: Prevent parallel updates to group stats during placement

Having multiple tasks in a group go through task_numa_placement
simultaneously can lead to a task picking a wrong node to run on, because
the group stats may be in the middle of an update. This patch avoids
parallel updates by holding the numa_group lock during placement
decisions.

Signed-off-by: Mel Gorman
Reviewed-by: Rik van Riel
Cc: Andrea Arcangeli
Cc: Johannes Weiner
Cc: Srikar Dronamraju
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1381141781-10992-52-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c4df2de6ca4a..147349987bfe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1233,6 +1233,7 @@ static void task_numa_placement(struct task_struct *p)
 {
 	int seq, nid, max_nid = -1, max_group_nid = -1;
 	unsigned long max_faults = 0, max_group_faults = 0;
+	spinlock_t *group_lock = NULL;
 
 	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
 	if (p->numa_scan_seq == seq)
@@ -1241,6 +1242,12 @@ static void task_numa_placement(struct task_struct *p)
 	p->numa_migrate_seq++;
 	p->numa_scan_period_max = task_scan_max(p);
 
+	/* If the task is part of a group prevent parallel updates to group stats */
+	if (p->numa_group) {
+		group_lock = &p->numa_group->lock;
+		spin_lock(group_lock);
+	}
+
 	/* Find the node with the highest number of faults */
 	for_each_online_node(nid) {
 		unsigned long faults = 0, group_faults = 0;
@@ -1279,20 +1286,24 @@ static void task_numa_placement(struct task_struct *p)
 		}
 	}
 
-	/*
-	 * If the preferred task and group nids are different,
-	 * iterate over the nodes again to find the best place.
-	 */
-	if (p->numa_group && max_nid != max_group_nid) {
-		unsigned long weight, max_weight = 0;
-
-		for_each_online_node(nid) {
-			weight = task_weight(p, nid) + group_weight(p, nid);
-			if (weight > max_weight) {
-				max_weight = weight;
-				max_nid = nid;
+	if (p->numa_group) {
+		/*
+		 * If the preferred task and group nids are different,
+		 * iterate over the nodes again to find the best place.
+		 */
+		if (max_nid != max_group_nid) {
+			unsigned long weight, max_weight = 0;
+
+			for_each_online_node(nid) {
+				weight = task_weight(p, nid) + group_weight(p, nid);
+				if (weight > max_weight) {
+					max_weight = weight;
+					max_nid = nid;
+				}
 			}
 		}
+
+		spin_unlock(group_lock);
 	}
 
 	/* Preferred node as the node with the most faults */
-- 
2.20.1
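
Appended note (not part of the patch): below is a minimal userspace sketch of the
locking discipline the patch introduces. The struct, the helper names and the
pthread spinlock (standing in for numa_group->lock) are illustrative assumptions,
not kernel code; the point is that both the fault-count update and the placement
decision take the same group lock, so the decision never reads stats mid-update.
The main() merely exercises the helpers single-threaded.

/*
 * Sketch of the "hold the group lock while reading group stats" pattern.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_NODES 4

struct group_stats {
	pthread_spinlock_t lock;            /* stands in for numa_group->lock */
	unsigned long faults[NR_NODES];     /* per-node fault counts */
};

/* Writer side: update per-node fault counts under the group lock. */
static void group_record_fault(struct group_stats *gs, int nid)
{
	pthread_spin_lock(&gs->lock);
	gs->faults[nid]++;
	pthread_spin_unlock(&gs->lock);
}

/* Reader side: pick the node with the most faults, also under the lock,
 * so a concurrent update cannot be observed half-way through. */
static int group_pick_node(struct group_stats *gs)
{
	unsigned long max_faults = 0;
	int nid, max_nid = -1;

	pthread_spin_lock(&gs->lock);
	for (nid = 0; nid < NR_NODES; nid++) {
		if (gs->faults[nid] > max_faults) {
			max_faults = gs->faults[nid];
			max_nid = nid;
		}
	}
	pthread_spin_unlock(&gs->lock);

	return max_nid;
}

int main(void)
{
	struct group_stats gs = { .faults = { 0 } };

	pthread_spin_init(&gs.lock, PTHREAD_PROCESS_PRIVATE);
	group_record_fault(&gs, 2);
	group_record_fault(&gs, 2);
	group_record_fault(&gs, 1);
	printf("preferred node: %d\n", group_pick_node(&gs));
	pthread_spin_destroy(&gs.lock);
	return 0;
}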