 	if (!total_faults)
 		return 0;
 
-	return 1200 * group_faults(p, nid) / total_faults;
+	return 1000 * group_faults(p, nid) / total_faults;
 }
 
 static unsigned long weighted_cpuload(const int cpu);
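The 1200 -> 1000 change is not just tuning: task_weight() computes its score as 1000 * task_faults(p, nid) / total_faults, so dropping group_weight()'s 20% bonus puts task and group scores on the same 1000-based scale. That is what lets the hunk below mix taskimp and groupimp in a single comparison. A minimal standalone sketch of the idea (weight() is a hypothetical reduction of both kernel helpers to one function, and the fault counts are made-up numbers):

#include <stdio.h>

/*
 * Hypothetical reduction of task_weight()/group_weight(): both
 * now scale a node's share of faults to a 0..1000 range, so a
 * task score and a group score can be compared directly.
 */
static unsigned long weight(unsigned long faults_on_node,
			    unsigned long total_faults)
{
	if (!total_faults)
		return 0;
	return 1000 * faults_on_node / total_faults;
}

int main(void)
{
	/* Made-up numbers: 300 of 400 task faults on this node... */
	printf("task weight:  %lu\n", weight(300, 400));   /* 750 */
	/* ...and 600 of 1000 group faults on the same node. */
	printf("group weight: %lu\n", weight(600, 1000));  /* 600 */
	return 0;
}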
 	/*
 	 * If dst and source tasks are in the same NUMA group, or not
-	 * in any group then look only at task weights otherwise give
-	 * priority to the group weights.
+	 * in any group then look only at task weights.
 	 */
-	if (!cur->numa_group || !env->p->numa_group ||
-	    cur->numa_group == env->p->numa_group) {
+	if (cur->numa_group == env->p->numa_group) {
 		imp = taskimp + task_weight(cur, env->src_nid) -
 		      task_weight(cur, env->dst_nid);
+		/*
+		 * Add some hysteresis to prevent swapping the
+		 * tasks within a group over tiny differences.
+		 */
+		if (cur->numa_group)
+			imp -= imp/16;
 	} else {
-		imp = groupimp + group_weight(cur, env->src_nid) -
-		      group_weight(cur, env->dst_nid);
+		/*
+		 * Compare the group weights. If a task is all by
+		 * itself (not part of a group), use the task weight
+		 * instead.
+		 */
+		if (env->p->numa_group)
+			imp = groupimp;
+		else
+			imp = taskimp;
+
+		if (cur->numa_group)
+			imp += group_weight(cur, env->src_nid) -
+			       group_weight(cur, env->dst_nid);
+		else
+			imp += task_weight(cur, env->src_nid) -
+			       task_weight(cur, env->dst_nid);
 	}
 }
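Taken out of kernel context, the new decision tree is easier to read as one function. This is a sketch under stated assumptions: struct task and its fields are hypothetical stand-ins for struct task_struct plus the task_weight()/group_weight() lookups on the source and destination nodes, and numa_group is reduced to an integer id where 0 means "not in any group" (so equal ids cover the "same group or both ungrouped" case, like the kernel's pointer comparison):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's per-task state and the
 * precomputed task/group weights on the source and destination
 * nodes. numa_group == 0 means "not in any group".
 */
struct task {
	int numa_group;
	long task_w_src, task_w_dst;
	long group_w_src, group_w_dst;
};

static long compare_imp(const struct task *cur, const struct task *p,
			long taskimp, long groupimp)
{
	long imp;

	if (cur->numa_group == p->numa_group) {
		/* Same group, or both ungrouped: task weights only. */
		imp = taskimp + cur->task_w_src - cur->task_w_dst;
		/* Hysteresis: don't swap group members over tiny deltas. */
		if (cur->numa_group)
			imp -= imp / 16;
	} else {
		/* Different groups: each side contributes its group
		 * weight, falling back to its task weight when it is
		 * not part of any group.
		 */
		imp = p->numa_group ? groupimp : taskimp;
		if (cur->numa_group)
			imp += cur->group_w_src - cur->group_w_dst;
		else
			imp += cur->task_w_src - cur->task_w_dst;
	}
	return imp;
}

int main(void)
{
	/* Made-up weights: cur is grouped, p is a lone task. */
	struct task cur = { .numa_group = 1,
			    .task_w_src = 700, .task_w_dst = 300,
			    .group_w_src = 650, .group_w_dst = 400 };
	struct task p = { .numa_group = 0 };

	printf("imp = %ld\n", compare_imp(&cur, &p, 100, 500)); /* 350 */
	return 0;
}

The imp -= imp/16 hysteresis knocks roughly 6% off the estimated improvement for intra-group swaps, so two members of the same group stop trading places over noise-level differences in their fault statistics.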