}
/*
- * When runqueue is busy, check whether this task is heaviest.
- * If this task is not heaviest in runqueue, allow it to be migrated.
+ * When the runqueue is overloaded, check whether the cpu's util exceeds the coverage ratio.
+ * If so, allow the task to be migrated.
*/
if (cpu_rq(src_cpu)->nr_running > 1) {
- struct task_struct *curr = cpu_curr(src_cpu);
- struct sched_entity *se = __pick_first_entity(p->se.cfs_rq);
- int count;
+ unsigned long cpu_util = cpu_util_wake(src_cpu, p);
+ unsigned long util = task_util(p);
+ unsigned long coverage_ratio = get_coverage_ratio(src_cpu);
- /* Firstly, compare with curr running task */
- if (ontime_load_avg(p) < ontime_load_avg(curr)) {
- trace_ems_ontime_check_migrate(p, dst_cpu, true, "busy runqueue");
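+ /*
+ * coverage_ratio is treated as a percentage: the check below is
+ * equivalent to cpu_util / capacity_orig >= coverage_ratio / 100
+ * without a division, and the cpu's util must also exceed the
+ * task's own util before migration is allowed.
+ */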
+ if ((cpu_util * 100 >= capacity_orig_of(src_cpu) * coverage_ratio)
+ && (cpu_util > util)) {
+ trace_ems_ontime_check_migrate(p, dst_cpu, true, "exceed coverage");
return true;
}
-
- /* Secondly, compare with tasks in rq */
- for (count = 0; se && count < TASK_TRACK_COUNT;
- se = __pick_next_entity(se), count++) {
- if (entity_is_cfs_rq(se))
- continue;
-
- if (ontime_load_avg(p) < ontime_load_avg(task_of(se))) {
- trace_ems_ontime_check_migrate(p, dst_cpu, true, "busy runqueue");
- return true;
- }
- }
}
trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");