sched: ems: ontime: Don't allow the heaviest task to be down-migrated.
authorDaeyeong Lee <daeyeong.lee@samsung.com>
Mon, 14 May 2018 10:09:11 +0000 (19:09 +0900)
committerlakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:58:50 +0000 (14:58 +0900)
Change-Id: I0daf9e82d69438155ce80c33a6a4709523462491
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
kernel/sched/ems/ontime.c

index e10fc9e47b2d05543382b3a1d17a284899d1df45..5ec29d40b48dc7530aefb147c101d91cb71427af 100644 (file)
@@ -535,9 +535,32 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
                return true;
        }
 
+       /*
+        * When runqueue is busy, check whether this task is heaviest.
+        * If this task is not heaviest in runqueue, allow it to be migrated.
+        */
        if (cpu_rq(src_cpu)->nr_running > 1) {
-               trace_ems_ontime_check_migrate(p, dst_cpu, true, "curr is busy");
-               return true;
+               struct task_struct *curr = cpu_curr(src_cpu);
+               struct sched_entity *se = __pick_first_entity(p->se.cfs_rq);
+               int count;
+
+               /* Firstly, compare with curr running task */
+               if (ontime_load_avg(p) < ontime_load_avg(curr)) {
+                       trace_ems_ontime_check_migrate(p, dst_cpu, true, "busy runqueue");
+                       return true;
+               }
+
+               /* Secondly, compare with tasks in rq */
+               for (count = 0; se && count < TASK_TRACK_COUNT;
+                               se = __pick_next_entity(se), count++) {
+                       if (entity_is_cfs_rq(se))
+                               continue;
+
+                       if (ontime_load_avg(p) < ontime_load_avg(task_of(se))) {
+                               trace_ems_ontime_check_migrate(p, dst_cpu, true, "busy runqueue");
+                               return true;
+                       }
+               }
        }
 
        trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");