sched: ems: ontime: Remove distinction between ontime and normal tasks
author Daeyeong Lee <daeyeong.lee@samsung.com>
Tue, 8 May 2018 10:56:09 +0000 (19:56 +0900)
committer lakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:58:47 +0000 (14:58 +0900)
Change-Id: I343feff2d7db0d97d3813b570193f1ee8e3af93e
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
include/linux/sched.h
kernel/sched/ems/ontime.c
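With this change, the tri-state ontime classification (NOT_ONTIME,
ONTIME_MIGRATING, ONTIME) and the remembered ontime cpu are gone; the only
per-task state left is whether an ontime migration is currently in flight,
and the wakeup/migration decisions are driven by the task's ontime load
against the per-cpu thresholds instead of a sticky "ontime task" label. The
boost-migration special case in ontime_migration_cpu_stop() also disappears
with the classification. A minimal before/after sketch of the state change
(illustrative only; field names as in the hunks below):

    /* Before: classification flags plus the cpu the task was bound to */
    struct ontime_entity {
            struct ontime_avg avg;
            int flags;              /* NOT_ONTIME, ONTIME_MIGRATING or ONTIME */
            int cpu;
    };

    /* After: only the in-flight migration marker remains */
    struct ontime_entity {
            struct ontime_avg avg;
            int migrating;          /* non-zero while a migration is in flight */
            int cpu;
    };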

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5ae5f9867d9945a19c84a85b5ff3cd5ad21ca69..5ceb51cd3997b45c0488b9c5979273e843ad31b6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -347,10 +347,6 @@ struct sched_avg {
        unsigned long                   util_avg;
 };
 
-#define NOT_ONTIME             1
-#define ONTIME_MIGRATING       2
-#define ONTIME                 4
-
 #define ontime_of(p)                   (&p->se.ontime)
 
 struct ontime_avg {
@@ -362,7 +358,7 @@ struct ontime_avg {
 
 struct ontime_entity {
        struct ontime_avg avg;
-       int flags;
+       int migrating;
        int cpu;
 };
 
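The ontime.c changes below rework the decision paths to use task_cpu(p) and
the load thresholds directly. As a reading aid, this is roughly how
ontime_task_wakeup() reads once the hunks apply (a condensed sketch: trace
calls and the unchanged up-migration target selection are omitted, and the
initialization of src_cpu is an assumption, since the diff context does not
show it):

    int ontime_task_wakeup(struct task_struct *p)
    {
            struct cpumask target_mask;
            int src_cpu = task_cpu(p);      /* assumed; outside the hunks */
            int dst_cpu = -1;

            /* An in-flight ontime migration always wins */
            if (ontime_of(p)->migrating == 1)
                    return -1;

            /* A task below the down threshold is released right away */
            if (ontime_load_avg(p) < get_down_threshold(src_cpu))
                    goto release;

            /* ... unchanged up-migration target selection ... */
            if (cpu_selected(dst_cpu))
                    return dst_cpu;         /* "up ontime" */

            /* Otherwise try to stay within the current coregroup */
            cpumask_copy(&target_mask, cpu_coregroup_mask(src_cpu));
            dst_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
            if (cpu_selected(dst_cpu))
                    return dst_cpu;         /* "stay ontime" */

    release:
            return -1;                      /* "release ontime" */
    }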
diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index 6b28cfef2c52b28bfb17968bab0c826a2528b68d..33ca13c9e19c6f0a37a8a024f0f2ee710463c9ef 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -23,9 +23,7 @@
 #define TASK_TRACK_COUNT       5
 #define MAX_CAPACITY_CPU       (NR_CPUS - 1)
 
-#define ontime_task_cpu(p)             (ontime_of(p)->cpu)
-#define ontime_flag(p)                 (ontime_of(p)->flags)
-#define ontime_load_avg(p)             (ontime_of(p)->avg.load_avg)
+#define ontime_load_avg(p)     (ontime_of(p)->avg.load_avg)
 
 #define cap_scale(v, s)                ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
@@ -94,18 +92,6 @@ static inline struct sched_entity *se_of(struct sched_avg *sa)
        return container_of(sa, struct sched_entity, avg);
 }
 
-static inline void include_ontime_task(struct task_struct *p, int dst_cpu)
-{
-       ontime_flag(p) = ONTIME;
-       ontime_task_cpu(p) = dst_cpu;
-}
-
-static inline void exclude_ontime_task(struct task_struct *p)
-{
-       ontime_task_cpu(p) = 0;
-       ontime_flag(p) = NOT_ONTIME;
-}
-
 static int
 ontime_select_target_cpu(struct cpumask *dst_cpus, const struct cpumask *mask)
 {
@@ -253,7 +239,7 @@ static int ontime_migration_cpu_stop(void *data)
 
        raw_spin_lock_irq(&src_rq->lock);
 
-       if (!(ontime_flag(p) & ONTIME_MIGRATING))
+       if (ontime_of(p)->migrating == 0)
                goto out_unlock;
 
        if (p->exit_state)
@@ -278,28 +264,20 @@ static int ontime_migration_cpu_stop(void *data)
                        break;
 
        if (likely(sd) && move_specific_task(p, env)) {
-               if (boost_migration) {
-                       /* boost task is not classified as ontime task */
-                       exclude_ontime_task(p);
-               } else {
-                       include_ontime_task(p, dst_cpu);
-               }
-
                rcu_read_unlock();
                double_unlock_balance(src_rq, dst_rq);
 
                trace_ems_ontime_migration(p, ontime_of(p)->avg.load_avg,
                                        src_cpu, dst_cpu, boost_migration);
-               goto success_unlock;
+               goto out_unlock;
        }
 
        rcu_read_unlock();
        double_unlock_balance(src_rq, dst_rq);
 
 out_unlock:
-       exclude_ontime_task(p);
+       ontime_of(p)->migrating = 0;
 
-success_unlock:
        src_rq->active_balance = 0;
        dst_rq->ontime_migrating = 0;
 
@@ -340,7 +318,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
 /****************************************************************/
 void ontime_trace_task_info(struct task_struct *p)
 {
-       trace_ems_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_flag(p));
+       trace_ems_ontime_load_avg_task(p, &ontime_of(p)->avg, ontime_of(p)->migrating);
 }
 
 DEFINE_PER_CPU(struct cpu_stop_work, ontime_migration_work);
@@ -419,7 +397,7 @@ void ontime_migration(void)
                                continue;
                        }
 
-                       ontime_flag(p) = ONTIME_MIGRATING;
+                       ontime_of(p)->migrating = 1;
                        get_task_struct(p);
 
                        /* Set environment data */
@@ -452,9 +430,12 @@ int ontime_task_wakeup(struct task_struct *p)
        int dst_cpu = -1;
 
        /* When wakeup task is on ontime migrating, do not ontime wakeup */
-       if (ontime_flag(p) == ONTIME_MIGRATING)
+       if (ontime_of(p)->migrating == 1)
                return -1;
 
+       if (ontime_load_avg(p) < get_down_threshold(src_cpu))
+               goto release;
+
        /*
         * When wakeup task satisfies ontime condition to up migration,
         * check there is a possible target cpu.
@@ -472,97 +453,64 @@ int ontime_task_wakeup(struct task_struct *p)
 
                if (cpu_selected(dst_cpu)) {
                        trace_ems_ontime_task_wakeup(p, src_cpu, dst_cpu, "up ontime");
-                       goto ontime_up;
+                       return dst_cpu;
                }
        }
 
        /*
-        * If wakeup task is not ontime and doesn't satisfy ontime condition,
-        * it cannot be ontime task.
+        * If there is a possible dst_cpu to stay on, the task will wake up on it.
         */
-       if (ontime_flag(p) == NOT_ONTIME)
-               goto ontime_out;
-
-       if (ontime_flag(p) == ONTIME) {
-               /*
-                * If wakeup task is ontime but doesn't keep ontime condition,
-                * exclude this task from ontime.
-                */
-               if (ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
-                       trace_ems_ontime_task_wakeup(p, src_cpu, -1, "release ontime");
-                       goto ontime_out;
-               }
-
-               /*
-                * If there is a possible cpu to stay ontime, task will wake up at this cpu.
-                */
-               cpumask_copy(&target_mask, cpu_coregroup_mask(ontime_task_cpu(p)));
-               dst_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
-
-               if (cpu_selected(dst_cpu)) {
-                       trace_ems_ontime_task_wakeup(p, src_cpu, dst_cpu, "stay ontime");
-                       goto ontime_stay;
-               }
+       cpumask_copy(&target_mask, cpu_coregroup_mask(src_cpu));
+       dst_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
 
-               trace_ems_ontime_task_wakeup(p, src_cpu, -1, "banished");
-               goto ontime_out;
+       if (cpu_selected(dst_cpu)) {
+               trace_ems_ontime_task_wakeup(p, src_cpu, dst_cpu, "stay ontime");
+               return dst_cpu;
        }
 
-       if (!cpu_selected(dst_cpu))
-               goto ontime_out;
-
-ontime_up:
-       include_ontime_task(p, dst_cpu);
-
-ontime_stay:
-       return dst_cpu;
-
-ontime_out:
-       exclude_ontime_task(p);
+release:
+       /*
+        * If the wakeup task doesn't satisfy the ontime condition or there
+        * is no possible dst_cpu, release it from ontime.
+        */
+       trace_ems_ontime_task_wakeup(p, src_cpu, -1, "release ontime");
        return -1;
 }
 
 int ontime_can_migration(struct task_struct *p, int dst_cpu)
 {
-       if (ontime_flag(p) & NOT_ONTIME) {
-               trace_ems_ontime_check_migrate(p, dst_cpu, true, "not ontime");
-               return true;
-       }
+       int src_cpu = task_cpu(p);
 
-       if (ontime_flag(p) & ONTIME_MIGRATING) {
+       if (ontime_of(p)->migrating == 1) {
                trace_ems_ontime_check_migrate(p, dst_cpu, false, "migrating");
                return false;
        }
 
-       if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(ontime_task_cpu(p)))) {
+       if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(src_cpu))) {
                trace_ems_ontime_check_migrate(p, dst_cpu, true, "same coregroup");
                return true;
        }
 
-       if (capacity_orig_of(dst_cpu) > capacity_orig_of(ontime_task_cpu(p))) {
+       if (capacity_orig_of(dst_cpu) > capacity_orig_of(src_cpu)) {
                trace_ems_ontime_check_migrate(p, dst_cpu, true, "bigger cpu");
                return true;
        }
 
        /*
-        * At this point, task is "ontime task" and running on big
-        * and load balancer is trying to migrate task to LITTLE.
+        * At this point, the load balancer is trying to migrate the task to a smaller CPU.
         */
-       if (cpu_rq(task_cpu(p))->nr_running > 1) {
-               trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
-               goto release;
+       if (ontime_load_avg(p) < get_down_threshold(src_cpu)) {
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
+               return true;
        }
 
-       if (ontime_load_avg(p) >= get_down_threshold(ontime_task_cpu(p))) {
-               trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");
-               return false;
+       if (cpu_rq(src_cpu)->nr_running > 1) {
+               trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
+               return true;
        }
 
-       trace_ems_ontime_check_migrate(p, dst_cpu, true, "ontime_release");
-release:
-       exclude_ontime_task(p);
-
-       return true;
+       trace_ems_ontime_check_migrate(p, dst_cpu, false, "heavy task");
+       return false;
 }
 
 /*
@@ -620,7 +568,7 @@ void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se)
        ontime->avg.load_sum = ontime_of(parent)->avg.load_sum;
        ontime->avg.load_avg = ontime_of(parent)->avg.load_avg;
        ontime->avg.period_contrib = 1023;
-       ontime->flags = NOT_ONTIME;
+       ontime->migrating = 0;
 
        trace_ems_ontime_new_entity_load(task_of(se), &ontime->avg);
 }
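Taken together, the hunks leave ontime_entity.migrating with a simple
lifecycle (a summary of the functions above, not literal code):

    /*
     * ontime_new_entity_load()      migrating = 0   new entity starts clean
     * ontime_migration()            migrating = 1   set before queueing the
     *                                               migration stop work
     * ontime_migration_cpu_stop()   migrating = 0   cleared when the stopper
     *                                               finishes or bails out
     *
     * ontime_task_wakeup() and ontime_can_migration() only test the marker;
     * no other writer appears in this patch.
     */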