[COMMON] sched: ems: Modify ontime migration to apply to each cgroup individually
authorDaeyeong Lee <daeyeong.lee@samsung.com>
Mon, 3 Sep 2018 05:45:36 +0000 (14:45 +0900)
committerCosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:57 +0000 (20:24 +0300)
Change-Id: I268c50933dbe11b81c89857c721745df3bf01b69
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
kernel/sched/ems/ontime.c
kernel/sched/tune.c
kernel/sched/tune.h

index 853fd3fc2c6b098bc96bb351ddf93fc55b81fcb4..a8326a20cad29eb2fc363bfc13222d0df9936880 100644 (file)
@@ -239,10 +239,12 @@ ontime_pick_heavy_task(struct sched_entity *se, int *boost_migration)
                *boost_migration = 1;
                return p;
        }
-       if (ontime_load_avg(p) >= get_upper_boundary(task_cpu(p))) {
-               heaviest_task = p;
-               max_util_avg = ontime_load_avg(p);
-               *boost_migration = 0;
+       if (schedtune_ontime_en(p)) {
+               if (ontime_load_avg(p) >= get_upper_boundary(task_cpu(p))) {
+                       heaviest_task = p;
+                       max_util_avg = ontime_load_avg(p);
+                       *boost_migration = 0;
+               }
        }
 
        se = __pick_first_entity(se->cfs_rq);
@@ -258,6 +260,9 @@ ontime_pick_heavy_task(struct sched_entity *se, int *boost_migration)
                        break;
                }
 
+               if (!schedtune_ontime_en(p))
+                       goto next_entity;
+
                if (ontime_load_avg(p) < get_upper_boundary(task_cpu(p)))
                        goto next_entity;
 
@@ -528,6 +533,10 @@ int ontime_task_wakeup(struct task_struct *p, int sync)
        struct cpumask fit_cpus;
        int dst_cpu, src_cpu = task_cpu(p);
 
+       /* If this task is not allowed to ontime, do not ontime wakeup */
+       if (!schedtune_ontime_en(p))
+               return -1;
+
        /* When wakeup task is on ontime migrating, do not ontime wakeup */
        if (ontime_of(p)->migrating == 1)
                return -1;
@@ -565,6 +574,9 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
 {
        int src_cpu = task_cpu(p);
 
+       if (!schedtune_ontime_en(p))
+               return true;
+
        if (ontime_of(p)->migrating == 1) {
                trace_ems_ontime_check_migrate(p, dst_cpu, false, "on migrating");
                return false;
index 95ee812faf5cbff8a2c809ce16608eeb95697df4..1e00cdee6f07d055996f73c433886012d05393d1 100644 (file)
@@ -46,6 +46,9 @@ struct schedtune {
 
        /* Hint to group tasks by process */
        int band;
+
+       /* SchedTune ontime migration */
+       int ontime_en;
 };
 
 static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
@@ -464,6 +467,24 @@ int schedtune_util_est_en(struct task_struct *p)
        return util_est_en;
 }
 
+int schedtune_ontime_en(struct task_struct *p)
+{
+       struct schedtune *st;
+       int ontime_en;
+
+       if (unlikely(!schedtune_initialized))
+               return 0;
+
+       /* Get ontime value */
+       rcu_read_lock();
+       st = task_schedtune(p);
+       ontime_en = st->ontime_en;
+       rcu_read_unlock();
+
+       return ontime_en;
+
+}
+
 int schedtune_prefer_idle(struct task_struct *p)
 {
        struct schedtune *st;
@@ -540,6 +561,24 @@ util_est_en_write(struct cgroup_subsys_state *css, struct cftype *cft,
        return 0;
 }
 
+static u64
+ontime_en_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+       struct schedtune *st = css_st(css);
+
+       return st->ontime_en;
+}
+
+static int
+ontime_en_write(struct cgroup_subsys_state *css, struct cftype *cft,
+               u64 ontime_en)
+{
+       struct schedtune *st = css_st(css);
+       st->ontime_en = ontime_en;
+
+       return 0;
+}
+
 static u64
 band_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
@@ -645,6 +684,11 @@ static struct cftype files[] = {
                .read_u64 = util_est_en_read,
                .write_u64 = util_est_en_write,
        },
+       {
+               .name = "ontime_en",
+               .read_u64 = ontime_en_read,
+               .write_u64 = ontime_en_write,
+       },
        { }     /* terminate */
 };
 
index 5b35686491882a7717aa213c115f7b4569167f17..9d8acf5ec02ad2e8a9534ad3f40c9ca02d272b76 100644 (file)
@@ -18,6 +18,7 @@ int schedtune_task_boost(struct task_struct *tsk);
 int schedtune_prefer_idle(struct task_struct *tsk);
 int schedtune_prefer_perf(struct task_struct *tsk);
 int schedtune_util_est_en(struct task_struct *tsk);
+int schedtune_ontime_en(struct task_struct *tsk);
 
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);
@@ -30,6 +31,7 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu);
 #define schedtune_prefer_idle(tsk) 0
 #define schedtune_prefer_perf(tsk) 0
 #define schedtune_util_est_en(tsk) 0
+#define schedtune_ontime_en(tsk) 0
 
 #define schedtune_enqueue_task(task, cpu) do { } while (0)
 #define schedtune_dequeue_task(task, cpu) do { } while (0)