sched: ems: ontime: Remove min_residency from ontime condition
author    Daeyeong Lee <daeyeong.lee@samsung.com>
          Tue, 8 May 2018 10:34:41 +0000 (19:34 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
          Mon, 22 Apr 2024 17:24:55 +0000 (20:24 +0300)
Change-Id: I2263bff40f49ff9c9f112aac1db0546330c1447f
Signed-off-by: Daeyeong Lee <daeyeong.lee@samsung.com>
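
Previously, an ontime task could only be released from its big coregroup after running there for at least min_residency (parsed from the min-residency-us device-tree property); this change drops that time gate so release depends on load alone. A minimal before/after sketch of the wakeup-path release check, simplified from the hunks below (not the full function):

    /* Before: require both a minimum residency and a low load. */
    u64 delta = cpu_rq(0)->clock_task - ontime_migration_time(p); /* ns */
    delta >>= 10;                                                 /* ~us */
    if (delta > get_min_residency(ontime_task_cpu(p)) &&
        ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p)))
            exclude_ontime_task(p);

    /* After: release as soon as the load drops below the threshold. */
    if (ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p)))
            exclude_ontime_task(p);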
kernel/sched/ems/ontime.c

index 3ede1b14e69dd7c7fdc82dff27c25b90d53696e7..6b28cfef2c52b28bfb17968bab0c826a2528b68d 100644
@@ -25,7 +25,6 @@
 
 #define ontime_task_cpu(p)             (ontime_of(p)->cpu)
 #define ontime_flag(p)                 (ontime_of(p)->flags)
-#define ontime_migration_time(p)       (ontime_of(p)->avg.ontime_migration_time)
 #define ontime_load_avg(p)             (ontime_of(p)->avg.load_avg)
 
 #define cap_scale(v, s)                ((v)*(s) >> SCHED_CAPACITY_SHIFT)
@@ -39,7 +38,6 @@ struct ontime_cond {
 
        unsigned long           up_threshold;
        unsigned long           down_threshold;
-       unsigned int            min_residency;
 
        int                     coregroup;
        struct cpumask          cpus;
@@ -86,18 +84,6 @@ static unsigned long get_down_threshold(int cpu)
        return 0;
 }
 
-static unsigned int get_min_residency(int cpu)
-{
-       struct ontime_cond *curr;
-
-       list_for_each_entry(curr, &cond_list, list) {
-               if (cpumask_test_cpu(cpu, &curr->cpus))
-                       return curr->min_residency;
-       }
-
-       return 0;
-}
-
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
        return container_of(se, struct task_struct, se);
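
The deleted get_min_residency() shares its shape with the surviving per-coregroup lookups named in the hunk header (get_up_threshold()/get_down_threshold()): walk cond_list and return the field of the first coregroup whose cpumask contains the cpu. A sketch of that shared pattern, inferred from the deleted body above since get_down_threshold() itself sits outside this hunk:

    static unsigned long get_down_threshold(int cpu)
    {
            struct ontime_cond *curr;

            /* Return the threshold of the coregroup covering this cpu. */
            list_for_each_entry(curr, &cond_list, list) {
                    if (cpumask_test_cpu(cpu, &curr->cpus))
                            return curr->down_threshold;
            }

            return 0;
    }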
@@ -112,15 +98,11 @@ static inline void include_ontime_task(struct task_struct *p, int dst_cpu)
 {
        ontime_flag(p) = ONTIME;
        ontime_task_cpu(p) = dst_cpu;
-
-       /* Manage time based on clock task of boot cpu(cpu0) */
-       ontime_migration_time(p) = cpu_rq(0)->clock_task;
 }
 
 static inline void exclude_ontime_task(struct task_struct *p)
 {
        ontime_task_cpu(p) = 0;
-       ontime_migration_time(p) = 0;
        ontime_flag(p) = NOT_ONTIME;
 }
 
@@ -466,7 +448,6 @@ int ontime_task_wakeup(struct task_struct *p)
 {
        struct ontime_cond *curr, *next = NULL;
        struct cpumask target_mask;
-       u64 delta;
        int src_cpu = task_cpu(p);
        int dst_cpu = -1;
 
@@ -507,11 +488,7 @@ int ontime_task_wakeup(struct task_struct *p)
                 * If wakeup task is ontime but doesn't keep ontime condition,
                 * exclude this task from ontime.
                 */
-               delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
-               delta = delta >> 10;
-
-               if (delta > get_min_residency(ontime_task_cpu(p)) &&
-                               ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
+               if (ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p))) {
                        trace_ems_ontime_task_wakeup(p, src_cpu, -1, "release ontime");
                        goto ontime_out;
                }
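
A note on the arithmetic deleted in this hunk: rq->clock_task is in nanoseconds, and the right shift by 10 divides by 1024, a cheap approximation of nanoseconds-to-microseconds so the delta can be compared against the microsecond-denominated min-residency-us value. Roughly (variable names here are illustrative):

    /* ns -> ~us: >> 10 divides by 1024 rather than 1000 (~2.4% error). */
    u64 residency_us = (now_ns - migrated_ns) >> 10;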
@@ -547,8 +524,6 @@ ontime_out:
 
 int ontime_can_migration(struct task_struct *p, int dst_cpu)
 {
-       u64 delta;
-
        if (ontime_flag(p) & NOT_ONTIME) {
                trace_ems_ontime_check_migrate(p, dst_cpu, true, "not ontime");
                return true;
@@ -573,13 +548,6 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu)
         * At this point, task is "ontime task" and running on big
         * and load balancer is trying to migrate task to LITTLE.
         */
-       delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
-       delta = delta >> 10;
-       if (delta <= get_min_residency(ontime_task_cpu(p))) {
-               trace_ems_ontime_check_migrate(p, dst_cpu, false, "min residency");
-               return false;
-       }
-
        if (cpu_rq(task_cpu(p))->nr_running > 1) {
                trace_ems_ontime_check_migrate(p, dst_cpu, true, "big is busy");
                goto release;
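
With the residency veto gone, a down-migration attempt by the load balancer now proceeds directly to the contention check visible above. Sketched (simplified; the body of the 'release' label is outside this hunk and assumed to clear the ontime state):

    /* Task is ontime on big; the balancer wants to move it to LITTLE. */
    if (cpu_rq(task_cpu(p))->nr_running > 1) {
            /* Big core is contended: allow the move and drop ontime. */
            goto release;           /* assumed: exclude_ontime_task(p) */
    }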
@@ -651,7 +619,6 @@ void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se)
 
        ontime->avg.load_sum = ontime_of(parent)->avg.load_sum;
        ontime->avg.load_avg = ontime_of(parent)->avg.load_avg;
-       ontime->avg.ontime_migration_time = 0;
        ontime->avg.period_contrib = 1023;
        ontime->flags = NOT_ONTIME;
 
@@ -698,13 +665,10 @@ static ssize_t store_##_name(struct kobject *k, const char *buf, size_t count)    \
 
 ontime_show(up_threshold);
 ontime_show(down_threshold);
-ontime_show(min_residency);
 ontime_store(up_threshold, unsigned long, 1024);
 ontime_store(down_threshold, unsigned long, 1024);
-ontime_store(min_residency, unsigned int, UINT_MAX);
 ontime_attr_rw(up_threshold);
 ontime_attr_rw(down_threshold);
-ontime_attr_rw(min_residency);
 
 static ssize_t show(struct kobject *kobj, struct attribute *at, char *buf)
 {
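
The ontime_show()/ontime_store() declarations above stamp out one sysfs show/store handler per field. Their definitions sit outside this hunk; a plausible expansion for the show side, inferred from the store_##_name signature visible at the top of the hunk (the container_of() bookkeeping is an assumption, not taken from this file):

    #define ontime_show(_name)                                            \
    static ssize_t show_##_name(struct kobject *k, char *buf)             \
    {                                                                     \
            struct ontime_cond *cond =                                    \
                    container_of(k, struct ontime_cond, kobj); /* assumed */ \
            return snprintf(buf, PAGE_SIZE, "%lu\n",                      \
                            (unsigned long)cond->_name);                  \
    }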
@@ -729,7 +693,6 @@ static const struct sysfs_ops ontime_sysfs_ops = {
 static struct attribute *ontime_attrs[] = {
        &up_threshold_attr.attr,
        &down_threshold_attr.attr,
-       &min_residency_attr.attr,
        NULL
 };
 
@@ -799,9 +762,6 @@ parse_ontime(struct device_node *dn, struct ontime_cond *cond, int cnt)
        res |= of_property_read_u32(coregroup, "down-threshold", &prop);
        cond->down_threshold = prop;
 
-       res |= of_property_read_u32(coregroup, "min-residency-us", &prop);
-       cond->min_residency = prop;
-
        if (res)
                goto disable;
 
@@ -812,7 +772,6 @@ disable:
        cond->enabled = false;
        cond->up_threshold = ULONG_MAX;
        cond->down_threshold = 0;
-       cond->min_residency = 0;
 }
 
 static int __init init_ontime(void)
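
For reference, the properties consumed by parse_ontime() above map onto per-coregroup device-tree nodes; after this change min-residency-us is simply ignored if present. A hypothetical node consistent with the remaining reads (node name and values are illustrative only):

    coregroup1 {
            up-threshold = <640>;
            down-threshold = <434>;
            /* min-residency-us = <8000>;  no longer parsed */
    };

If any required property is missing, the disable: path above marks the coregroup disabled and pins up_threshold to ULONG_MAX and down_threshold to 0, so ontime migration never triggers for it.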