From 1a64132ea67bec14160e3a63e1c29d780d2b36d4 Mon Sep 17 00:00:00 2001 From: Daeyeong Lee Date: Mon, 12 Mar 2018 21:52:26 +0900 Subject: [PATCH] [COMMON] sched: ehmp: Modify cpu of ontime_entity should be managed Change-Id: I6c2c23e5117d387f8deb9d7995a7377174a06e80 Signed-off-by: Daeyeong Lee --- kernel/sched/ehmp.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/kernel/sched/ehmp.c b/kernel/sched/ehmp.c index b4680fa9a583..a0ad5d74110f 100644 --- a/kernel/sched/ehmp.c +++ b/kernel/sched/ehmp.c @@ -1089,6 +1089,7 @@ out: #define TASK_TRACK_COUNT 5 #define ontime_of(p) (&p->se.ontime) +#define ontime_task_cpu(p) (ontime_of(p)->cpu) #define ontime_flag(p) (ontime_of(p)->flags) #define ontime_migration_time(p) (ontime_of(p)->avg.ontime_migration_time) #define ontime_load_avg(p) (ontime_of(p)->avg.load_avg) @@ -1210,9 +1211,10 @@ static int set_min_residency(int cpu, int val) return -EINVAL; } -static inline void include_ontime_task(struct task_struct *p) +static inline void include_ontime_task(struct task_struct *p, int dst_cpu) { ontime_flag(p) = ONTIME; + ontime_task_cpu(p) = dst_cpu; /* Manage time based on clock task of boot cpu(cpu0) */ ontime_migration_time(p) = cpu_rq(0)->clock_task; @@ -1220,6 +1222,7 @@ static inline void include_ontime_task(struct task_struct *p) static inline void exclude_ontime_task(struct task_struct *p) { + ontime_task_cpu(p) = 0; ontime_migration_time(p) = 0; ontime_flag(p) = NOT_ONTIME; } @@ -1398,8 +1401,9 @@ static int ontime_migration_cpu_stop(void *data) if (boost_migration) { /* boost task is not classified as ontime task */ exclude_ontime_task(p); - } else - include_ontime_task(p); + } else { + include_ontime_task(p, dst_cpu); + } rcu_read_unlock(); double_unlock_balance(src_rq, dst_rq); @@ -1456,8 +1460,9 @@ static int ontime_task_wakeup(struct task_struct *p) target_cpu = ontime_select_target_cpu(sched_group_cpus(sd->groups), tsk_cpus_allowed(p)); if (cpu_selected(target_cpu)) { 
- if (ontime_flag(p) & NOT_ONTIME) - include_ontime_task(p); + if (ontime_flag(p) & NOT_ONTIME) { + include_ontime_task(p, target_cpu); + } } else { if (ontime_flag(p) & ONTIME) exclude_ontime_task(p); @@ -1616,8 +1621,13 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu) return false; } - if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(maxcap_cpu))) { - trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "ontime on big"); + if (cpumask_test_cpu(dst_cpu, cpu_coregroup_mask(ontime_task_cpu(p)))) { + trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "same coregroup"); + return true; + } + + if (capacity_orig_of(dst_cpu) > capacity_orig_of(ontime_task_cpu(p))) { + trace_ehmp_ontime_check_migrate(p, dst_cpu, true, "bigger cpu"); return true; } @@ -1627,7 +1637,7 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu) */ delta = cpu_rq(0)->clock_task - ontime_migration_time(p); delta = delta >> 10; - if (delta <= get_min_residency(task_cpu(p))) { + if (delta <= get_min_residency(ontime_task_cpu(p))) { trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "min residency"); return false; } @@ -1637,7 +1647,7 @@ int ontime_can_migration(struct task_struct *p, int dst_cpu) goto release; } - if (ontime_load_avg(p) >= get_down_threshold(task_cpu(p))) { + if (ontime_load_avg(p) >= get_down_threshold(ontime_task_cpu(p))) { trace_ehmp_ontime_check_migrate(p, dst_cpu, false, "heavy task"); return false; } -- 2.20.1