From: Daeyeong Lee
Date: Thu, 14 Jun 2018 06:56:50 +0000 (+0900)
Subject: sched: ems: Don't check lbt_bring_overutilize when wake balance
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=dbe2b1d2b46e7dd48dba2258afc24f775f4b2c46;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

sched: ems: Don't check lbt_bring_overutilize when wake balance

Change-Id: I2b3cd086d0a4329270c7b877967897ce4735e5a0
Signed-off-by: Daeyeong Lee
---

diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index a293ffe8f212..b06f5d8f25fb 100644
--- a/kernel/sched/ems/core.c
+++ b/kernel/sched/ems/core.c
@@ -146,14 +146,6 @@ static int select_proper_cpu(struct task_struct *p, int prev_cpu)
 		if (new_util > capacity_orig)
 			continue;
 
-		/*
-		 * According to the criteria determined by the LBT(Load
-		 * Balance trigger), the cpu that becomes overutilized
-		 * when the task is assigned is skipped.
-		 */
-		if (lbt_bring_overutilize(i, p))
-			continue;
-
 		/*
 		 * Best target) lowest utilization among lowest-cap cpu
 		 *
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
index 7f2742d72ad5..1e9599094645 100644
--- a/kernel/sched/ems/ems.h
+++ b/kernel/sched/ems/ems.h
@@ -21,7 +21,6 @@ extern int ontime_task_wakeup(struct task_struct *p);
 extern int select_perf_cpu(struct task_struct *p);
 extern int global_boosting(struct task_struct *p);
 extern int global_boosted(void);
-extern bool lbt_bring_overutilize(int cpu, struct task_struct *p);
 extern int select_energy_cpu(struct task_struct *p, int prev_cpu,
 				int sd_flag, int sync);
 extern int band_play_cpu(struct task_struct *p);
diff --git a/kernel/sched/ems/energy.c b/kernel/sched/ems/energy.c
index ee6a74dfa18e..b3490474d167 100644
--- a/kernel/sched/ems/energy.c
+++ b/kernel/sched/ems/energy.c
@@ -89,14 +89,6 @@ static void find_eco_target(struct eco_env *eenv)
 		if (new_util > capacity_orig)
 			continue;
 
-		/*
-		 * According to the criteria determined by the LBT(Load
-		 * Balance trigger), the cpu that becomes overutilized when
-		 * the task is assigned is skipped.
-		 */
-		if (lbt_bring_overutilize(cpu, p))
-			continue;
-
 		/*
 		 * Backup target) shallowest idle cpu among min-cap cpu
 		 *
diff --git a/kernel/sched/ems/lbt.c b/kernel/sched/ems/lbt.c
index 5104b4f74790..74587979cb8b 100644
--- a/kernel/sched/ems/lbt.c
+++ b/kernel/sched/ems/lbt.c
@@ -87,23 +87,6 @@ bool lbt_overutilized(int cpu, int level)
 	return overutilized;
 }
 
-bool lbt_bring_overutilize(int cpu, struct task_struct *p)
-{
-	struct sched_domain *sd;
-	struct lbt_overutil *ou = per_cpu(lbt_overutil, cpu);
-	unsigned long util_sum = cpu_util_wake(cpu, p) + task_util(p);
-
-	if (!ou)
-		return false;
-
-	for_each_domain(cpu, sd) {
-		if (util_sum > ou[sd->level].capacity)
-			return true;
-	}
-
-	return false;
-}
-
 void update_lbt_overutil(int cpu, unsigned long capacity)
 {
 	struct lbt_overutil *ou = per_cpu(lbt_overutil, cpu);
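
For reference, the helper this commit removes computed the cpu's expected
utilization after placement (cpu_util_wake(cpu, p) + task_util(p)) and
compared it against the per-sched-domain-level capacity thresholds held in
the per-cpu lbt_overutil table, skipping any candidate cpu that would cross
a threshold at any level. Below is a minimal standalone C sketch of that
predicate, for illustration only: struct ou_entry, would_bring_overutilize(),
and the sample threshold numbers are hypothetical userspace stand-ins for
the kernel's per_cpu(lbt_overutil, cpu) table and for_each_domain() walk,
not the kernel implementation itself.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for one level of the per-cpu lbt_overutil table. */
	struct ou_entry {
		unsigned long capacity;	/* overutilization threshold at this domain level */
	};

	/*
	 * Approximates the removed lbt_bring_overutilize(): would placing a
	 * task with utilization task_util on a cpu whose utilization without
	 * that task is cpu_util push the cpu past the threshold of any
	 * sched-domain level?
	 */
	static bool would_bring_overutilize(const struct ou_entry *ou, int nr_levels,
					    unsigned long cpu_util,
					    unsigned long task_util)
	{
		unsigned long util_sum = cpu_util + task_util;
		int level;

		if (!ou)
			return false;	/* table not set up yet: never filter */

		for (level = 0; level < nr_levels; level++)
			if (util_sum > ou[level].capacity)
				return true;

		return false;
	}

	int main(void)
	{
		/* Sample thresholds for two domain levels (values are made up). */
		struct ou_entry ou[] = { { 344 }, { 344 } };

		/* 300 + 100 = 400 > 344: the task would overutilize this cpu. */
		printf("%s\n", would_bring_overutilize(ou, 2, 300, 100) ? "skip cpu" : "keep cpu");
		/* 200 + 100 = 300 <= 344: the task still fits. */
		printf("%s\n", would_bring_overutilize(ou, 2, 200, 100) ? "skip cpu" : "keep cpu");
		return 0;
	}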
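The practical effect of the change, as visible in the hunks above: the
wake-balance paths (select_proper_cpu() in core.c and find_eco_target() in
energy.c) now filter candidate cpus only by new_util > capacity_orig, i.e.
whether the task fits the cpu's original capacity, instead of also
pre-applying the stricter per-level LBT thresholds at wakeup time.
lbt_overutilized() remains in lbt.c, so the thresholds presumably still
drive the load-balance trigger itself.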