From: Pavankumar Kondeti Date: Thu, 8 Feb 2018 11:13:45 +0000 (+0530) Subject: ANDROID: sched/fair: fix CPU selection for non latency sensitive tasks X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=98d51aa17757367052ff2ce7bf31b388d29cb2f0;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git ANDROID: sched/fair: fix CPU selection for non latency sensitive tasks CPU selection for non latency sensitive tasks targets an active CPU in the little cluster. The shallowest c-state CPU is stored as a backup. However, if all CPUs in the little cluster are idle, we pick an active CPU in the BIG cluster as the target CPU. This incorrect choice of the target CPU may not get corrected by select_energy_cpu_idx(), depending on the energy difference between the previous CPU and the target CPU. This can be fixed easily by maintaining a single variable that tracks the lowest capacity among the traversed CPUs, shared between the idle-CPU and active-CPU paths. Change-Id: I3efb8bc82ff005383163921ef2bd39fcac4589ad Signed-off-by: Pavankumar Kondeti --- diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 61b7bcce11c8..58fe4fc13105 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6908,7 +6908,6 @@ static int start_cpu(bool boosted) static inline int find_best_target(struct task_struct *p, int *backup_cpu, bool boosted, bool prefer_idle) { - unsigned long best_idle_min_cap_orig = ULONG_MAX; unsigned long min_util = boosted_task_util(p); unsigned long target_capacity = ULONG_MAX; unsigned long min_wake_util = ULONG_MAX; @@ -7052,6 +7051,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, (capacity_orig * SCHED_CAPACITY_SCALE)) continue; + /* + * Favor CPUs with smaller capacity for non latency + * sensitive tasks. + */ + if (capacity_orig > target_capacity) + continue; + /* * Case B) Non latency sensitive tasks on IDLE CPUs. 
* @@ -7079,10 +7085,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (idle_cpu(i)) { int idle_idx = idle_get_state_idx(cpu_rq(i)); - /* Select idle CPU with lower cap_orig */ - if (capacity_orig > best_idle_min_cap_orig) - continue; - /* * Skip CPUs in deeper idle state, but only * if they are also less energy efficient. @@ -7093,8 +7095,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, best_idle_cstate <= idle_idx) continue; - /* Keep track of best idle CPU */ - best_idle_min_cap_orig = capacity_orig; + target_capacity = capacity_orig; best_idle_cstate = idle_idx; best_idle_cpu = i; continue; @@ -7120,10 +7121,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, * capacity. */ - /* Favor CPUs with smaller capacity */ - if (capacity_orig > target_capacity) - continue; - /* Favor CPUs with maximum spare capacity */ if ((capacity_orig - new_util) < target_max_spare_cap) continue;