From: Daeyeong Lee
Date: Mon, 3 Sep 2018 06:13:16 +0000 (+0900)
Subject: [COMMON] sched: ems: Change how to select ontime target CPU
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e7160ff87f16680277879be07041e5fc91a39395;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] sched: ems: Change how to select ontime target CPU

Instead of picking only the minimum-utilization CPU in each coregroup,
prefer an idle CPU with the shallowest idle state and keep the
lowest-utilization busy CPU that still fits under the coverage ratio
as a backup candidate. The cpuidle state is read under rcu_read_lock().
The energy comparison among the per-coregroup candidates is unchanged.

Change-Id: I2e36d26feb1858de6f8a912a9365627927f4e9f3
Signed-off-by: Daeyeong Lee
---

diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index c48b7c8bdf07..37f2cf1f9ba5 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -167,17 +167,22 @@ static int
 ontime_select_target_cpu(struct task_struct *p, struct cpumask *fit_cpus)
 {
 	struct cpumask candidates;
-	int cpu, min_energy_cpu = -1;
+	int cpu, energy_cpu = -1;
 	int candidate_count = 0;
 
+	rcu_read_lock();
+
 	cpumask_clear(&candidates);
 
 	/*
 	 * First) Find min_util_cpu for each coregroup in fit cpus and candidate it.
 	 */
 	for_each_cpu(cpu, fit_cpus) {
-		int i, min_util_cpu = -1;
-		unsigned long coverage_util, min_util = ULONG_MAX;
+		int i;
+		int best_cpu = -1, backup_cpu = -1;
+		unsigned int min_exit_latency = UINT_MAX;
+		unsigned long min_util = ULONG_MAX;
+		unsigned long coverage_util;
 
 		if (cpu != cpumask_first(cpu_coregroup_mask(cpu)))
 			continue;
@@ -185,7 +190,6 @@ ontime_select_target_cpu(struct task_struct *p, struct cpumask *fit_cpus)
 		coverage_util = capacity_orig_of(cpu) * get_coverage_ratio(cpu);
 
 		for_each_cpu_and(i, cpu_coregroup_mask(cpu), cpu_active_mask) {
-			unsigned long new_util;
 
 			if (!cpumask_test_cpu(i, tsk_cpus_allowed(p)))
 				continue;
@@ -193,23 +197,43 @@
 			if (cpu_rq(i)->ontime_migrating)
 				continue;
 
-			new_util = task_util(p) + cpu_util_wake(i, p);
-
-			if (new_util * 100 >= coverage_util)
-				continue;
-
-			if (new_util < min_util) {
-				min_util = new_util;
-				min_util_cpu = i;
+			if (idle_cpu(i)) {
+				/* 1. Find shallowest idle_cpu */
+				struct cpuidle_state *idle = idle_get_state(cpu_rq(i));
+
+				if (!idle) {
+					best_cpu = i;
+					break;
+				}
+
+				if (idle->exit_latency < min_exit_latency) {
+					min_exit_latency = idle->exit_latency;
+					best_cpu = i;
+				}
+			} else {
+				/* 2. Find a busy cpu that has utilization to spare */
+				unsigned long new_util = task_util(p) + cpu_util_wake(i, p);
+
+				if (new_util * 100 >= coverage_util)
+					continue;
+
+				if (new_util < min_util) {
+					min_util = new_util;
+					backup_cpu = i;
+				}
 			}
 		}
-
-		if (cpu_selected(min_util_cpu)) {
-			cpumask_set_cpu(min_util_cpu, &candidates);
+		if (cpu_selected(best_cpu)) {
+			cpumask_set_cpu(best_cpu, &candidates);
+			candidate_count++;
+		} else if (cpu_selected(backup_cpu)) {
+			cpumask_set_cpu(backup_cpu, &candidates);
 			candidate_count++;
 		}
 	}
 
+	rcu_read_unlock();
+
 	/*
 	 * Second) Find min_energy_cpu among the candidates and return it.
 	 */
@@ -225,17 +249,17 @@
 
 			if (min_energy > new_energy) {
 				min_energy = new_energy;
-				min_energy_cpu = cpu;
+				energy_cpu = cpu;
 			}
 		}
 	} else if (candidate_count == 1) {
 		/*
 		 * If there is just one candidate, this will be min_energy_cpu.
 		 */
-		min_energy_cpu = cpumask_first(&candidates);
+		energy_cpu = cpumask_first(&candidates);
 	}
 
-	return min_energy_cpu;
+	return energy_cpu;
 }
 
 extern struct sched_entity *__pick_next_entity(struct sched_entity *se);
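
For readers who want to experiment with the new selection policy outside the kernel, the snippet below is a minimal user-space C sketch of the per-coregroup candidate step this patch introduces. The struct cpu_info fields and pick_ontime_candidate() are hypothetical stand-ins for the kernel's idle_get_state(), cpu_util_wake(), capacity_orig_of() and get_coverage_ratio() helpers; the real code walks cpumasks under rcu_read_lock() rather than a plain array, so this is only an illustration of the policy, not the EMS API.

/*
 * Simplified model of ontime candidate selection within one coregroup:
 * prefer the idle cpu with the shallowest idle state, otherwise fall back
 * to the busy cpu with the lowest expected utilization that still fits
 * under the coverage threshold.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

struct cpu_info {
	bool active;			/* cpu_active_mask equivalent */
	bool allowed;			/* task affinity (tsk_cpus_allowed) */
	bool ontime_migrating;		/* already receiving an ontime migration */
	bool idle;			/* idle_cpu() equivalent */
	bool idle_state_known;		/* idle_get_state() returned non-NULL */
	unsigned int exit_latency;	/* cpuidle exit latency */
	unsigned long util;		/* cpu_util_wake() equivalent */
	unsigned long capacity;		/* capacity_orig_of() equivalent */
	unsigned int coverage_ratio;	/* percent of capacity usable for ontime */
};

/* Returns the candidate cpu index for the group [first, last], or -1. */
static int pick_ontime_candidate(const struct cpu_info *cpu, int first, int last,
				 unsigned long task_util)
{
	/* Coverage threshold is taken from the first cpu of the group. */
	unsigned long coverage_util = cpu[first].capacity * cpu[first].coverage_ratio;
	unsigned int min_exit_latency = UINT_MAX;
	unsigned long min_util = ULONG_MAX;
	int best_cpu = -1, backup_cpu = -1;
	int i;

	for (i = first; i <= last; i++) {
		if (!cpu[i].active || !cpu[i].allowed || cpu[i].ontime_migrating)
			continue;

		if (cpu[i].idle) {
			/* 1. Idle cpu: keep the one with the shallowest idle state. */
			if (!cpu[i].idle_state_known)
				return i;	/* unknown state, take it immediately */
			if (cpu[i].exit_latency < min_exit_latency) {
				min_exit_latency = cpu[i].exit_latency;
				best_cpu = i;
			}
		} else {
			/* 2. Busy cpu: keep the lowest-utilization one with room to spare. */
			unsigned long new_util = task_util + cpu[i].util;

			if (new_util * 100 >= coverage_util)
				continue;
			if (new_util < min_util) {
				min_util = new_util;
				backup_cpu = i;
			}
		}
	}

	return best_cpu >= 0 ? best_cpu : backup_cpu;
}

int main(void)
{
	/* Hypothetical 4-cpu coregroup: cpu2 and cpu3 are idle, cpu2 is shallower. */
	struct cpu_info cpu[4] = {
		{ true, true, false, false, false,   0, 300, 1024, 80 },
		{ true, true, true,  false, false,   0, 100, 1024, 80 },
		{ true, true, false, true,  true,   50,   0, 1024, 80 },
		{ true, true, false, true,  true,  200,   0, 1024, 80 },
	};

	printf("candidate cpu: %d\n", pick_ontime_candidate(cpu, 0, 3, 150));
	return 0;
}

With the sample table above the sketch picks cpu 2, the idle cpu with the smaller exit latency, which is the case the old min_util-only policy could not express.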