From 980e51953771401fb3acd739506506a1d8e08384 Mon Sep 17 00:00:00 2001
From: "lakkyung.jung"
Date: Wed, 14 Mar 2018 13:46:14 +0900
Subject: [PATCH] [COMMON] sched: fair/ehmp: Add estimation of core/cluster power down

After moving a task, the previous cpu/cluster can be powered down,
so this should be considered when idle power is calculated.

Change-Id: I13d954a1640940a7be93d0429248be8802fd615c
Signed-off-by: lakkyung.jung
---
 include/linux/ehmp.h |  4 ++++
 kernel/sched/ehmp.c  | 36 ++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c  | 12 ++++++++++++
 3 files changed, 52 insertions(+)

diff --git a/include/linux/ehmp.h b/include/linux/ehmp.h
index ac1c09185c8e..6fdf99396efa 100644
--- a/include/linux/ehmp.h
+++ b/include/linux/ehmp.h
@@ -31,6 +31,8 @@ struct gb_qos_request {
 };
 
 #ifdef CONFIG_SCHED_EHMP
+extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+						int state, int cpus);
 extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
 						struct task_struct *p);
 extern void exynos_init_entity_util_avg(struct sched_entity *se);
@@ -59,6 +61,8 @@ extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
 extern void request_kernel_prefer_perf(int grp_idx, int enable);
 
 #else
+static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+						int state, int cpus) { return 0; }
 static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
 				struct task_struct *p) { return NULL; }
 static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
diff --git a/kernel/sched/ehmp.c b/kernel/sched/ehmp.c
index ce396851e9b9..18ab92fbb754 100644
--- a/kernel/sched/ehmp.c
+++ b/kernel/sched/ehmp.c
@@ -73,6 +73,42 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+/**********************************************************************
+ *                           Energy diff                              *
+ **********************************************************************/
+#define EAS_CPU_PRV	0
+#define EAS_CPU_NXT	1
+#define EAS_CPU_BKP	2
+
+int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+						int state, int cpus)
+{
+	unsigned int deepest_state_residency = 0;
+	unsigned int next_timer_us = 0;
+	int grp_nr_running = 0;
+	int deepest_state = 0;
+	int i;
+	int estimate_state = 0;
+
+	if (cpu_idx == EAS_CPU_PRV)
+		grp_nr_running++;
+
+	for_each_cpu(i, mask) {
+		grp_nr_running += cpu_rq(i)->nr_running;
+
+		next_timer_us = ktime_to_us(tick_nohz_get_sleep_length_cpu(i));
+		deepest_state_residency = cpuidle_get_target_residency(i, state);
+
+		if (next_timer_us > deepest_state_residency)
+			deepest_state++;
+	}
+
+	if (!grp_nr_running && deepest_state == cpus)
+		estimate_state = state + 1;
+
+	return estimate_state;
+}
+
 /**********************************************************************
  *                        task initialization                         *
  **********************************************************************/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 17e9e680e1a6..5605b2ec9e8e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6118,6 +6118,18 @@ static int group_idle_state(struct energy_env *eenv, int cpu_idx)
 	 */
 	max_idle_state_idx = sg->sge->nr_idle_states - 2;
 	new_state = grp_util * max_idle_state_idx;
+
+	/*
+	 * After moving, the previous cpu/cluster can be powered down,
+	 * so this should be considered when idle power is calculated.
+	 */
+	if (sched_feat(EXYNOS_HMP)) {
+		new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg),
+					max_idle_state_idx, sg->group_weight);
+		if (new_state)
+			return new_state;
+	}
+
 	if (grp_util <= 0) {
 		/* group will have no util, use lowest state */
 		new_state = max_idle_state_idx + 1;
-- 
2.20.1
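
Note (not part of the patch): the heuristic added in kernel/sched/ehmp.c can be
exercised outside the kernel. Below is a minimal userspace sketch of the same
decision, assuming a single target residency for the whole group, whereas the
kernel queries it per cpu via cpuidle_get_target_residency() and
tick_nohz_get_sleep_length_cpu(). struct cpu_stat, estimate_idle_state() and the
sample numbers are hypothetical stand-ins for illustration only.

/*
 * Illustrative userspace sketch mirroring exynos_estimate_idle_state():
 * the group is credited with the next-deeper idle state (e.g. cluster
 * power-down) only when no task remains queued on any cpu in the group
 * and every cpu expects to sleep longer than the deepest state's
 * target residency.
 */
#include <stdio.h>

struct cpu_stat {			/* hypothetical stand-in for rq/tick data */
	int nr_running;			/* tasks queued on this cpu */
	unsigned int next_timer_us;	/* sleep length until the next timer */
};

static int estimate_idle_state(int task_stays, const struct cpu_stat *cpu,
			       int nr_cpus, int state,
			       unsigned int target_residency_us)
{
	int grp_nr_running = task_stays ? 1 : 0;	/* cpu_idx == EAS_CPU_PRV */
	int deep_enough = 0;
	int i;

	for (i = 0; i < nr_cpus; i++) {
		grp_nr_running += cpu[i].nr_running;
		if (cpu[i].next_timer_us > target_residency_us)
			deep_enough++;
	}

	/* whole group idle and every cpu can sleep deep: one state deeper */
	if (!grp_nr_running && deep_enough == nr_cpus)
		return state + 1;

	return 0;	/* caller falls back to its own estimate */
}

int main(void)
{
	/* two-cpu cluster, both idle, both sleeping past a 3000us residency */
	struct cpu_stat cluster[2] = {
		{ .nr_running = 0, .next_timer_us = 5000 },
		{ .nr_running = 0, .next_timer_us = 8000 },
	};

	/* task moved away (candidate is not EAS_CPU_PRV), deepest state 1 */
	printf("estimated state: %d\n",
	       estimate_idle_state(0, cluster, 2, 1, 3000));
	return 0;
}

Built with any C compiler this prints "estimated state: 2", i.e. state + 1.
In the patched group_idle_state() such a non-zero estimate is returned
directly, so the energy model prices the previous cluster as fully powered
down rather than merely idle.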