After moving a task, the previous cpu/cluster can be powered down,
so this should be considered when the idle power is calculated.
Change-Id: I13d954a1640940a7be93d0429248be8802fd615c
Signed-off-by: lakkyung.jung <lakkyung.jung@samsung.com>
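
In short: if moving the task leaves a cpu group completely idle, the energy
model can charge that group a deeper, cluster-level idle state instead of the
utilization-scaled per-cpu estimate. Below is a minimal standalone userspace
model of the added heuristic; the struct and parameter names are illustrative
only, and plain parameters stand in for the kernel's runqueue, tick and
cpuidle helpers.

#include <stdio.h>

struct cpu_sample {
	int nr_running;			/* runnable tasks on this cpu */
	unsigned int next_timer_us;	/* time until the cpu's next timer event */
};

/*
 * Model of the estimation rule: promote the group to the next-deeper
 * idle state (state + 1, e.g. cluster power-down) only if no cpu in the
 * group has runnable tasks and every cpu expects to sleep longer than
 * the target residency of the deepest per-cpu state.
 */
static int estimate_idle_state(const struct cpu_sample *cpu, int nr_cpus,
			       int state, unsigned int residency_us)
{
	int grp_nr_running = 0, deepest_state = 0, i;

	for (i = 0; i < nr_cpus; i++) {
		grp_nr_running += cpu[i].nr_running;
		if (cpu[i].next_timer_us > residency_us)
			deepest_state++;
	}

	if (!grp_nr_running && deepest_state == nr_cpus)
		return state + 1;

	return 0;	/* caller falls back to the default estimate */
}

int main(void)
{
	/* four idle cpus, all sleeping past a 3000us target residency */
	struct cpu_sample grp[4] = {
		{ 0, 5000 }, { 0, 7000 }, { 0, 9000 }, { 0, 12000 },
	};

	/* deepest per-cpu state index 2 -> estimate 3 (power-down) */
	printf("estimated idle state: %d\n",
	       estimate_idle_state(grp, 4, 2, 3000));
	return 0;
}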
};
#ifdef CONFIG_SCHED_EHMP
+extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+				int state, int cpus);
extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
struct task_struct *p);
extern void exynos_init_entity_util_avg(struct sched_entity *se);
extern void request_kernel_prefer_perf(int grp_idx, int enable);
#else
+static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+				int state, int cpus) { return 0; }
static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
struct task_struct *p) { return NULL; }
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+/**********************************************************************
+ * Energy diff *
+ **********************************************************************/
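+/*
+ * Candidate cpu slots evaluated by the energy diff code: the task's
+ * previous cpu, the proposed next cpu and a backup candidate.
+ */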
+#define EAS_CPU_PRV 0
+#define EAS_CPU_NXT 1
+#define EAS_CPU_BKP 2
+
+int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+				int state, int cpus)
+{
+	unsigned int deepest_state_residency = 0;
+	unsigned int next_timer_us = 0;
+	int grp_nr_running = 0;
+	int deepest_state = 0;
+	int i;
+	int estimate_state = 0;
+
+	/* treat the task as runnable when estimating for the
+	 * previous-cpu candidate */
+	if (cpu_idx == EAS_CPU_PRV)
+		grp_nr_running++;
+
+	for_each_cpu(i, mask) {
+		grp_nr_running += cpu_rq(i)->nr_running;
+
+		next_timer_us = ktime_to_us(tick_nohz_get_sleep_length_cpu(i));
+		deepest_state_residency = cpuidle_get_target_residency(i, state);
+
+		/* cpu expects to sleep past the deepest state's residency */
+		if (next_timer_us > deepest_state_residency)
+			deepest_state++;
+	}
+
+	/*
+	 * Nothing runnable in the group and every cpu can stay down
+	 * long enough: report the next-deeper (power-down) state.
+	 */
+	if (!grp_nr_running && deepest_state == cpus)
+		estimate_state = state + 1;
+
+	return estimate_state;
+}
+
/**********************************************************************
* task initialization *
**********************************************************************/
	 */
	max_idle_state_idx = sg->sge->nr_idle_states - 2;
	new_state = grp_util * max_idle_state_idx;
+
+	/*
+	 * After moving, the previous cpu/cluster can be powered down,
+	 * so consider this when calculating the idle power.
+	 */
+	if (sched_feat(EXYNOS_HMP)) {
+		new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg),
+						max_idle_state_idx, sg->group_weight);
+		if (new_state)
+			return new_state;
+	}
+
	if (grp_util <= 0) {
		/* group will have no util, use lowest state */
		new_state = max_idle_state_idx + 1;
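
With the EXYNOS_HMP scheduler feature enabled, a non-zero estimate from
exynos_estimate_idle_state() overrides the utilization-scaled default computed
above; when it returns 0 (the group is not fully idle, or some cpu wakes
before the deepest state pays off), the original path still applies, including
the lowest-power fallback of max_idle_state_idx + 1 for a group with no
utilization.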