[COMMON] sched: fair/ehmp: Add estimation of core/cluster power down.
author lakkyung.jung <lakkyung.jung@samsung.com>
Wed, 14 Mar 2018 04:46:14 +0000 (13:46 +0900)
committer Chungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:30:49 +0000 (17:30 +0900)
After moving a task, the previous cpu/cluster can be powered down,
so this should be considered when idle power is calculated.

Change-Id: I13d954a1640940a7be93d0429248be8802fd615c
Signed-off-by: lakkyung.jung <lakkyung.jung@samsung.com>
include/linux/ehmp.h
kernel/sched/ehmp.c
kernel/sched/fair.c

index 6d52f62efd6fb05e7052f40ffd7cc206a657553f..694771b22eb1df554e3e634ca6880714935bb300 100644 (file)
@@ -31,6 +31,8 @@ struct gb_qos_request {
 };
 
 #ifdef CONFIG_SCHED_EHMP
+extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+                               int state, int cpus);
 extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
                struct task_struct *p);
 extern void exynos_init_entity_util_avg(struct sched_entity *se);
@@ -59,6 +61,8 @@ extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
 
 extern void request_kernel_prefer_perf(int grp_idx, int enable);
 #else
+static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+                               int state, int cpus) { return 0; }
 static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
                struct task_struct *p) { return NULL; }
 static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
index ce396851e9b9cc4c34a5fccb8dfbd8c29971728d..18ab92fbb7545531513e217e2b13f0f9b5eff96d 100644 (file)
@@ -73,6 +73,45 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 
 #define tsk_cpus_allowed(tsk)  (&(tsk)->cpus_allowed)
 
+/**********************************************************************
+ * Energy diff                                                       *
+ **********************************************************************/
+#define EAS_CPU_PRV    0
+#define EAS_CPU_NXT    1
+#define EAS_CPU_BKP    2
+
+int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
+                               int state, int cpus)
+{
+       unsigned int deepest_state_residency = 0;
+       unsigned int next_timer_us = 0;
+       int grp_nr_running = 0;
+       int deepest_state = 0;
+       int i;
+       int estimate_state = 0;
+
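+       /* In the prev-cpu scenario the waking task is counted as running. */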
+       if (cpu_idx == EAS_CPU_PRV)
+               grp_nr_running++;
+
+       for_each_cpu(i, mask) {
+               grp_nr_running += cpu_rq(i)->nr_running;
+
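+               /* Can this cpu sleep past the deepest state's residency? */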
+               next_timer_us = ktime_to_us(tick_nohz_get_sleep_length_cpu(i));
+               deepest_state_residency = cpuidle_get_target_residency(i, state);
+
+               if (next_timer_us > deepest_state_residency)
+                       deepest_state++;
+       }
+
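+       /* Whole group idle and all cpus sleep deep: go one state deeper. */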
+       if (!grp_nr_running && deepest_state == cpus)
+               estimate_state = state + 1;
+
+       return estimate_state;
+}
+
 /**********************************************************************
  * task initialization                                                *
  **********************************************************************/
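To make the heuristic concrete, here is a minimal userspace model of the
function above (names and numbers are illustrative: the arrays stand in for
the per-cpu runqueue lengths, tick_nohz_get_sleep_length_cpu() and
cpuidle_get_target_residency(), which exist only in the kernel). The group is
credited one idle state deeper than the deepest per-cpu state, i.e.
core/cluster power down, only when no task will be running in it and every
cpu's next timer event lies beyond that state's target residency; otherwise
0 is returned and the caller keeps its own estimate.

#include <stdio.h>

#define EAS_CPU_PRV    0       /* candidate index of the task's previous cpu */

/*
 * Userspace sketch of exynos_estimate_idle_state(): the arrays replace
 * the per-cpu kernel queries. Returns state + 1 (power down) only when
 * the whole group will stay idle past the target residency.
 */
static int estimate_idle_state(int cpu_idx, int state, int cpus,
                               const int nr_running[],
                               const unsigned int next_timer_us[],
                               const unsigned int residency_us[])
{
        int grp_nr_running = 0, deep_sleepers = 0, i;

        /* The waking task itself runs in this group in the PRV scenario. */
        if (cpu_idx == EAS_CPU_PRV)
                grp_nr_running++;

        for (i = 0; i < cpus; i++) {
                grp_nr_running += nr_running[i];
                if (next_timer_us[i] > residency_us[i])
                        deep_sleepers++;
        }

        if (!grp_nr_running && deep_sleepers == cpus)
                return state + 1;

        return 0;
}

int main(void)
{
        const int nr_running[2] = { 0, 0 };             /* empty runqueues */
        const unsigned int next_timer_us[2] = { 8000, 9000 };
        const unsigned int residency_us[2] = { 5000, 5000 };

        /* Task moved elsewhere (cpu_idx != EAS_CPU_PRV) and both cpus can
         * sleep past the residency: prints 3 for state == 2. */
        printf("%d\n", estimate_idle_state(1, 2, 2, nr_running,
                                           next_timer_us, residency_us));
        return 0;
}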
index 655d5a6a311589b4f4cac20e2007d9cc80121842..19ead9ab01ed3ea5718020810f268907a36ac474 100644 (file)
@@ -5710,6 +5710,18 @@ static int group_idle_state(struct energy_env *eenv, int cpu_idx)
         */
        max_idle_state_idx = sg->sge->nr_idle_states - 2;
        new_state = grp_util * max_idle_state_idx;
+
+       /*
+        * after moving, previous cpu/cluster can be powered down,
+        * After moving the task, the previous cpu/cluster can be powered
+        * down, so consider that when calculating the idle power.
+       if (sched_feat(EXYNOS_HMP)) {
+               new_state = exynos_estimate_idle_state(cpu_idx, sched_group_span(sg),
+                                               max_idle_state_idx, sg->group_weight);
+               if (new_state)
+                       return new_state;
+       }
+
        if (grp_util <= 0) {
                /* group will have no util, use lowest state */
                new_state = max_idle_state_idx + 1;
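As the hunk above shows, with the EXYNOS_HMP scheduler feature enabled a
non-zero estimate short-circuits group_idle_state(): since max_idle_state_idx
is nr_idle_states - 2, the returned state + 1 selects the last entry of the
group's idle-state table, i.e. the core/cluster power-down level. A zero
estimate (the group is not guaranteed to go fully idle) falls through to the
original util-scaled selection, and the !CONFIG_SCHED_EHMP stub in ehmp.h
always returns 0, so non-EHMP builds keep the old behaviour unchanged.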