sched: ems: support schedtune.boost in wakeup balance.
author    Park Bumgyu <bumgyu.park@samsung.com>
Thu, 14 Jun 2018 04:20:46 +0000 (13:20 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:56 +0000 (20:24 +0300)
Change-Id: I18938f89a6cf1372c6be96e0d6c769960cd2918c
Signed-off-by: Park Bumgyu <bumgyu.park@samsung.com>
include/linux/sched.h
kernel/sched/ems/core.c
kernel/sched/ems/ems.h
kernel/sched/ems/energy.c
kernel/sched/ems/ontime.c
kernel/sched/ems/st_addon.c
kernel/sched/fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3a9727019f7c3309a0bce41652e09e7c66d7f0df..443b9422abbc062783c468db6339ea0a13b2f32c 100644
@@ -417,8 +417,6 @@ struct sched_avg {
        struct util_est                 util_est;
 };
 
-#define ontime_of(p)                   (&p->se.ontime)
-
 struct ontime_avg {
        u64 ontime_migration_time;
        u64 load_sum;
diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index 762266e71c0bf3627d07d5f6c8c315bb26949106..a293ffe8f2124afa3d70a5f740b11377175292f5 100644
@@ -12,6 +12,7 @@
 
 #include "ems.h"
 #include "../sched.h"
+#include "../tune.h"
 
 unsigned long task_util(struct task_struct *p)
 {
@@ -139,6 +140,7 @@ static int select_proper_cpu(struct task_struct *p, int prev_cpu)
 
                        wake_util = cpu_util_wake(i, p);
                        new_util = wake_util + task_util_est(p);
+                       new_util = max(new_util, boosted_task_util(p));
 
                        /* skip over-capacity cpu */
                        if (new_util > capacity_orig)
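Note: the added max() floors the projected CPU utilization with the task's boosted utilization, so a boosted task skips CPUs whose spare capacity only covers its unboosted demand; the same line is added in energy.c and st_addon.c below. boosted_task_util() is only declared in this excerpt (see ems.h), so the following is a minimal sketch of what it is assumed to return, following the usual SchedTune pattern of estimated utilization plus a schedtune margin:

    /*
     * Sketch only -- boosted_task_util() is not defined in this diff.
     * Assumed behaviour: inflate the task's estimated utilization by the
     * schedtune margin for its boost group.
     */
    unsigned long boosted_task_util(struct task_struct *p)
    {
            unsigned long util = task_util_est(p);
            long margin = schedtune_margin(util, schedtune_task_boost(p));

            return util + margin;
    }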
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
index f558d3c58cdeb8c78aa2255280ca5ddacd4e717c..5b3a5ae794e836ba369f006cbcf82397599b4d57 100644
@@ -39,3 +39,5 @@ extern unsigned long task_util(struct task_struct *p);
 extern int cpu_util_wake(int cpu, struct task_struct *p);
 extern unsigned long task_util_est(struct task_struct *p);
 extern inline unsigned int get_cpu_mips(unsigned int cpu);
+
+extern unsigned long boosted_task_util(struct task_struct *p);
diff --git a/kernel/sched/ems/energy.c b/kernel/sched/ems/energy.c
index 12d59e25e2535ae8c9252bdbe27558cb31e94bce..ee6a74dfa18e7638534656a492a3cb27e2a48485 100644
@@ -79,6 +79,7 @@ static void find_eco_target(struct eco_env *eenv)
 
                wake_util = cpu_util_wake(cpu, p);
                new_util = wake_util + task_util_est(p);
+               new_util = max(new_util, boosted_task_util(p));
 
                /* checking prev cpu is meaningless */
                if (eenv->prev_cpu == cpu)
diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index f4092692643bc095c3a8e350e7c4b7ff58254f4a..bb21451a80b8c27b098bd69945507c1919e3e70f 100644
@@ -24,7 +24,7 @@
 #define MIN_CAPACITY_CPU       0
 #define MAX_CAPACITY_CPU       (NR_CPUS - 1)
 
-#define ontime_load_avg(p)     (ontime_of(p)->avg.load_avg)
+#define ontime_of(p)           (&p->se.ontime)
 
 #define cap_scale(v, s)                ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
@@ -71,6 +71,18 @@ static inline struct sched_entity *se_of(struct sched_avg *sa)
        return container_of(sa, struct sched_entity, avg);
 }
 
+extern long schedtune_margin(unsigned long signal, long boost);
+static inline unsigned long ontime_load_avg(struct task_struct *p)
+{
+       int boost = schedtune_task_boost(p);
+       unsigned long load_avg = ontime_of(p)->avg.load_avg;
+
+       if (boost == 0)
+               return load_avg;
+
+       return load_avg + schedtune_margin(load_avg, boost);
+}
+
 struct ontime_cond *get_current_cond(int cpu)
 {
        struct ontime_cond *curr;
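Note: ontime_load_avg(), previously a macro returning the raw load_avg, becomes an inline function that inflates the load of a boosted task before ontime migration decisions; with boost == 0 it still returns load_avg unchanged. Assuming schedtune_margin() uses the upstream signal-proportional compensation (SPC) formula and SCHED_CAPACITY_SCALE = 1024, a worked example:

    load_avg = 200, boost = 10 (%)
    margin   = boost * (SCHED_CAPACITY_SCALE - load_avg) / 100
             = 10 * (1024 - 200) / 100 = 82
    ontime_load_avg(p) = 200 + 82 = 282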
diff --git a/kernel/sched/ems/st_addon.c b/kernel/sched/ems/st_addon.c
index 7a38172d7ebc4e1a55e1f6bf1ca600d831b641f0..97944e50e47e061244f32509328c9bacc8710a0a 100644
@@ -104,6 +104,7 @@ static int select_idle_cpu(struct task_struct *p)
 
                        wake_util = cpu_util_wake(i, p);
                        new_util = wake_util + task_util_est(p);
+                       new_util = max(new_util, boosted_task_util(p));
 
                        trace_ems_prefer_idle(p, task_cpu(p), i, capacity_orig, task_util_est(p),
                                                        new_util, idle_cpu(i));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c2cb290059fca9f7078457203af3ef2f651258b1..4b904acecd5a67fc75ec99a2dfa746e6bdeab888 100644
@@ -6617,7 +6617,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 #ifdef CONFIG_SCHED_TUNE
 struct reciprocal_value schedtune_spc_rdiv;
 
-static long
+long
 schedtune_margin(unsigned long signal, long boost)
 {
        long long margin = 0;
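Note: the fair.c hunk only drops the static qualifier so EMS code can call schedtune_margin(); the function body is cut off in this excerpt. For reference, a sketch of the remainder, assuming the upstream SchedTune SPC implementation (not shown in this diff):

    	/*
    	 * Assumed remainder: margin is proportional to the complement of
    	 * the signal, M = B * (SCHED_CAPACITY_SCALE - S) / 100, divided
    	 * via the precomputed reciprocal schedtune_spc_rdiv.
    	 */
    	if (boost >= 0) {
    		margin  = SCHED_CAPACITY_SCALE - signal;
    		margin *= boost;
    	} else
    		margin = -signal * boost;

    	margin = reciprocal_divide(margin, schedtune_spc_rdiv);

    	if (boost < 0)
    		margin *= -1;

    	return margin;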