From: Park Bumgyu
Date: Thu, 14 Jun 2018 04:20:46 +0000 (+0900)
Subject: sched: ems: support schedtune.boost in wakeup balance.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=eb6b681ce0a989de643eb0b653027984f214deef;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

sched: ems: support schedtune.boost in wakeup balance.

Change-Id: I18938f89a6cf1372c6be96e0d6c769960cd2918c
Signed-off-by: Park Bumgyu
---

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1cbd64bd7e0b..65cb668f3ac8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -376,8 +376,6 @@ struct sched_avg {
 	struct util_est			util_est;
 };
 
-#define ontime_of(p)			(&p->se.ontime)
-
 struct ontime_avg {
 	u64				ontime_migration_time;
 	u64				load_sum;
diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index 762266e71c0b..a293ffe8f212 100644
--- a/kernel/sched/ems/core.c
+++ b/kernel/sched/ems/core.c
@@ -12,6 +12,7 @@
 
 #include "ems.h"
 #include "../sched.h"
+#include "../tune.h"
 
 unsigned long task_util(struct task_struct *p)
 {
@@ -139,6 +140,7 @@ static int select_proper_cpu(struct task_struct *p, int prev_cpu)
 
 			wake_util = cpu_util_wake(i, p);
 			new_util = wake_util + task_util_est(p);
+			new_util = max(new_util, boosted_task_util(p));
 
 			/* skip over-capacity cpu */
 			if (new_util > capacity_orig)
diff --git a/kernel/sched/ems/ems.h b/kernel/sched/ems/ems.h
index f558d3c58cde..5b3a5ae794e8 100644
--- a/kernel/sched/ems/ems.h
+++ b/kernel/sched/ems/ems.h
@@ -39,3 +39,5 @@ extern unsigned long task_util(struct task_struct *p);
 extern int cpu_util_wake(int cpu, struct task_struct *p);
 extern unsigned long task_util_est(struct task_struct *p);
 extern inline unsigned int get_cpu_mips(unsigned int cpu);
+
+extern unsigned long boosted_task_util(struct task_struct *p);
diff --git a/kernel/sched/ems/energy.c b/kernel/sched/ems/energy.c
index 12d59e25e253..ee6a74dfa18e 100644
--- a/kernel/sched/ems/energy.c
+++ b/kernel/sched/ems/energy.c
@@ -79,6 +79,7 @@ static void find_eco_target(struct eco_env *eenv)
 
 		wake_util = cpu_util_wake(cpu, p);
 		new_util = wake_util + task_util_est(p);
+		new_util = max(new_util, boosted_task_util(p));
 
 		/* checking prev cpu is meaningless */
 		if (eenv->prev_cpu == cpu)
diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index f4092692643b..bb21451a80b8 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -24,7 +24,7 @@
 #define MIN_CAPACITY_CPU	0
 #define MAX_CAPACITY_CPU	(NR_CPUS - 1)
 
-#define ontime_load_avg(p)	(ontime_of(p)->avg.load_avg)
+#define ontime_of(p)		(&p->se.ontime)
 
 #define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
@@ -71,6 +71,18 @@ static inline struct sched_entity *se_of(struct sched_avg *sa)
 	return container_of(sa, struct sched_entity, avg);
 }
 
+extern long schedtune_margin(unsigned long signal, long boost);
+static inline unsigned long ontime_load_avg(struct task_struct *p)
+{
+	int boost = schedtune_task_boost(p);
+	unsigned long load_avg = ontime_of(p)->avg.load_avg;
+
+	if (boost == 0)
+		return load_avg;
+
+	return load_avg + schedtune_margin(load_avg, boost);
+}
+
 struct ontime_cond *get_current_cond(int cpu)
 {
 	struct ontime_cond *curr;
diff --git a/kernel/sched/ems/st_addon.c b/kernel/sched/ems/st_addon.c
index 7a38172d7ebc..97944e50e47e 100644
--- a/kernel/sched/ems/st_addon.c
+++ b/kernel/sched/ems/st_addon.c
@@ -104,6 +104,7 @@ static int select_idle_cpu(struct task_struct *p)
 
 		wake_util = cpu_util_wake(i, p);
 		new_util = wake_util + task_util_est(p);
+		new_util = max(new_util, boosted_task_util(p));
 
 		trace_ems_prefer_idle(p, task_cpu(p), i, capacity_orig,
 					task_util_est(p), new_util, idle_cpu(i));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 00c2e909c39d..6bb68fd91c64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6351,7 +6351,7 @@ static inline unsigned long task_util(struct task_struct *p);
 #ifdef CONFIG_SCHED_TUNE
 struct reciprocal_value schedtune_spc_rdiv;
 
-static long
+long
 schedtune_margin(unsigned long signal, long boost)
 {
 	long long margin = 0;
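
For reference, the effect of the clamp added in the three selection loops
(select_proper_cpu(), find_eco_target(), select_idle_cpu()):
boosted_task_util(), whose definition is not part of this diff, inflates a
task's utilization by its schedtune.boost margin, so new_util can never
drop below the boosted value. A boosted task therefore trips the "skip
over-capacity cpu" check on small CPUs even when its raw utilization would
fit, steering wakeup placement toward bigger or emptier CPUs.

Below is a minimal standalone sketch of the arithmetic (userspace C, not
kernel code). The sketch_* names are made up for illustration, and the
plain division by 100 is an assumption standing in for the kernel's
reciprocal_divide()-based schedtune_margin().

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

/*
 * Signal proportional compensation: a positive boost adds boost% of the
 * headroom (SCHED_CAPACITY_SCALE - signal); a negative boost removes
 * boost% of the signal itself.
 */
static long sketch_schedtune_margin(unsigned long signal, long boost)
{
	if (boost >= 0)
		return (long)((SCHED_CAPACITY_SCALE - signal) * boost / 100);
	return -(long)(signal * -boost / 100);
}

/* task utilization inflated by its schedtune.boost margin */
static unsigned long sketch_boosted_task_util(unsigned long util, long boost)
{
	return util + sketch_schedtune_margin(util, boost);
}

int main(void)
{
	unsigned long wake_util = 300;	/* CPU utilization without the task */
	unsigned long task_util = 200;	/* task's own utilization estimate */
	long boost = 50;		/* schedtune.boost, in percent */
	unsigned long new_util = wake_util + task_util;

	/* the clamp each of the three selection loops now applies */
	if (new_util < sketch_boosted_task_util(task_util, boost))
		new_util = sketch_boosted_task_util(task_util, boost);

	printf("boosted_task_util=%lu new_util=%lu\n",
	       sketch_boosted_task_util(task_util, boost), new_util);
	return 0;
}

With boost = 50 and task_util = 200 the margin is (1024 - 200) * 50 / 100
= 412, so new_util is clamped from 500 up to 612: the task is placed as if
it needed 612 units of capacity. The new ontime_load_avg() in ontime.c
applies the same margin to the ontime load, so boosted tasks also become
eligible for ontime migration to big CPUs sooner.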