From: Johnlay Park
Date: Wed, 28 Feb 2018 13:06:47 +0000 (+0900)
Subject: [COMMON] sched/rt: hooking cpufreq callback
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=9f362c60332650d47f361d5c1740843bc6b60b03;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] sched/rt: hooking cpufreq callback

The cpufreq hook should be called any time the root RT rq utilization
changes. This can occur when a task is switched to or from the rt class,
or a task moves between groups or CPUs, but these paths currently do not
call the cpufreq hook.

Refer I9e6ef6f34157d8bd51f385c5340756090ec050fe

Change-Id: Ibe6b36c133aa7b90962cb188a0b95fcd779e9d2a
Signed-off-by: Johnlay Park
---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cd02c8a30136..ba6c2b1f9f60 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3402,7 +3402,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 {
-	int ret;
+	int decayed, removed_util = 0;
 	struct sched_avg *sa = &rt_rq->avg;
 
 	if (atomic_long_read(&rt_rq->removed_load_avg)) {
@@ -3418,19 +3418,23 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 		long r = atomic_long_xchg(&rt_rq->removed_util_avg, 0);
 		sub_positive(&sa->util_avg, r);
 		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+		removed_util = 1;
 #ifdef CONFIG_RT_GROUP_SCHED
 		rt_rq->propagate_avg = 1;
 #endif
 	}
 
-	ret = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
+	decayed = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
 
 #ifndef CONFIG_64BIT
 	smp_wmb();
 	rt_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return ret;
+	if (running && (decayed || removed_util))
+		rt_rq_util_change(rt_rq);
+
+	return decayed;
 }
 
 unsigned long sched_get_rt_rq_util(int cpu)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c3e566eaa215..9fff152b84ef 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1302,6 +1302,7 @@ attach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 #ifdef CONFIG_RT_GROUP_SCHED
 	rt_rq->propagate_avg = 1;
 #endif
+	rt_rq_util_change(rt_rq);
 }
 
 static void
@@ -1314,6 +1315,7 @@ detach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 #ifdef CONFIG_RT_GROUP_SCHED
 	rt_rq->propagate_avg = 1;
 #endif
+	rt_rq_util_change(rt_rq);
 }
 #else
 static inline void
@@ -1956,6 +1958,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 #ifdef CONFIG_SMP
+
+void rt_rq_util_change(struct rt_rq *rt_rq)
+{
+	if (&this_rq()->rt == rt_rq)
+		cpufreq_update_util(rt_rq->rq, SCHED_CPUFREQ_RT);
+}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 /* Take into account change of utilization of a child task group */
 static inline void
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dee30713798f..ebbb212725ce 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1777,7 +1777,7 @@ static inline int hrtick_enabled(struct rq *rq)
 #ifdef CONFIG_SMP
 extern void sched_avg_update(struct rq *rq);
 extern unsigned long sched_get_rt_rq_util(int cpu);
-
+extern void rt_rq_util_change(struct rt_rq *rt_rq);
 #ifndef arch_scale_freq_capacity
 static __always_inline
 unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)