[COMMON] sched/rt: hook the cpufreq callback
author		Johnlay Park <jonglae.park@samsung.com>
		Wed, 28 Feb 2018 13:06:47 +0000 (22:06 +0900)
committer	Chungwoo Park <cww.park@samsung.com>
		Mon, 21 May 2018 08:33:15 +0000 (17:33 +0900)
The cpufreq hook should be called any time the root RT rq utilization
changes. This can occur when a task is switched to or from the rt
class, or when a task moves between groups or CPUs, but these paths
currently do not call the cpufreq hook.
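
For context (not part of this change), the sketch below shows roughly how
such a notification is consumed: a governor registers a per-CPU callback
with cpufreq_add_update_util_hook(), and the cpufreq_update_util(...,
SCHED_CPUFREQ_RT) calls added here end up invoking that callback. The
struct sample_gov_cpu, sample_rt_util_hook() and the "bias towards a high
frequency" policy are hypothetical placeholders, loosely modeled on a
schedutil-style governor of this kernel generation.

	/* Hypothetical per-CPU governor state, for this sketch only. */
	struct sample_gov_cpu {
		struct update_util_data	update_util;
		unsigned int		want_max_freq;
	};

	/*
	 * Callback registered per CPU via:
	 *   cpufreq_add_update_util_hook(cpu, &gcpu->update_util,
	 *				  sample_rt_util_hook);
	 */
	static void sample_rt_util_hook(struct update_util_data *data,
					u64 time, unsigned int flags)
	{
		struct sample_gov_cpu *gcpu =
			container_of(data, struct sample_gov_cpu, update_util);

		if (flags & SCHED_CPUFREQ_RT)
			/* RT utilization changed: bias towards a high OPP. */
			gcpu->want_max_freq = 1;
	}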

Refer to I9e6ef6f34157d8bd51f385c5340756090ec050fe

Change-Id: Ibe6b36c133aa7b90962cb188a0b95fcd779e9d2a
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cd02c8a3013607f837c88acd5f27e272c287703d..ba6c2b1f9f6044ba49ff318a2927adb3d0132166 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3402,7 +3402,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 {
-       int ret;
+       int decayed, removed_util = 0;
        struct sched_avg *sa = &rt_rq->avg;
 
        if (atomic_long_read(&rt_rq->removed_load_avg)) {
@@ -3418,19 +3418,23 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
                long r = atomic_long_xchg(&rt_rq->removed_util_avg, 0);
                sub_positive(&sa->util_avg, r);
                sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+               removed_util = 1;
 #ifdef CONFIG_RT_GROUP_SCHED
                rt_rq->propagate_avg = 1;
 #endif
        }
 
-       ret = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
+       decayed = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
 
 #ifndef CONFIG_64BIT
        smp_wmb();
        rt_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       return ret;
+       if (running && (decayed || removed_util))
+               rt_rq_util_change(rt_rq);
+
+       return decayed;
 }
 
 unsigned long sched_get_rt_rq_util(int cpu)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c3e566eaa215d49f48805e175acfb4d073a41ba6..9fff152b84efedfe5619ae7fdb5db77698fa0074 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1302,6 +1302,7 @@ attach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->propagate_avg = 1;
 #endif
+       rt_rq_util_change(rt_rq);
 }
 
 static void
@@ -1314,6 +1315,7 @@ detach_rt_entity_load_avg(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->propagate_avg = 1;
 #endif
+       rt_rq_util_change(rt_rq);
 }
 #else
 static inline void
@@ -1956,6 +1958,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 #ifdef CONFIG_SMP
+
+void rt_rq_util_change(struct rt_rq *rt_rq)
+{
+       if (&this_rq()->rt == rt_rq)
+               cpufreq_update_util(rt_rq->rq, SCHED_CPUFREQ_RT);
+}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 /* Take into account change of utilization of a child task group */
 static inline void
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dee30713798fef74dceaad9fe03fddd89ff8679e..ebbb212725ce96c347e1b67688b7a06f098ecd91 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1777,7 +1777,7 @@ static inline int hrtick_enabled(struct rq *rq)
 #ifdef CONFIG_SMP
 extern void sched_avg_update(struct rq *rq);
 extern unsigned long sched_get_rt_rq_util(int cpu);
-
+extern void rt_rq_util_change(struct rt_rq *rt_rq);
 #ifndef arch_scale_freq_capacity
 static __always_inline
 unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)