int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
{
- int ret;
+ int decayed, removed_util = 0;
struct sched_avg *sa = &rt_rq->avg;
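+ /*
+  * Subtract the utilization of tasks that were removed from this rt_rq
+  * (e.g. migrated away) since the last update.
+  */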
if (atomic_long_read(&rt_rq->removed_util_avg)) {
long r = atomic_long_xchg(&rt_rq->removed_util_avg, 0);
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+ removed_util = 1;
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->propagate_avg = 1;
#endif
}
- ret = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
+ decayed = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
#ifndef CONFIG_64BIT
smp_wmb();
rt_rq->load_last_update_time_copy = sa->last_update_time;
#endif
- return ret;
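+ /*
+  * Only poke cpufreq when this rq is actually running RT and the tracked
+  * utilization changed: either PELT decayed or removed utilization was
+  * folded in above.
+  */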
+ if (running && (decayed || removed_util))
+ rt_rq_util_change(rt_rq);
+
+ return decayed;
}
unsigned long sched_get_rt_rq_util(int cpu)
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->propagate_avg = 1;
#endif
+ rt_rq_util_change(rt_rq);
}
static void
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->propagate_avg = 1;
#endif
+ rt_rq_util_change(rt_rq);
}
#else
static inline void
}
#ifdef CONFIG_SMP
+
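+/*
+ * Notify the cpufreq governor that RT utilization has changed. Only the
+ * local CPU's rt_rq may trigger the update, since cpufreq_update_util()
+ * acts on the current CPU.
+ */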
+void rt_rq_util_change(struct rt_rq *rt_rq)
+{
+ if (&this_rq()->rt == rt_rq)
+ cpufreq_update_util(rt_rq->rq, SCHED_CPUFREQ_RT);
+}
+
#ifdef CONFIG_RT_GROUP_SCHED
/* Take into account change of utilization of a child task group */
static inline void
#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
extern unsigned long sched_get_rt_rq_util(int cpu);
+extern void rt_rq_util_change(struct rt_rq *rt_rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)