sched/fair: Move cpufreq hook to update_cfs_rq_load_avg()
author Steve Muckle <steve.muckle@linaro.org>
Tue, 22 Mar 2016 00:21:07 +0000 (17:21 -0700)
committer Ingo Molnar <mingo@kernel.org>
Sat, 23 Apr 2016 12:20:35 +0000 (14:20 +0200)
The cpufreq hook should be called whenever the root cfs_rq
utilization changes, so update_cfs_rq_load_avg() is a better
place for it: the current location, update_load_avg(), is not
reached on the enqueue_entity() or update_blocked_averages()
paths.
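
For illustration, a heavily condensed sketch of the two callers named
above, following the fair.c layout this patch applies to (the bodies are
reduced to the calls that matter here, so the snippets are not literal
kernel code):

	/*
	 * Enqueue path: enqueue_entity() -> enqueue_entity_load_avg().
	 * It updates the cfs_rq average directly, without going through
	 * update_load_avg(), so the old hook placement missed it.
	 */
	static inline void
	enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
	{
		u64 now = cfs_rq_clock_task(cfs_rq);

		/* ... per-entity __update_load_avg() elided ... */
		update_cfs_rq_load_avg(now, cfs_rq);	/* root utilization may change here */
		/* ... runnable load accounting and update_tg_load_avg() elided ... */
	}

	/*
	 * Periodic decay of blocked load: also bypasses update_load_avg(),
	 * so utilization could decay with no cpufreq notification.
	 */
	static void update_blocked_averages(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		struct cfs_rq *cfs_rq;

		/* ... locking and rq clock update elided ... */
		for_each_leaf_cfs_rq(rq, cfs_rq) {
			if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
				update_tg_load_avg(cfs_rq, 0);
		}
	}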

Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Michael Turquette <mturquette@baylibre.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1458606068-7476-1-git-send-email-smuckle@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e371f43fc804b0c6598e8d98e401f5d898c45b5..6df80d47a52500c24312e2eaec30540bb7c33baa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2878,7 +2878,9 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
        struct sched_avg *sa = &cfs_rq->avg;
+       struct rq *rq = rq_of(cfs_rq);
        int decayed, removed = 0;
+       int cpu = cpu_of(rq);
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
@@ -2893,7 +2895,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
                sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
        }
 
-       decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+       decayed = __update_load_avg(now, cpu, sa,
                scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
 
 #ifndef CONFIG_64BIT
@@ -2901,28 +2903,6 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
        cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       return decayed || removed;
-}
-
-/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
-{
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       u64 now = cfs_rq_clock_task(cfs_rq);
-       struct rq *rq = rq_of(cfs_rq);
-       int cpu = cpu_of(rq);
-
-       /*
-        * Track task load average for carrying it to new CPU after migrated, and
-        * track group sched_entity load average for task_h_load calc in migration
-        */
-       __update_load_avg(now, cpu, &se->avg,
-                         se->on_rq * scale_load_down(se->load.weight),
-                         cfs_rq->curr == se, NULL);
-
-       if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
-               update_tg_load_avg(cfs_rq, 0);
-
        if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
                unsigned long max = rq->cpu_capacity_orig;
 
@@ -2943,8 +2923,30 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
                 * See cpu_util().
                 */
                cpufreq_update_util(rq_clock(rq),
-                                   min(cfs_rq->avg.util_avg, max), max);
+                                   min(sa->util_avg, max), max);
        }
+
+       return decayed || removed;
+}
+
+/* Update task and its cfs_rq load average */
+static inline void update_load_avg(struct sched_entity *se, int update_tg)
+{
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       struct rq *rq = rq_of(cfs_rq);
+       int cpu = cpu_of(rq);
+
+       /*
+        * Track task load average for carrying it to new CPU after migrated, and
+        * track group sched_entity load average for task_h_load calc in migration
+        */
+       __update_load_avg(now, cpu, &se->avg,
+                         se->on_rq * scale_load_down(se->load.weight),
+                         cfs_rq->curr == se, NULL);
+
+       if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+               update_tg_load_avg(cfs_rq, 0);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
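
With the hook moved into update_cfs_rq_load_avg(), the registered cpufreq
callback now also fires on the enqueue_entity() and update_blocked_averages()
paths whenever the root cfs_rq of the local CPU is updated; it receives
rq_clock(rq), the root utilization clamped to the CPU's original capacity, and
that capacity as "max". For context, a minimal, purely illustrative consumer is
sketched below; it assumes the update_util_data / cpufreq_set_update_util_data()
registration interface that cpufreq exposed around this time (both the names and
the registration call changed in later releases), and the frequency formula is
an example, not any in-tree governor's policy:

	#include <linux/sched.h>
	#include <linux/cpufreq.h>

	struct demo_gov_data {
		struct update_util_data update_util;	/* embedded callback descriptor */
		struct cpufreq_policy *policy;
	};

	/* Invoked by the scheduler with the (time, util, max) values passed above. */
	static void demo_gov_update_util(struct update_util_data *data, u64 time,
					 unsigned long util, unsigned long max)
	{
		struct demo_gov_data *dg =
			container_of(data, struct demo_gov_data, update_util);
		unsigned int next_f;

		/* Illustrative policy: scale the maximum frequency by util/max. */
		next_f = dg->policy->cpuinfo.max_freq * util / max;

		/* ... rate limiting and the actual frequency request elided ... */
		(void)next_f;
	}

	/*
	 * Registration associates the callback with a CPU, e.g.:
	 *	cpufreq_set_update_util_data(cpu, &dg->update_util);
	 */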