DEBUG: sched,cpufreq: add cpu_capacity change tracepoint
author    Juri Lelli <juri.lelli@arm.com>
          Thu, 30 Apr 2015 16:35:23 +0000 (17:35 +0100)
committer Leo Yan <leo.yan@linaro.org>
          Tue, 10 May 2016 08:54:42 +0000 (16:54 +0800)
This is useful when we want to compare cpu utilization and
a cpu's current capacity side by side.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
drivers/cpufreq/cpufreq.c
include/linux/sched.h
include/trace/events/power.h
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5ca8e9517aae40c66fff3c0244feeb3070ce5634..0f30d11fb528e3dd64b47832892717cc97010cd8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,6 +29,7 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#include <linux/sched.h>
 #include <trace/events/power.h>
 
 static LIST_HEAD(cpufreq_policy_list);
@@ -473,6 +474,7 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
 {
+       int cpu;
 
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
@@ -501,6 +503,8 @@ wait:
        spin_unlock(&policy->transition_lock);
 
        scale_freq_capacity(policy, freqs);
+       for_each_cpu(cpu, policy->cpus)
+               trace_cpu_capacity(capacity_curr_of(cpu), cpu);
 
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 }
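
The hunk above emits the new event from cpufreq_freq_transition_begin(), right after scale_freq_capacity() has updated the frequency scale factor, once for every CPU covered by the policy. As a rough illustration of the values that end up in the trace, the following stand-alone user-space sketch (not kernel code) mirrors that loop and the fixed-point scaling done by capacity_curr_of(); the CPU count, the per-CPU capacity_orig values and the half-of-maximum frequency scale factor are made-up stand-ins, and printf() takes the place of trace_cpu_capacity().

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

	/* Hypothetical per-CPU state: two big CPUs and two little ones. */
	static unsigned long capacity_orig[] = { 1024, 1024, 446, 446 };
	static unsigned long freq_scale[4];

	/* Same fixed-point scaling as capacity_curr_of() in the patch. */
	static unsigned long capacity_curr_of(int cpu)
	{
		return capacity_orig[cpu] * freq_scale[cpu] >> SCHED_CAPACITY_SHIFT;
	}

	int main(void)
	{
		int cpu;

		/* Pretend the transition left every CPU of the policy at half fmax. */
		for (cpu = 0; cpu < 4; cpu++)
			freq_scale[cpu] = SCHED_CAPACITY_SCALE / 2;

		/*
		 * Mirrors the for_each_cpu() loop added above; printf() stands
		 * in for trace_cpu_capacity().
		 */
		for (cpu = 0; cpu < 4; cpu++)
			printf("cpu=%d capacity_curr=%lu\n",
			       cpu, capacity_curr_of(cpu));

		return 0;
	}

With those assumed numbers the big CPUs would report a current capacity of 512 and the little ones 223.
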
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1460c6a4f65f3aaf05d034285dffc5e778c3bd62..b9b11b2dbabd0c0df671b37a0fa6070d93993a60 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1046,6 +1046,8 @@ struct sched_group_energy {
        struct capacity_state *cap_states; /* ptr to capacity state array */
 };
 
+unsigned long capacity_curr_of(int cpu);
+
 struct sched_group;
 
 struct sched_domain {
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 284244ebfe8d2c3ef7036c3a7e9b55ea15b11078..f4be04e4425225eb6bc4c5f61f6905bda4813f37 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -120,6 +120,13 @@ DEFINE_EVENT(cpu, cpu_frequency,
        TP_ARGS(frequency, cpu_id)
 );
 
+DEFINE_EVENT(cpu, cpu_capacity,
+
+       TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+       TP_ARGS(capacity, cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
        TP_PROTO(struct device *dev, const char *pm_ops, int event),
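
The new event piggybacks on the existing `cpu` event class, the same class cpu_frequency (shown just above) and cpu_idle are defined from, so no new record layout or print format is introduced. The macro generates the trace_cpu_capacity(capacity, cpu_id) helper that the cpufreq hunk calls, and the event should appear in tracefs under the power subsystem next to cpu_frequency, which is what makes the side-by-side comparison mentioned in the commit message straightforward.
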
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b5ea9ec335d841fde7a55cec16cb2bbf62ec4df6..9b74b86aba3aadd1b5f364c482db16b998469e0a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4698,6 +4698,17 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 
 #endif
 
+/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity_orig *
+              arch_scale_freq_capacity(NULL, cpu)
+              >> SCHED_CAPACITY_SHIFT;
+}
+
 static inline bool energy_aware(void)
 {
        return sched_feat(ENERGY_AWARE);
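
To make the fixed-point math concrete: SCHED_CAPACITY_SHIFT is 10, so with cpu_capacity_orig = 1024 and arch_scale_freq_capacity() returning 512 (the CPU currently running at half its maximum frequency), capacity_curr_of() evaluates to (1024 * 512) >> 10 = 512; these numbers are illustrative, not taken from the patch. Note also that the helper is no longer private to the scheduler: the definition now lives here in fair.c, and the declaration added to include/linux/sched.h is what lets cpufreq.c call it, which is why the static inline copy in kernel/sched/sched.h is removed below.
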
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 983c132af11b498535b2f32505b11f2cd4b9d767..db3f3db9849c841cf2dbf91d65828cad85500760 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1512,17 +1512,6 @@ static inline unsigned long cpu_util(int cpu)
        return __cpu_util(cpu, 0);
 }
 
-/*
- * Returns the current capacity of cpu after applying both
- * cpu and freq scaling.
- */
-static inline unsigned long capacity_curr_of(int cpu)
-{
-       return cpu_rq(cpu)->cpu_capacity_orig *
-              arch_scale_freq_capacity(NULL, cpu)
-              >> SCHED_CAPACITY_SHIFT;
-}
-
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED