cpufreq_stats: Fix stats leak during update policy
authorJason Hrycay <jason.hrycay@motorola.com>
Fri, 22 Mar 2019 21:01:50 +0000 (22:01 +0100)
committerDanny Wood <danwood76@gmail.com>
Sun, 31 Mar 2019 08:48:12 +0000 (09:48 +0100)
When the cpufreq policy is moved from one CPU to another, the percpu
stats_table is overwritten and leaked. Properly free the old stats table
and ensure its protected in the non-sysfs paths of update policy and
acct_update_power. The sysfs entries are removed in the cpufreq core
driver before migrating the policy. We introduce a new spinlock
specifically for these operations to avoid needing to convert all the
other spinlocks into the irq safe variants since acct_update_power is
typically called in ISR context.

[ported to apq8084-common by Corinna Vinschen <xda@vinschen.de>]

Change-Id: I95ff24c07834065cd0fd3c763a488a9843097a1d
Signed-off-by: Jason Hrycay <jason.hrycay@motorola.com>
Reviewed-on: https://gerrit.mot.com/921752
SLTApproved: Slta Waiver <sltawvr@motorola.com>
SME-Granted: SME Approvals Granted
Reviewed-by: Igor Kovalenko <igork@motorola.com>
drivers/cpufreq/cpufreq_stats.c

index 5cc21e636664b5478e03aebdd5b8a09bcc8734d7..977916bf37f1d4da6a123c970a9bd1e72fbeaf51 100644 (file)
@@ -40,6 +40,7 @@ DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
 
 static spinlock_t cpufreq_stats_lock;
 
+static DEFINE_SPINLOCK(cpufreq_stats_table_lock);
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 static DEFINE_RT_MUTEX(uid_lock); /* uid_hash_table */
 
@@ -354,14 +355,16 @@ void acct_update_power(struct task_struct *task, cputime_t cputime) {
        int cpu_freq_i;
        int all_freq_i;
        unsigned long flags;
+       unsigned long stl_flags;
 
        if (!task)
                return;
 
        cpu_num = task_cpu(task);
+       spin_lock_irqsave(&cpufreq_stats_table_lock, stl_flags);
        stats = per_cpu(cpufreq_stats_table, cpu_num);
        if (!stats)
-               return;
+               goto out;
 
        all_freq_i = atomic_read(&stats->all_freq_i);
 
@@ -382,15 +385,18 @@ void acct_update_power(struct task_struct *task, cputime_t cputime) {
 
        powerstats = per_cpu(cpufreq_power_stats, cpu_num);
        if (!powerstats)
-               return;
+               goto out;
 
        cpu_freq_i = atomic_read(&stats->cpu_freq_i);
        if (cpu_freq_i == -1)
-               return;
+               goto out;
 
        curr = powerstats->curr[cpu_freq_i];
        if (task->cpu_power != ULLONG_MAX)
                task->cpu_power += curr * cputime_to_usecs(cputime);
+
+out:
+       spin_unlock_irqrestore(&cpufreq_stats_table_lock, stl_flags);
 }
 EXPORT_SYMBOL_GPL(acct_update_power);
 
@@ -716,21 +722,26 @@ error_get_fail:
 
 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 {
+       struct cpufreq_stats *old;
        struct cpufreq_stats *stat;
+       unsigned long flags;
 
-       pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
-                       policy->cpu, policy->last_cpu);
-       stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (stat) {
-               kfree(stat->time_in_state);
-               kfree(stat);
+       spin_lock_irqsave(&cpufreq_stats_table_lock, flags);
+       old = per_cpu(cpufreq_stats_table, policy->cpu);
+       stat = per_cpu(cpufreq_stats_table, policy->last_cpu);
+
+       if (old) {
+               kfree(old->time_in_state);
+               kfree(old);
        }
 
-       stat = per_cpu(cpufreq_stats_table, policy->last_cpu);
+       pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+                       policy->cpu, policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
                        policy->last_cpu);
        per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
        stat->cpu = policy->cpu;
+       spin_unlock_irqrestore(&cpufreq_stats_table_lock, flags);
 }
 
 static void cpufreq_powerstats_create(unsigned int cpu,