static spinlock_t cpufreq_stats_lock;
/* Serializes access to the per-cpu cpufreq_stats_table pointers so that
 * readers never race with table free/migration in
 * cpufreq_stats_update_policy_cpu(). */
static DEFINE_SPINLOCK(cpufreq_stats_table_lock);
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
static DEFINE_RT_MUTEX(uid_lock); /* uid_hash_table */
int cpu_freq_i;
int all_freq_i;
unsigned long flags;
+ unsigned long stl_flags;
if (!task)
return;
cpu_num = task_cpu(task);
+ spin_lock_irqsave(&cpufreq_stats_table_lock, stl_flags);
stats = per_cpu(cpufreq_stats_table, cpu_num);
if (!stats)
- return;
+ goto out;
all_freq_i = atomic_read(&stats->all_freq_i);
powerstats = per_cpu(cpufreq_power_stats, cpu_num);
if (!powerstats)
- return;
+ goto out;
cpu_freq_i = atomic_read(&stats->cpu_freq_i);
if (cpu_freq_i == -1)
- return;
+ goto out;
curr = powerstats->curr[cpu_freq_i];
if (task->cpu_power != ULLONG_MAX)
task->cpu_power += curr * cputime_to_usecs(cputime);
+
+out:
+ spin_unlock_irqrestore(&cpufreq_stats_table_lock, stl_flags);
}
EXPORT_SYMBOL_GPL(acct_update_power);
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
+ struct cpufreq_stats *old;
struct cpufreq_stats *stat;
+ unsigned long flags;
- pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
- policy->cpu, policy->last_cpu);
- stat = per_cpu(cpufreq_stats_table, policy->cpu);
- if (stat) {
- kfree(stat->time_in_state);
- kfree(stat);
+ spin_lock_irqsave(&cpufreq_stats_table_lock, flags);
+ old = per_cpu(cpufreq_stats_table, policy->cpu);
+ stat = per_cpu(cpufreq_stats_table, policy->last_cpu);
+
+ if (old) {
+ kfree(old->time_in_state);
+ kfree(old);
}
- stat = per_cpu(cpufreq_stats_table, policy->last_cpu);
+ pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
policy->last_cpu);
per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
stat->cpu = policy->cpu;
+ spin_unlock_irqrestore(&cpufreq_stats_table_lock, flags);
}
static void cpufreq_powerstats_create(unsigned int cpu,