/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/hashtable.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/cputime.h>
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
#endif

#define UID_HASH_BITS 10

DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);

static spinlock_t cpufreq_stats_lock;

static DEFINE_SPINLOCK(cpufreq_stats_table_lock);
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
static DEFINE_RT_MUTEX(uid_lock); /* uid_hash_table */

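/*
 * Per-UID accounting entry: alive_time_in_state is rebuilt from the
 * uid's live tasks on every read of /proc/uid_time_in_state, while
 * dead_time_in_state accumulates the counters of tasks as they exit.
 */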
struct uid_entry {
	uid_t uid;
	unsigned int dead_max_states;
	unsigned int alive_max_states;
	u64 *dead_time_in_state;
	u64 *alive_time_in_state;
	struct hlist_node hash;
};

struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	atomic_t cpu_freq_i;
	atomic_t all_freq_i;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

struct cpufreq_power_stats {
	unsigned int state_num;
	unsigned int *curr;
	unsigned int *freq_table;
};

struct all_cpufreq_stats {
	unsigned int state_num;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
};

struct all_freq_table {
	unsigned int *freq_table;
	unsigned int table_size;
};

static struct all_freq_table *all_freq_table;
static bool cpufreq_all_freq_init;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_stats *, prev_cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_stats *, char *);
};

/* Caller must hold uid lock */
static struct uid_entry *find_uid_entry(uid_t uid)
{
	struct uid_entry *uid_entry;

	hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

/* Caller must hold uid lock */
static struct uid_entry *find_or_register_uid(uid_t uid)
{
	struct uid_entry *uid_entry;

	uid_entry = find_uid_entry(uid);
	if (uid_entry)
		return uid_entry;

	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;

	hash_add(uid_hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

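/*
 * Emit /proc/uid_time_in_state: a header line listing every known
 * frequency, then one line per uid with the cumulative ticks spent at
 * each frequency by that uid's live and already-exited tasks.
 */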
static int uid_time_in_state_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	struct task_struct *task, *temp;
	unsigned long bkt, flags;
	int i;

	if (!all_freq_table || !cpufreq_all_freq_init)
		return 0;

	seq_puts(m, "uid:");
	for (i = 0; i < all_freq_table->table_size; ++i)
		seq_printf(m, " %d", all_freq_table->freq_table[i]);
	seq_putc(m, '\n');

	rt_mutex_lock(&uid_lock);

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid_entry = find_or_register_uid(from_kuid_munged(
				current_user_ns(), task_uid(task)));
		if (!uid_entry)
			continue;

		if (uid_entry->alive_max_states < task->max_states) {
			uid_entry->alive_time_in_state = krealloc(
				uid_entry->alive_time_in_state,
				task->max_states *
				sizeof(uid_entry->alive_time_in_state[0]),
				GFP_ATOMIC);
			memset(uid_entry->alive_time_in_state +
				uid_entry->alive_max_states,
				0, (task->max_states -
				uid_entry->alive_max_states) *
				sizeof(uid_entry->alive_time_in_state[0]));
			uid_entry->alive_max_states = task->max_states;
		}

		spin_lock_irqsave(&task_time_in_state_lock, flags);
		if (task->time_in_state) {
			for (i = 0; i < task->max_states; ++i) {
				uid_entry->alive_time_in_state[i] +=
					atomic_read(&task->time_in_state[i]);
			}
		}
		spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(uid_hash_table, bkt, uid_entry, hash) {
		int max_states = uid_entry->dead_max_states;

		if (uid_entry->alive_max_states > max_states)
			max_states = uid_entry->alive_max_states;
		if (max_states)
			seq_printf(m, "%d:", uid_entry->uid);
		for (i = 0; i < max_states; ++i) {
			u64 total_time_in_state = 0;

			if (uid_entry->dead_time_in_state &&
					i < uid_entry->dead_max_states) {
				total_time_in_state =
					uid_entry->dead_time_in_state[i];
			}
			if (uid_entry->alive_time_in_state &&
					i < uid_entry->alive_max_states) {
				total_time_in_state +=
					uid_entry->alive_time_in_state[i];
			}
			seq_printf(m, " %lu", (unsigned long)
				cputime_to_clock_t(total_time_in_state));
		}
		if (max_states)
			seq_putc(m, '\n');

		kfree(uid_entry->alive_time_in_state);
		uid_entry->alive_time_in_state = NULL;
		uid_entry->alive_max_states = 0;
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

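/*
 * Charge the jiffies elapsed since last_time to the bucket of the
 * cpu's current frequency, in both the per-cpu table and the global
 * all_cpufreq_stats table.
 */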
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		int cpu_freq_i = atomic_read(&stat->cpu_freq_i);

		stat->time_in_state[cpu_freq_i] += cur_time - stat->last_time;
		if (all_stat)
			all_stat->time_in_state[cpu_freq_i] +=
					cur_time - stat->last_time;
	}
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

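/*
 * Initialize @p->time_in_state with one counter per frequency known to
 * the global all_freq_table; p->max_states mirrors the table size.
 */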
void cpufreq_task_stats_init(struct task_struct *p)
{
	size_t alloc_size;
	void *temp;
	unsigned long flags;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	WRITE_ONCE(p->max_states, 0);

	if (!all_freq_table || !cpufreq_all_freq_init)
		return;

	WRITE_ONCE(p->max_states, all_freq_table->table_size);

	/* Create all_freq_table for clockticks in all possible freqs in all
	 * cpus
	 */
	alloc_size = p->max_states * sizeof(p->time_in_state[0]);
	temp = kzalloc(alloc_size, GFP_KERNEL);

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = temp;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
}

void cpufreq_task_stats_exit(struct task_struct *p)
{
	unsigned long flags;
	void *temp;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	temp = p->time_in_state;
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	kfree(temp);
}

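/* Emit /proc/<pid>/time_in_state: one "<freq> <ticks>" line per frequency. */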
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
	struct pid *pid, struct task_struct *p)
{
	int i;
	cputime_t cputime;
	unsigned long flags;

	if (!all_freq_table || !cpufreq_all_freq_init || !p->time_in_state)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; i < p->max_states; ++i) {
		cputime = 0;
		spin_lock_irqsave(&task_time_in_state_lock, flags);
		if (p->time_in_state)
			cputime = atomic_read(&p->time_in_state[i]);
		spin_unlock_irqrestore(&task_time_in_state_lock, flags);

		seq_printf(m, "%d %lu\n", all_freq_table->freq_table[i],
			(unsigned long)cputime_to_clock_t(cputime));
	}
	spin_unlock(&cpufreq_stats_lock);

	return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	return sprintf(buf, "%d\n",
			per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}

static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			jiffies_64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}

static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
		unsigned int freq)
{
	int i;

	if (!all_stat)
		return -1;
	for (i = 0; i < all_stat->state_num; i++) {
		if (all_stat->freq_table[i] == freq)
			return i;
	}
	return -1;
}

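/*
 * Charge @cputime to @task at the frequency its cpu currently runs:
 * bump the task's per-frequency counter and add an energy estimate
 * (current draw from the power table * time) to task->cpu_power.
 */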
/* Called without cpufreq_stats_lock held */
void acct_update_power(struct task_struct *task, cputime_t cputime)
{
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;
	int cpu_freq_i;
	int all_freq_i;
	unsigned long flags;
	unsigned long stl_flags;

	if (!task)
		return;

	cpu_num = task_cpu(task);
	spin_lock_irqsave(&cpufreq_stats_table_lock, stl_flags);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!stats)
		goto out;

	all_freq_i = atomic_read(&stats->all_freq_i);

	/* This function is called from a different context.
	 * Interruptions in between reads/assignments are ok.
	 */
	if (all_freq_table && cpufreq_all_freq_init &&
		!(task->flags & PF_EXITING) &&
		all_freq_i != -1 && all_freq_i < READ_ONCE(task->max_states)) {

		spin_lock_irqsave(&task_time_in_state_lock, flags);
		if (task->time_in_state) {
			atomic64_add(cputime,
				&task->time_in_state[all_freq_i]);
		}
		spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	}

	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	if (!powerstats)
		goto out;

	cpu_freq_i = atomic_read(&stats->cpu_freq_i);
	if (cpu_freq_i == -1)
		goto out;

	curr = powerstats->curr[cpu_freq_i];
	if (task->cpu_power != ULLONG_MAX)
		task->cpu_power += curr * cputime_to_usecs(cputime);

out:
	spin_unlock_irqrestore(&cpufreq_stats_table_lock, stl_flags);
}
EXPORT_SYMBOL_GPL(acct_update_power);

static ssize_t show_current_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu;
	struct cpufreq_power_stats *powerstats;

	spin_lock(&cpufreq_stats_lock);
	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
		for (i = 0; i < powerstats->state_num; i++)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"%d=%d ", powerstats->freq_table[i],
					powerstats->curr[i]);
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	spin_unlock(&cpufreq_stats_lock);
	return len;
}

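/*
 * Print a matrix of cumulative time-in-state: one row per frequency in
 * all_freq_table, one column per possible cpu, with "N/A" where a cpu
 * does not support the frequency.
 */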
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			index = get_index_all_cpufreq_stat(all_stat, freq);
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%lu\t\t", (unsigned long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}

out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;

	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
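/* The counters are snapshotted into prev_cpufreq_stats_table first, so
 * cpufreq_stats_create_table() can restore them if the cpu comes back.
 */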
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
	struct cpufreq_stats *prev_stat = per_cpu(prev_cpufreq_stats_table, cpu);
	unsigned int alloc_size;

	if (stat) {
		prev_stat = kzalloc(sizeof(*stat), GFP_KERNEL);
		if (!prev_stat) {
			pr_err("%s: prev_stat kzalloc failed\n", __func__);
			return;
		}

		memcpy(prev_stat, stat, sizeof(*stat));
		per_cpu(prev_cpufreq_stats_table, cpu) = prev_stat;

		/* must match the layout allocated in cpufreq_stats_create_table() */
		alloc_size = stat->max_state * sizeof(int) + stat->max_state * sizeof(u64);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
		alloc_size += stat->max_state * stat->max_state * sizeof(int);
#endif
		prev_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
		if (!prev_stat->time_in_state) {
			pr_err("%s: prev_stat time_in_state kzalloc failed\n", __func__);
			kfree(prev_stat);
			return;
		}
		memcpy(prev_stat->time_in_state, stat->time_in_state, alloc_size);

		pr_debug("%s: Free stat table\n", __func__);
		kfree(stat->time_in_state);
		kfree(stat);
		per_cpu(cpufreq_stats_table, cpu) = NULL;
	}
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (!cpufreq_frequency_get_table(cpu))
		goto put_ref;

	if (!policy_is_shared(policy)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}

put_ref:
	cpufreq_cpu_put(policy);
}

static void cpufreq_allstats_free(void)
{
	int cpu;
	struct all_cpufreq_stats *all_stat;

	sysfs_remove_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);

	for_each_possible_cpu(cpu) {
		all_stat = per_cpu(all_cpufreq_stats, cpu);
		if (!all_stat)
			continue;
		kfree(all_stat->time_in_state);
		kfree(all_stat);
		per_cpu(all_cpufreq_stats, cpu) = NULL;
	}
	if (all_freq_table) {
		kfree(all_freq_table->freq_table);
		kfree(all_freq_table);
		all_freq_table = NULL;
	}
}

static void cpufreq_powerstats_free(void)
{
	int cpu;
	struct cpufreq_power_stats *powerstats;

	sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);

	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		kfree(powerstats->curr);
		kfree(powerstats);
		per_cpu(cpufreq_power_stats, cpu) = NULL;
	}
}

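/*
 * Allocate the per-cpu stats table for @policy: time_in_state,
 * freq_table (and trans_table) live in one allocation, counters saved
 * across hotplug are restored, and the "stats" sysfs group is created.
 */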
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		int cpu, struct cpufreq_frequency_table *table, int count)
{
	unsigned int i, j, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	struct cpufreq_stats *prev_stat = per_cpu(prev_cpufreq_stats_table, cpu);

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (stat == NULL)
		return -ENOMEM;

	if (prev_stat)
		memcpy(stat, prev_stat, sizeof(*prev_stat));

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;

	if (prev_stat) {
		memcpy(stat->time_in_state, prev_stat->time_in_state, alloc_size);
		kfree(prev_stat->time_in_state);
		kfree(prev_stat);
		per_cpu(prev_cpufreq_stats_table, cpu) = NULL;
	}

	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	atomic_set(&stat->cpu_freq_i, freq_table_get_index(stat, policy->cur));
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}

static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *old;
	struct cpufreq_stats *stat;
	unsigned long flags;

	spin_lock_irqsave(&cpufreq_stats_table_lock, flags);
	old = per_cpu(cpufreq_stats_table, policy->cpu);
	stat = per_cpu(cpufreq_stats_table, policy->last_cpu);

	if (old) {
		kfree(old->time_in_state);
		kfree(old);
	}

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
	spin_unlock_irqrestore(&cpufreq_stats_table_lock, flags);
}

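/*
 * Build the per-cpu power table; the per-frequency current draw is
 * read from the "current" property of the cpu's device tree node.
 */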
static void cpufreq_powerstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	unsigned int alloc_size, i = 0, j = 0, ret = 0;
	struct cpufreq_power_stats *powerstats;
	struct device_node *cpu_node;
	char device_path[16];

	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
			GFP_KERNEL);
	if (!powerstats)
		return;

	/* Allocate memory for freq table per cpu as well as clockticks per
	 * freq
	 */
	alloc_size = count * sizeof(unsigned int) +
		count * sizeof(unsigned int);
	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
	if (!powerstats->curr) {
		kfree(powerstats);
		return;
	}
	powerstats->freq_table = powerstats->curr + count;

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		powerstats->freq_table[j++] = freq;
	}
	powerstats->state_num = j;

	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
	cpu_node = of_find_node_by_path(device_path);
	if (cpu_node) {
		ret = of_property_read_u32_array(cpu_node, "current",
				powerstats->curr, count);
		if (ret) {
			kfree(powerstats->curr);
			kfree(powerstats);
			powerstats = NULL;
		}
	}
	per_cpu(cpufreq_power_stats, cpu) = powerstats;
	spin_unlock(&cpufreq_stats_lock);
}

static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
	unsigned int lhs = *(const unsigned int *)(lhs_ptr);
	unsigned int rhs = *(const unsigned int *)(rhs_ptr);

	if (lhs < rhs)
		return -1;
	if (lhs > rhs)
		return 1;
	return 0;
}

static bool check_all_freq_table(unsigned int freq)
{
	int i;

	for (i = 0; i < all_freq_table->table_size; i++) {
		if (freq == all_freq_table->freq_table[i])
			return true;
	}
	return false;
}

static void create_all_freq_table(void)
{
	all_freq_table = kzalloc(sizeof(struct all_freq_table),
			GFP_KERNEL);
	if (!all_freq_table)
		pr_warn("could not allocate memory for all_freq_table\n");
	return;
}

static void free_all_freq_table(void)
{
	if (all_freq_table) {
		if (all_freq_table->freq_table) {
			kfree(all_freq_table->freq_table);
			all_freq_table->freq_table = NULL;
		}
		kfree(all_freq_table);
		all_freq_table = NULL;
	}
}

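/* Append @freq to the global table of all distinct frequencies. */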
static void add_all_freq_table(unsigned int freq)
{
	unsigned int size;

	size = sizeof(all_freq_table->freq_table[0]) *
			(all_freq_table->table_size + 1);
	all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
			size, GFP_ATOMIC);
	/* krealloc() returns NULL on failure, not an ERR_PTR() */
	if (!all_freq_table->freq_table) {
		pr_warn("Could not reallocate memory for freq_table\n");
		return;
	}
	all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}

static void cpufreq_allstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	int i, j = 0;
	unsigned int alloc_size;
	struct all_cpufreq_stats *all_stat;
	bool sort_needed = false;

	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
			GFP_KERNEL);
	if (!all_stat) {
		pr_warn("Cannot allocate memory for cpufreq stats\n");
		return;
	}

	/* Allocate memory for freq table per cpu as well as clockticks per freq */
	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
	all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!all_stat->time_in_state) {
		pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
		kfree(all_stat);
		all_stat = NULL;
		return;
	}
	all_stat->freq_table = (unsigned int *)
			(all_stat->time_in_state + count);

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		all_stat->freq_table[j++] = freq;
		if (all_freq_table && !check_all_freq_table(freq)) {
			add_all_freq_table(freq);
			sort_needed = true;
		}
	}
	if (sort_needed)
		sort(all_freq_table->freq_table, all_freq_table->table_size,
				sizeof(unsigned int), &compare_for_sort, NULL);
	all_stat->state_num = j;
	per_cpu(all_cpufreq_stats, cpu) = all_stat;
	spin_unlock(&cpufreq_stats_lock);
}

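/* Drop the accumulated stats of every uid in [uid_start, uid_end]. */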
void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;

	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
				hash, uid_start) {
			if (uid_start == uid_entry->uid) {
				hash_del(&uid_entry->hash);
				kfree(uid_entry->dead_time_in_state);
				kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
}

static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret, count = 0, i;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	if (!per_cpu(cpufreq_power_stats, cpu))
		cpufreq_powerstats_create(cpu, table, count);

	ret = cpufreq_stats_create_table(policy, cpu, table, count);
	if (ret)
		return ret;
	return 0;
}

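/*
 * CPUFREQ_POSTCHANGE handler: fold the time since the last update into
 * the old frequency's bucket, then advance cpu_freq_i/all_freq_i to the
 * new frequency and count the transition.
 */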
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int i;
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int cpu_freq_old_i, cpu_freq_new_i;
	int all_freq_old_i, all_freq_new_i;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	cpu_freq_old_i = atomic_read(&stat->cpu_freq_i);
	cpu_freq_new_i = freq_table_get_index(stat, freq->new);

	all_freq_old_i = atomic_read(&stat->all_freq_i);
	for (i = 0; i < all_freq_table->table_size; ++i) {
		if (all_freq_table->freq_table[i] == freq->new)
			break;
	}
	if (i != all_freq_table->table_size)
		all_freq_new_i = i;
	else
		all_freq_new_i = -1;

	/* We can't do stat->time_in_state[-1] = .. */
	if (cpu_freq_old_i == -1 || cpu_freq_new_i == -1)
		return 0;

	if (all_freq_old_i == -1 || all_freq_new_i == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (cpu_freq_old_i == cpu_freq_new_i)
		return 0;

	if (all_freq_old_i == all_freq_new_i)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	atomic_set(&stat->cpu_freq_i, cpu_freq_new_i);
	atomic_set(&stat->all_freq_i, all_freq_new_i);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[cpu_freq_old_i * stat->max_state + cpu_freq_new_i]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

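/*
 * PROFILE_TASK_EXIT handler: fold a dying task's time_in_state into
 * its uid's dead_time_in_state before the task disappears.
 */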
static int process_notifier(struct notifier_block *self,
		unsigned long cmd, void *v)
{
	struct task_struct *task = v;
	struct uid_entry *uid_entry;
	unsigned long flags;
	uid_t uid;
	int i;

	if (!task)
		return NOTIFY_OK;

	rt_mutex_lock(&uid_lock);

	uid = from_kuid_munged(current_user_ns(), task_uid(task));
	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		rt_mutex_unlock(&uid_lock);
		pr_err("%s: failed to find uid %d\n", __func__, uid);
		return NOTIFY_OK;
	}

	if (uid_entry->dead_max_states < task->max_states) {
		uid_entry->dead_time_in_state = krealloc(
			uid_entry->dead_time_in_state,
			task->max_states *
			sizeof(uid_entry->dead_time_in_state[0]),
			GFP_ATOMIC);
		memset(uid_entry->dead_time_in_state +
			uid_entry->dead_max_states,
			0, (task->max_states - uid_entry->dead_max_states) *
			sizeof(uid_entry->dead_time_in_state[0]));
		uid_entry->dead_max_states = task->max_states;
	}

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	if (task->time_in_state) {
		for (i = 0; i < task->max_states; ++i) {
			uid_entry->dead_time_in_state[i] +=
				atomic_read(&task->time_in_state[i]);
		}
	}
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);

	rt_mutex_unlock(&uid_lock);
	return NOTIFY_OK;
}

static int uid_time_in_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_time_in_state_show, PDE_DATA(inode));
}

static const struct file_operations uid_time_in_state_fops = {
	.open		= uid_time_in_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int ret = -ENODEV, i, count = 0;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto out;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	if (!per_cpu(cpufreq_power_stats, cpu))
		cpufreq_powerstats_create(cpu, table, count);

	ret = cpufreq_stats_create_table(policy, cpu, table, count);

out:
	cpufreq_cpu_put(policy);
	return ret;
}

static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpufreq_stats_create_table_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};

static struct notifier_block process_notifier_block = {
	.notifier_call = process_notifier,
};

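/*
 * Register the policy, transition and hotplug notifiers, build the
 * global frequency table, and publish the sysfs and procfs entries.
 */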
static int cpufreq_stats_setup(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	create_all_freq_table();

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	/* XXX TODO task support for time_in_state doesn't update freq
	 * info for tasks already initialized, so tasks initialized early
	 * (before cpufreq_stat_init is done) do not get time_in_state data
	 * and CPUFREQ_TRANSITION_NOTIFIER does not update freq info for
	 * tasks already created
	 */
	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		free_all_freq_table();
		return ret;
	}

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	proc_create_data("uid_time_in_state", 0444, NULL,
			&uid_time_in_state_fops, NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);

	cpufreq_all_freq_init = true;

	return 0;
}

static void cpufreq_stats_cleanup(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
	cpufreq_allstats_free();
}

#ifdef CONFIG_BL_SWITCHER
static int cpufreq_stats_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_stats_cleanup();
		break;

	case BL_NOTIFY_POST_ENABLE:
	case BL_NOTIFY_POST_DISABLE:
		cpufreq_stats_setup();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block switcher_notifier = {
	.notifier_call = cpufreq_stats_switcher_notifier,
};
#endif

static int __init cpufreq_stats_init(void)
{
	int ret;

	spin_lock_init(&cpufreq_stats_lock);

	ret = cpufreq_stats_setup();
#ifdef CONFIG_BL_SWITCHER
	if (!ret)
		bL_switcher_register_notifier(&switcher_notifier);
#endif
	return ret;
}

static void __exit cpufreq_stats_exit(void)
{
#ifdef CONFIG_BL_SWITCHER
	bL_switcher_unregister_notifier(&switcher_notifier);
#endif
	cpufreq_stats_cleanup();
	cpufreq_powerstats_free();
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
			"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);