drivers/cpufreq/cpufreq.c (GitHub: mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <asm/cputime.h>
21 #include <linux/kernel.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/notifier.h>
26 #include <linux/cpufreq.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/spinlock.h>
30 #include <linux/tick.h>
31 #include <linux/device.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
34 #include <linux/completion.h>
35 #include <linux/mutex.h>
36 #include <linux/syscore_ops.h>
37
38 #include <trace/events/power.h>
39
40 /**
41 * The "cpufreq driver" - the arch- or hardware-dependent low
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
44 */
45 static struct cpufreq_driver *cpufreq_driver;
46 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
47 #ifdef CONFIG_HOTPLUG_CPU
48 /* This one keeps track of the previously set governor of a removed CPU */
49 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
50 #endif
51 static DEFINE_RWLOCK(cpufreq_driver_lock);
52 static DEFINE_MUTEX(cpufreq_governor_lock);
53
54 /*
55 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
56 * all cpufreq/hotplug/workqueue/etc related lock issues.
57 *
58 * The rules for this semaphore:
59 * - Any routine that wants to read from the policy structure will
60 * do a down_read on this semaphore.
61 * - Any routine that will write to the policy structure and/or may take away
62 * the policy altogether (eg. CPU hotplug), will hold this lock in write
63 * mode before doing so.
64 *
65 * Additional rules:
66 * - Governor routines that can be called in cpufreq hotplug path should not
67 * take this sem as top level hotplug notifier handler takes this.
68 * - Lock should not be held across
69 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
70 */
71 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
72 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
73
74 #define lock_policy_rwsem(mode, cpu) \
75 static int lock_policy_rwsem_##mode(int cpu) \
76 { \
77 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
78 BUG_ON(policy_cpu == -1); \
79 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 \
81 return 0; \
82 }
83
84 lock_policy_rwsem(read, cpu);
85 lock_policy_rwsem(write, cpu);
86
87 #define unlock_policy_rwsem(mode, cpu) \
88 static void unlock_policy_rwsem_##mode(int cpu) \
89 { \
90 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
91 BUG_ON(policy_cpu == -1); \
92 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
93 }
94
95 unlock_policy_rwsem(read, cpu);
96 unlock_policy_rwsem(write, cpu);
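
/*
 * Illustrative aside (not part of the original file): a minimal sketch of how
 * a read-side routine in this file is expected to use the per-policy rwsem,
 * following the rules documented above.  The helper name
 * example_read_cur_freq() is hypothetical.
 */
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	unsigned int cur = 0;
	struct cpufreq_policy *policy;

	if (lock_policy_rwsem_read(cpu) < 0)	/* down_read() on the policy's sem */
		return 0;

	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		cur = policy->cur;		/* read-only access is now safe */

	unlock_policy_rwsem_read(cpu);		/* up_read() */
	return cur;
}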
97
98 /* internal prototypes */
99 static int __cpufreq_governor(struct cpufreq_policy *policy,
100 unsigned int event);
101 static unsigned int __cpufreq_get(unsigned int cpu);
102 static void handle_update(struct work_struct *work);
103
104 /**
105 * Two notifier lists: the "policy" list is involved in the
106 * validation process for a new CPU frequency policy; the
107 * "transition" list for kernel code that needs to handle
108 * changes to devices when the CPU clock speed changes.
109 * The mutex locks both lists.
110 */
111 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
112 static struct srcu_notifier_head cpufreq_transition_notifier_list;
113
114 static bool init_cpufreq_transition_notifier_list_called;
115 static int __init init_cpufreq_transition_notifier_list(void)
116 {
117 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
118 init_cpufreq_transition_notifier_list_called = true;
119 return 0;
120 }
121 pure_initcall(init_cpufreq_transition_notifier_list);
122
123 static int off __read_mostly;
124 static int cpufreq_disabled(void)
125 {
126 return off;
127 }
128 void disable_cpufreq(void)
129 {
130 off = 1;
131 }
132 static LIST_HEAD(cpufreq_governor_list);
133 static DEFINE_MUTEX(cpufreq_governor_mutex);
134
135 bool have_governor_per_policy(void)
136 {
137 return cpufreq_driver->have_governor_per_policy;
138 }
139 EXPORT_SYMBOL_GPL(have_governor_per_policy);
140
141 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
142 {
143 if (have_governor_per_policy())
144 return &policy->kobj;
145 else
146 return cpufreq_global_kobject;
147 }
148 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
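
/*
 * Illustrative aside: how a governor might use get_governor_parent_kobj() to
 * place its tunables either under the per-policy kobject or under the global
 * cpufreq kobject, depending on the driver.  example_gov_attr_group is a
 * hypothetical attribute group owned by that governor.
 */
static int example_gov_create_sysfs(struct cpufreq_policy *policy,
				    const struct attribute_group *example_gov_attr_group)
{
	/* parent is &policy->kobj or cpufreq_global_kobject */
	return sysfs_create_group(get_governor_parent_kobj(policy),
				  example_gov_attr_group);
}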
149
150 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
151 {
152 u64 idle_time;
153 u64 cur_wall_time;
154 u64 busy_time;
155
156 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
157
158 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
159 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
164
165 idle_time = cur_wall_time - busy_time;
166 if (wall)
167 *wall = cputime_to_usecs(cur_wall_time);
168
169 return cputime_to_usecs(idle_time);
170 }
171
172 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
173 {
174 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
175
176 if (idle_time == -1ULL)
177 return get_cpu_idle_time_jiffy(cpu, wall);
178 else if (!io_busy)
179 idle_time += get_cpu_iowait_time_us(cpu, wall);
180
181 return idle_time;
182 }
183 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
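
/*
 * Illustrative aside: a sketch of how a governor typically consumes
 * get_cpu_idle_time() - sample it twice and turn the idle and wall-clock
 * deltas into a load percentage.  prev_wall/prev_idle are assumed to hold the
 * values from the previous sampling period; the helper name is hypothetical.
 */
static unsigned int example_cpu_load(unsigned int cpu, u64 *prev_wall,
				     u64 *prev_idle, int io_busy)
{
	u64 cur_wall, cur_idle;
	unsigned int wall_delta, idle_delta;

	cur_idle = get_cpu_idle_time(cpu, &cur_wall, io_busy);

	wall_delta = (unsigned int)(cur_wall - *prev_wall);
	idle_delta = (unsigned int)(cur_idle - *prev_idle);
	*prev_wall = cur_wall;
	*prev_idle = cur_idle;

	if (!wall_delta || wall_delta < idle_delta)
		return 0;

	return 100 * (wall_delta - idle_delta) / wall_delta;	/* % busy */
}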
184
185 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
186 {
187 struct cpufreq_policy *data;
188 unsigned long flags;
189
190 if (cpu >= nr_cpu_ids)
191 goto err_out;
192
193 /* get the cpufreq driver */
194 read_lock_irqsave(&cpufreq_driver_lock, flags);
195
196 if (!cpufreq_driver)
197 goto err_out_unlock;
198
199 if (!try_module_get(cpufreq_driver->owner))
200 goto err_out_unlock;
201
202
203 /* get the CPU */
204 data = per_cpu(cpufreq_cpu_data, cpu);
205
206 if (!data)
207 goto err_out_put_module;
208
209 if (!sysfs && !kobject_get(&data->kobj))
210 goto err_out_put_module;
211
212 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
213 return data;
214
215 err_out_put_module:
216 module_put(cpufreq_driver->owner);
217 err_out_unlock:
218 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
219 err_out:
220 return NULL;
221 }
222
223 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
224 {
225 if (cpufreq_disabled())
226 return NULL;
227
228 return __cpufreq_cpu_get(cpu, false);
229 }
230 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231
232 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
233 {
234 return __cpufreq_cpu_get(cpu, true);
235 }
236
237 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
238 {
239 if (!sysfs)
240 kobject_put(&data->kobj);
241 module_put(cpufreq_driver->owner);
242 }
243
244 void cpufreq_cpu_put(struct cpufreq_policy *data)
245 {
246 if (cpufreq_disabled())
247 return;
248
249 __cpufreq_cpu_put(data, false);
250 }
251 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252
253 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
254 {
255 __cpufreq_cpu_put(data, true);
256 }
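
/*
 * Illustrative aside: the reference-counting contract external callers follow.
 * Every successful cpufreq_cpu_get() must be balanced with cpufreq_cpu_put()
 * once the policy pointer is no longer used; cpufreq_quick_get_max() further
 * down in this file is a real in-tree instance of the same pattern.  The
 * helper below is hypothetical.
 */
static unsigned int example_read_policy_min(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int min = 0;

	if (!policy)			/* no driver loaded or CPU not managed */
		return 0;

	min = policy->min;
	cpufreq_cpu_put(policy);
	return min;
}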
257
258 /*********************************************************************
259 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
260 *********************************************************************/
261
262 /**
263 * adjust_jiffies - adjust the system "loops_per_jiffy"
264 *
265 * This function alters the system "loops_per_jiffy" for the clock
266 * speed change. Note that loops_per_jiffy cannot be updated on SMP
267 * systems as each CPU might be scaled differently. So, use the arch
268 * per-CPU loops_per_jiffy value wherever possible.
269 */
270 #ifndef CONFIG_SMP
271 static unsigned long l_p_j_ref;
272 static unsigned int l_p_j_ref_freq;
273
274 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
275 {
276 if (ci->flags & CPUFREQ_CONST_LOOPS)
277 return;
278
279 if (!l_p_j_ref_freq) {
280 l_p_j_ref = loops_per_jiffy;
281 l_p_j_ref_freq = ci->old;
282 pr_debug("saving %lu as reference value for loops_per_jiffy; "
283 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
284 }
285 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
286 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
287 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
288 ci->new);
289 pr_debug("scaling loops_per_jiffy to %lu "
290 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
291 }
292 }
293 #else
294 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
295 {
296 return;
297 }
298 #endif
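
/*
 * Illustrative aside: the adjustment above is plain proportional scaling,
 * cpufreq_scale(old, div, mult) ~= old * mult / div.  With hypothetical
 * numbers: if loops_per_jiffy was 4997120 at a reference frequency of
 * 1000000 kHz, switching to 500000 kHz gives roughly
 *
 *	loops_per_jiffy = cpufreq_scale(4997120, 1000000, 500000);   -> 2498560
 */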
299
300
301 void __cpufreq_notify_transition(struct cpufreq_policy *policy,
302 struct cpufreq_freqs *freqs, unsigned int state)
303 {
304 BUG_ON(irqs_disabled());
305
306 if (cpufreq_disabled())
307 return;
308
309 freqs->flags = cpufreq_driver->flags;
310 pr_debug("notification %u of frequency transition to %u kHz\n",
311 state, freqs->new);
312
313 switch (state) {
314
315 case CPUFREQ_PRECHANGE:
316 /* detect if the driver reported a value as "old frequency"
317 * which is not equal to what the cpufreq core thinks is
318 * "old frequency".
319 */
320 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
321 if ((policy) && (policy->cpu == freqs->cpu) &&
322 (policy->cur) && (policy->cur != freqs->old)) {
323 pr_debug("Warning: CPU frequency is"
324 " %u, cpufreq assumed %u kHz.\n",
325 freqs->old, policy->cur);
326 freqs->old = policy->cur;
327 }
328 }
329 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
330 CPUFREQ_PRECHANGE, freqs);
331 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
332 break;
333
334 case CPUFREQ_POSTCHANGE:
335 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
336 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
337 (unsigned long)freqs->cpu);
338 trace_cpu_frequency(freqs->new, freqs->cpu);
339 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
340 CPUFREQ_POSTCHANGE, freqs);
341 if (likely(policy) && likely(policy->cpu == freqs->cpu))
342 policy->cur = freqs->new;
343 break;
344 }
345 }
346 /**
347 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
348 * on frequency transition.
349 *
350 * This function calls the transition notifiers and the "adjust_jiffies"
351 * function. It is called twice on all CPU frequency changes that have
352 * external effects.
353 */
354 void cpufreq_notify_transition(struct cpufreq_policy *policy,
355 struct cpufreq_freqs *freqs, unsigned int state)
356 {
357 for_each_cpu(freqs->cpu, policy->cpus)
358 __cpufreq_notify_transition(policy, freqs, state);
359 }
360 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
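
/*
 * Illustrative aside: the call pattern a scaling driver's ->target() callback
 * is expected to use - bracket the actual hardware reprogramming with the
 * PRECHANGE/POSTCHANGE pair so notifiers and loops_per_jiffy stay coherent.
 * example_hw_set_rate() stands in for hypothetical PLL/regulator work.
 */
static int example_hw_set_rate(unsigned int khz)
{
	return 0;				/* stub for illustration only */
}

static int example_driver_target(struct cpufreq_policy *policy,
				 unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	ret = example_hw_set_rate(target_freq);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}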
361
362
363
364 /*********************************************************************
365 * SYSFS INTERFACE *
366 *********************************************************************/
367
368 static struct cpufreq_governor *__find_governor(const char *str_governor)
369 {
370 struct cpufreq_governor *t;
371
372 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
373 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
374 return t;
375
376 return NULL;
377 }
378
379 /**
380 * cpufreq_parse_governor - parse a governor string
381 */
382 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
383 struct cpufreq_governor **governor)
384 {
385 int err = -EINVAL;
386
387 if (!cpufreq_driver)
388 goto out;
389
390 if (cpufreq_driver->setpolicy) {
391 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
392 *policy = CPUFREQ_POLICY_PERFORMANCE;
393 err = 0;
394 } else if (!strnicmp(str_governor, "powersave",
395 CPUFREQ_NAME_LEN)) {
396 *policy = CPUFREQ_POLICY_POWERSAVE;
397 err = 0;
398 }
399 } else if (cpufreq_driver->target) {
400 struct cpufreq_governor *t;
401
402 mutex_lock(&cpufreq_governor_mutex);
403
404 t = __find_governor(str_governor);
405
406 if (t == NULL) {
407 int ret;
408
409 mutex_unlock(&cpufreq_governor_mutex);
410 ret = request_module("cpufreq_%s", str_governor);
411 mutex_lock(&cpufreq_governor_mutex);
412
413 if (ret == 0)
414 t = __find_governor(str_governor);
415 }
416
417 if (t != NULL) {
418 *governor = t;
419 err = 0;
420 }
421
422 mutex_unlock(&cpufreq_governor_mutex);
423 }
424 out:
425 return err;
426 }
427
428
429 /**
430 * cpufreq_per_cpu_attr_read() / show_##file_name() -
431 * print out cpufreq information
432 *
433 * Write out information from cpufreq_driver->policy[cpu]; object must be
434 * "unsigned int".
435 */
436
437 #define show_one(file_name, object) \
438 static ssize_t show_##file_name \
439 (struct cpufreq_policy *policy, char *buf) \
440 { \
441 return sprintf(buf, "%u\n", policy->object); \
442 }
443
444 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
445 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
446 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
447 show_one(scaling_min_freq, min);
448 show_one(scaling_max_freq, max);
449 show_one(scaling_cur_freq, cur);
450
451 static int __cpufreq_set_policy(struct cpufreq_policy *data,
452 struct cpufreq_policy *policy);
453
454 /**
455 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
456 */
457 #define store_one(file_name, object) \
458 static ssize_t store_##file_name \
459 (struct cpufreq_policy *policy, const char *buf, size_t count) \
460 { \
461 unsigned int ret; \
462 struct cpufreq_policy new_policy; \
463 \
464 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
465 if (ret) \
466 return -EINVAL; \
467 \
468 ret = sscanf(buf, "%u", &new_policy.object); \
469 if (ret != 1) \
470 return -EINVAL; \
471 \
472 ret = __cpufreq_set_policy(policy, &new_policy); \
473 policy->user_policy.object = policy->object; \
474 \
475 return ret ? ret : count; \
476 }
477
478 store_one(scaling_min_freq, min);
479 store_one(scaling_max_freq, max);
480
481 /**
482 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
483 */
484 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
485 char *buf)
486 {
487 unsigned int cur_freq = __cpufreq_get(policy->cpu);
488 if (!cur_freq)
489 return sprintf(buf, "<unknown>");
490 return sprintf(buf, "%u\n", cur_freq);
491 }
492
493
494 /**
495 * show_scaling_governor - show the current policy for the specified CPU
496 */
497 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
498 {
499 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
500 return sprintf(buf, "powersave\n");
501 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
502 return sprintf(buf, "performance\n");
503 else if (policy->governor)
504 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
505 policy->governor->name);
506 return -EINVAL;
507 }
508
509
510 /**
511 * store_scaling_governor - store policy for the specified CPU
512 */
513 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
514 const char *buf, size_t count)
515 {
516 unsigned int ret;
517 char str_governor[16];
518 struct cpufreq_policy new_policy;
519
520 ret = cpufreq_get_policy(&new_policy, policy->cpu);
521 if (ret)
522 return ret;
523
524 ret = sscanf(buf, "%15s", str_governor);
525 if (ret != 1)
526 return -EINVAL;
527
528 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
529 &new_policy.governor))
530 return -EINVAL;
531
532 /* Do not use cpufreq_set_policy here or the user_policy.max
533 will be wrongly overridden */
534 ret = __cpufreq_set_policy(policy, &new_policy);
535
536 policy->user_policy.policy = policy->policy;
537 policy->user_policy.governor = policy->governor;
538
539 if (ret)
540 return ret;
541 else
542 return count;
543 }
544
545 /**
546 * show_scaling_driver - show the cpufreq driver currently loaded
547 */
548 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
549 {
550 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
551 }
552
553 /**
554 * show_scaling_available_governors - show the available CPUfreq governors
555 */
556 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
557 char *buf)
558 {
559 ssize_t i = 0;
560 struct cpufreq_governor *t;
561
562 if (!cpufreq_driver->target) {
563 i += sprintf(buf, "performance powersave");
564 goto out;
565 }
566
567 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
568 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
569 - (CPUFREQ_NAME_LEN + 2)))
570 goto out;
571 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
572 }
573 out:
574 i += sprintf(&buf[i], "\n");
575 return i;
576 }
577
578 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
579 {
580 ssize_t i = 0;
581 unsigned int cpu;
582
583 for_each_cpu(cpu, mask) {
584 if (i)
585 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
586 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
587 if (i >= (PAGE_SIZE - 5))
588 break;
589 }
590 i += sprintf(&buf[i], "\n");
591 return i;
592 }
593
594 /**
595 * show_related_cpus - show the CPUs affected by each transition even if
596 * hw coordination is in use
597 */
598 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
599 {
600 return show_cpus(policy->related_cpus, buf);
601 }
602
603 /**
604 * show_affected_cpus - show the CPUs affected by each transition
605 */
606 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
607 {
608 return show_cpus(policy->cpus, buf);
609 }
610
611 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
612 const char *buf, size_t count)
613 {
614 unsigned int freq = 0;
615 unsigned int ret;
616
617 if (!policy->governor || !policy->governor->store_setspeed)
618 return -EINVAL;
619
620 ret = sscanf(buf, "%u", &freq);
621 if (ret != 1)
622 return -EINVAL;
623
624 policy->governor->store_setspeed(policy, freq);
625
626 return count;
627 }
628
629 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
630 {
631 if (!policy->governor || !policy->governor->show_setspeed)
632 return sprintf(buf, "<unsupported>\n");
633
634 return policy->governor->show_setspeed(policy, buf);
635 }
636
637 /**
638 * show_bios_limit - show the current cpufreq HW/BIOS limitation
639 */
640 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
641 {
642 unsigned int limit;
643 int ret;
644 if (cpufreq_driver->bios_limit) {
645 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
646 if (!ret)
647 return sprintf(buf, "%u\n", limit);
648 }
649 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
650 }
651
652 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
653 cpufreq_freq_attr_ro(cpuinfo_min_freq);
654 cpufreq_freq_attr_ro(cpuinfo_max_freq);
655 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
656 cpufreq_freq_attr_ro(scaling_available_governors);
657 cpufreq_freq_attr_ro(scaling_driver);
658 cpufreq_freq_attr_ro(scaling_cur_freq);
659 cpufreq_freq_attr_ro(bios_limit);
660 cpufreq_freq_attr_ro(related_cpus);
661 cpufreq_freq_attr_ro(affected_cpus);
662 cpufreq_freq_attr_rw(scaling_min_freq);
663 cpufreq_freq_attr_rw(scaling_max_freq);
664 cpufreq_freq_attr_rw(scaling_governor);
665 cpufreq_freq_attr_rw(scaling_setspeed);
666
667 static struct attribute *default_attrs[] = {
668 &cpuinfo_min_freq.attr,
669 &cpuinfo_max_freq.attr,
670 &cpuinfo_transition_latency.attr,
671 &scaling_min_freq.attr,
672 &scaling_max_freq.attr,
673 &affected_cpus.attr,
674 &related_cpus.attr,
675 &scaling_governor.attr,
676 &scaling_driver.attr,
677 &scaling_available_governors.attr,
678 &scaling_setspeed.attr,
679 NULL
680 };
681
682 struct kobject *cpufreq_global_kobject;
683 EXPORT_SYMBOL(cpufreq_global_kobject);
684
685 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
686 #define to_attr(a) container_of(a, struct freq_attr, attr)
687
688 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
689 {
690 struct cpufreq_policy *policy = to_policy(kobj);
691 struct freq_attr *fattr = to_attr(attr);
692 ssize_t ret = -EINVAL;
693 policy = cpufreq_cpu_get_sysfs(policy->cpu);
694 if (!policy)
695 goto no_policy;
696
697 if (lock_policy_rwsem_read(policy->cpu) < 0)
698 goto fail;
699
700 if (fattr->show)
701 ret = fattr->show(policy, buf);
702 else
703 ret = -EIO;
704
705 unlock_policy_rwsem_read(policy->cpu);
706 fail:
707 cpufreq_cpu_put_sysfs(policy);
708 no_policy:
709 return ret;
710 }
711
712 static ssize_t store(struct kobject *kobj, struct attribute *attr,
713 const char *buf, size_t count)
714 {
715 struct cpufreq_policy *policy = to_policy(kobj);
716 struct freq_attr *fattr = to_attr(attr);
717 ssize_t ret = -EINVAL;
718 policy = cpufreq_cpu_get_sysfs(policy->cpu);
719 if (!policy)
720 goto no_policy;
721
722 if (lock_policy_rwsem_write(policy->cpu) < 0)
723 goto fail;
724
725 if (fattr->store)
726 ret = fattr->store(policy, buf, count);
727 else
728 ret = -EIO;
729
730 unlock_policy_rwsem_write(policy->cpu);
731 fail:
732 cpufreq_cpu_put_sysfs(policy);
733 no_policy:
734 return ret;
735 }
736
737 static void cpufreq_sysfs_release(struct kobject *kobj)
738 {
739 struct cpufreq_policy *policy = to_policy(kobj);
740 pr_debug("last reference is dropped\n");
741 complete(&policy->kobj_unregister);
742 }
743
744 static const struct sysfs_ops sysfs_ops = {
745 .show = show,
746 .store = store,
747 };
748
749 static struct kobj_type ktype_cpufreq = {
750 .sysfs_ops = &sysfs_ops,
751 .default_attrs = default_attrs,
752 .release = cpufreq_sysfs_release,
753 };
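
/*
 * Illustrative aside: how a scaling driver publishes extra per-policy sysfs
 * files.  Attributes listed in its ->attr table are created by
 * cpufreq_add_dev_interface() below using this same ktype; "example_boost"
 * is a hypothetical read-only attribute.
 */
static ssize_t show_example_boost(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->max);	/* placeholder value */
}
cpufreq_freq_attr_ro(example_boost);

static struct freq_attr *example_driver_attrs[] = {
	&example_boost,
	NULL,
};
/* a driver would then set  .attr = example_driver_attrs  before registering */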
754
755 /* symlink affected CPUs */
756 static int cpufreq_add_dev_symlink(unsigned int cpu,
757 struct cpufreq_policy *policy)
758 {
759 unsigned int j;
760 int ret = 0;
761
762 for_each_cpu(j, policy->cpus) {
763 struct cpufreq_policy *managed_policy;
764 struct device *cpu_dev;
765
766 if (j == cpu)
767 continue;
768
769 pr_debug("CPU %u already managed, adding link\n", j);
770 managed_policy = cpufreq_cpu_get(cpu);
771 cpu_dev = get_cpu_device(j);
772 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
773 "cpufreq");
774 if (ret) {
775 cpufreq_cpu_put(managed_policy);
776 return ret;
777 }
778 }
779 return ret;
780 }
781
782 static int cpufreq_add_dev_interface(unsigned int cpu,
783 struct cpufreq_policy *policy,
784 struct device *dev)
785 {
786 struct cpufreq_policy new_policy;
787 struct freq_attr **drv_attr;
788 unsigned long flags;
789 int ret = 0;
790 unsigned int j;
791
792 /* prepare interface data */
793 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
794 &dev->kobj, "cpufreq");
795 if (ret)
796 return ret;
797
798 /* set up files for this cpu device */
799 drv_attr = cpufreq_driver->attr;
800 while ((drv_attr) && (*drv_attr)) {
801 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
802 if (ret)
803 goto err_out_kobj_put;
804 drv_attr++;
805 }
806 if (cpufreq_driver->get) {
807 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
808 if (ret)
809 goto err_out_kobj_put;
810 }
811 if (cpufreq_driver->target) {
812 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
813 if (ret)
814 goto err_out_kobj_put;
815 }
816 if (cpufreq_driver->bios_limit) {
817 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
818 if (ret)
819 goto err_out_kobj_put;
820 }
821
822 write_lock_irqsave(&cpufreq_driver_lock, flags);
823 for_each_cpu(j, policy->cpus) {
824 per_cpu(cpufreq_cpu_data, j) = policy;
825 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
826 }
827 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
828
829 ret = cpufreq_add_dev_symlink(cpu, policy);
830 if (ret)
831 goto err_out_kobj_put;
832
833 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
834          /* ensure that the starting sequence is run in __cpufreq_set_policy */
835 policy->governor = NULL;
836
837 /* set default policy */
838 ret = __cpufreq_set_policy(policy, &new_policy);
839 policy->user_policy.policy = policy->policy;
840 policy->user_policy.governor = policy->governor;
841
842 if (ret) {
843 pr_debug("setting policy failed\n");
844 if (cpufreq_driver->exit)
845 cpufreq_driver->exit(policy);
846 }
847 return ret;
848
849 err_out_kobj_put:
850 kobject_put(&policy->kobj);
851 wait_for_completion(&policy->kobj_unregister);
852 return ret;
853 }
854
855 #ifdef CONFIG_HOTPLUG_CPU
856 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
857 struct device *dev)
858 {
859 struct cpufreq_policy *policy;
860 int ret = 0, has_target = !!cpufreq_driver->target;
861 unsigned long flags;
862
863 policy = cpufreq_cpu_get(sibling);
864 WARN_ON(!policy);
865
866 if (has_target)
867 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
868
869 lock_policy_rwsem_write(sibling);
870
871 write_lock_irqsave(&cpufreq_driver_lock, flags);
872
873 cpumask_set_cpu(cpu, policy->cpus);
874 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
875 per_cpu(cpufreq_cpu_data, cpu) = policy;
876 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
877
878 unlock_policy_rwsem_write(sibling);
879
880 if (has_target) {
881 __cpufreq_governor(policy, CPUFREQ_GOV_START);
882 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
883 }
884
885 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
886 if (ret) {
887 cpufreq_cpu_put(policy);
888 return ret;
889 }
890
891 return 0;
892 }
893 #endif
894
895 /**
896 * cpufreq_add_dev - add a CPU device
897 *
898 * Adds the cpufreq interface for a CPU device.
899 *
900 * The Oracle says: try running cpufreq registration/unregistration concurrently
901  * with cpu hotplugging and all hell will break loose. Tried to clean this
902 * mess up, but more thorough testing is needed. - Mathieu
903 */
904 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
905 {
906 unsigned int j, cpu = dev->id;
907 int ret = -ENOMEM;
908 struct cpufreq_policy *policy;
909 unsigned long flags;
910 #ifdef CONFIG_HOTPLUG_CPU
911 struct cpufreq_governor *gov;
912 int sibling;
913 #endif
914
915 if (cpu_is_offline(cpu))
916 return 0;
917
918 pr_debug("adding CPU %u\n", cpu);
919
920 #ifdef CONFIG_SMP
921 /* check whether a different CPU already registered this
922 * CPU because it is in the same boat. */
923 policy = cpufreq_cpu_get(cpu);
924 if (unlikely(policy)) {
925 cpufreq_cpu_put(policy);
926 return 0;
927 }
928
929 #ifdef CONFIG_HOTPLUG_CPU
930 /* Check if this cpu was hot-unplugged earlier and has siblings */
931 read_lock_irqsave(&cpufreq_driver_lock, flags);
932 for_each_online_cpu(sibling) {
933 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
934 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
935 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
936 return cpufreq_add_policy_cpu(cpu, sibling, dev);
937 }
938 }
939 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
940 #endif
941 #endif
942
943 if (!try_module_get(cpufreq_driver->owner)) {
944 ret = -EINVAL;
945 goto module_out;
946 }
947
948 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
949 if (!policy)
950 goto nomem_out;
951
952 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
953 goto err_free_policy;
954
955 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
956 goto err_free_cpumask;
957
958 policy->cpu = cpu;
959 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
960 cpumask_copy(policy->cpus, cpumask_of(cpu));
961
962 /* Initially set CPU itself as the policy_cpu */
963 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
964
965 init_completion(&policy->kobj_unregister);
966 INIT_WORK(&policy->update, handle_update);
967
968 /* call driver. From then on the cpufreq must be able
969 * to accept all calls to ->verify and ->setpolicy for this CPU
970 */
971 ret = cpufreq_driver->init(policy);
972 if (ret) {
973 pr_debug("initialization failed\n");
974 goto err_set_policy_cpu;
975 }
976
977          /* related cpus should at least have policy->cpus */
978 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
979
980 /*
981           * affected cpus must always be online. We aren't
982 * managing offline cpus here.
983 */
984 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
985
986 policy->user_policy.min = policy->min;
987 policy->user_policy.max = policy->max;
988
989 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
990 CPUFREQ_START, policy);
991
992 #ifdef CONFIG_HOTPLUG_CPU
993 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
994 if (gov) {
995 policy->governor = gov;
996 pr_debug("Restoring governor %s for cpu %d\n",
997 policy->governor->name, cpu);
998 }
999 #endif
1000
1001 ret = cpufreq_add_dev_interface(cpu, policy, dev);
1002 if (ret)
1003 goto err_out_unregister;
1004
1005 kobject_uevent(&policy->kobj, KOBJ_ADD);
1006 module_put(cpufreq_driver->owner);
1007 pr_debug("initialization complete\n");
1008
1009 return 0;
1010
1011 err_out_unregister:
1012 write_lock_irqsave(&cpufreq_driver_lock, flags);
1013 for_each_cpu(j, policy->cpus)
1014 per_cpu(cpufreq_cpu_data, j) = NULL;
1015 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1016
1017 kobject_put(&policy->kobj);
1018 wait_for_completion(&policy->kobj_unregister);
1019
1020 err_set_policy_cpu:
1021 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1022 free_cpumask_var(policy->related_cpus);
1023 err_free_cpumask:
1024 free_cpumask_var(policy->cpus);
1025 err_free_policy:
1026 kfree(policy);
1027 nomem_out:
1028 module_put(cpufreq_driver->owner);
1029 module_out:
1030 return ret;
1031 }
1032
1033 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1034 {
1035 int j;
1036
1037 policy->last_cpu = policy->cpu;
1038 policy->cpu = cpu;
1039
1040 for_each_cpu(j, policy->cpus)
1041 per_cpu(cpufreq_policy_cpu, j) = cpu;
1042
1043 #ifdef CONFIG_CPU_FREQ_TABLE
1044 cpufreq_frequency_table_update_policy_cpu(policy);
1045 #endif
1046 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1047 CPUFREQ_UPDATE_POLICY_CPU, policy);
1048 }
1049
1050 /**
1051 * __cpufreq_remove_dev - remove a CPU device
1052 *
1053 * Removes the cpufreq interface for a CPU device.
1054 * Caller should already have policy_rwsem in write mode for this CPU.
1055 * This routine frees the rwsem before returning.
1056 */
1057 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1058 {
1059 unsigned int cpu = dev->id, ret, cpus;
1060 unsigned long flags;
1061 struct cpufreq_policy *data;
1062 struct kobject *kobj;
1063 struct completion *cmp;
1064 struct device *cpu_dev;
1065
1066 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1067
1068 write_lock_irqsave(&cpufreq_driver_lock, flags);
1069
1070 data = per_cpu(cpufreq_cpu_data, cpu);
1071 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1072
1073 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1074
1075 if (!data) {
1076 pr_debug("%s: No cpu_data found\n", __func__);
1077 return -EINVAL;
1078 }
1079
1080 if (cpufreq_driver->target)
1081 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1082
1083 #ifdef CONFIG_HOTPLUG_CPU
1084 if (!cpufreq_driver->setpolicy)
1085 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1086 data->governor->name, CPUFREQ_NAME_LEN);
1087 #endif
1088
1089 WARN_ON(lock_policy_rwsem_write(cpu));
1090 cpus = cpumask_weight(data->cpus);
1091
1092 if (cpus > 1)
1093 cpumask_clear_cpu(cpu, data->cpus);
1094 unlock_policy_rwsem_write(cpu);
1095
1096 if (cpu != data->cpu) {
1097 sysfs_remove_link(&dev->kobj, "cpufreq");
1098 } else if (cpus > 1) {
1099 /* first sibling now owns the new sysfs dir */
1100 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1101 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1102 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1103 if (ret) {
1104 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1105
1106 WARN_ON(lock_policy_rwsem_write(cpu));
1107 cpumask_set_cpu(cpu, data->cpus);
1108
1109 write_lock_irqsave(&cpufreq_driver_lock, flags);
1110 per_cpu(cpufreq_cpu_data, cpu) = data;
1111 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1112
1113 unlock_policy_rwsem_write(cpu);
1114
1115 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1116 "cpufreq");
1117 return -EINVAL;
1118 }
1119
1120 WARN_ON(lock_policy_rwsem_write(cpu));
1121 update_policy_cpu(data, cpu_dev->id);
1122 unlock_policy_rwsem_write(cpu);
1123 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1124 __func__, cpu_dev->id, cpu);
1125 }
1126
1127 /* If cpu is last user of policy, free policy */
1128 if (cpus == 1) {
1129 if (cpufreq_driver->target)
1130 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1131
1132 lock_policy_rwsem_read(cpu);
1133 kobj = &data->kobj;
1134 cmp = &data->kobj_unregister;
1135 unlock_policy_rwsem_read(cpu);
1136 kobject_put(kobj);
1137
1138 /* we need to make sure that the underlying kobj is actually
1139 * not referenced anymore by anybody before we proceed with
1140 * unloading.
1141 */
1142 pr_debug("waiting for dropping of refcount\n");
1143 wait_for_completion(cmp);
1144 pr_debug("wait complete\n");
1145
1146 if (cpufreq_driver->exit)
1147 cpufreq_driver->exit(data);
1148
1149 free_cpumask_var(data->related_cpus);
1150 free_cpumask_var(data->cpus);
1151 kfree(data);
1152 } else {
1153 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1154 cpufreq_cpu_put(data);
1155 if (cpufreq_driver->target) {
1156 __cpufreq_governor(data, CPUFREQ_GOV_START);
1157 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1158 }
1159 }
1160
1161 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1162 return 0;
1163 }
1164
1165
1166 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1167 {
1168 unsigned int cpu = dev->id;
1169 int retval;
1170
1171 if (cpu_is_offline(cpu))
1172 return 0;
1173
1174 retval = __cpufreq_remove_dev(dev, sif);
1175 return retval;
1176 }
1177
1178
1179 static void handle_update(struct work_struct *work)
1180 {
1181 struct cpufreq_policy *policy =
1182 container_of(work, struct cpufreq_policy, update);
1183 unsigned int cpu = policy->cpu;
1184 pr_debug("handle_update for cpu %u called\n", cpu);
1185 cpufreq_update_policy(cpu);
1186 }
1187
1188 /**
1189  * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1190 * @cpu: cpu number
1191 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1192 * @new_freq: CPU frequency the CPU actually runs at
1193 *
1194  * We adjust to the current frequency first, and need to clean up later.
1195  * So either call cpufreq_update_policy() or schedule handle_update().
1196 */
1197 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1198 unsigned int new_freq)
1199 {
1200 struct cpufreq_policy *policy;
1201 struct cpufreq_freqs freqs;
1202 unsigned long flags;
1203
1204
1205 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1206 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1207
1208 freqs.old = old_freq;
1209 freqs.new = new_freq;
1210
1211 read_lock_irqsave(&cpufreq_driver_lock, flags);
1212 policy = per_cpu(cpufreq_cpu_data, cpu);
1213 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1214
1215 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1216 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1217 }
1218
1219
1220 /**
1221 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1222 * @cpu: CPU number
1223 *
1224 * This is the last known freq, without actually getting it from the driver.
1225 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1226 */
1227 unsigned int cpufreq_quick_get(unsigned int cpu)
1228 {
1229 struct cpufreq_policy *policy;
1230 unsigned int ret_freq = 0;
1231
1232 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1233 return cpufreq_driver->get(cpu);
1234
1235 policy = cpufreq_cpu_get(cpu);
1236 if (policy) {
1237 ret_freq = policy->cur;
1238 cpufreq_cpu_put(policy);
1239 }
1240
1241 return ret_freq;
1242 }
1243 EXPORT_SYMBOL(cpufreq_quick_get);
1244
1245 /**
1246 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1247 * @cpu: CPU number
1248 *
1249 * Just return the max possible frequency for a given CPU.
1250 */
1251 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1252 {
1253 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1254 unsigned int ret_freq = 0;
1255
1256 if (policy) {
1257 ret_freq = policy->max;
1258 cpufreq_cpu_put(policy);
1259 }
1260
1261 return ret_freq;
1262 }
1263 EXPORT_SYMBOL(cpufreq_quick_get_max);
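
/*
 * Illustrative aside: cpufreq_quick_get() and cpufreq_quick_get_max() are the
 * cheap accessors other subsystems (thermal, tracing, ...) use when they only
 * need the last known value and must not call into the driver.  A
 * hypothetical consumer:
 */
static void example_report_freq(unsigned int cpu)
{
	unsigned int cur = cpufreq_quick_get(cpu);	/* last known freq, kHz */
	unsigned int max = cpufreq_quick_get_max(cpu);	/* policy->max, kHz */

	if (cur && max)
		pr_info("cpu%u: running at %u of %u kHz\n", cpu, cur, max);
}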
1264
1265
1266 static unsigned int __cpufreq_get(unsigned int cpu)
1267 {
1268 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1269 unsigned int ret_freq = 0;
1270
1271 if (!cpufreq_driver->get)
1272 return ret_freq;
1273
1274 ret_freq = cpufreq_driver->get(cpu);
1275
1276 if (ret_freq && policy->cur &&
1277 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1278 /* verify no discrepancy between actual and
1279 saved value exists */
1280 if (unlikely(ret_freq != policy->cur)) {
1281 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1282 schedule_work(&policy->update);
1283 }
1284 }
1285
1286 return ret_freq;
1287 }
1288
1289 /**
1290 * cpufreq_get - get the current CPU frequency (in kHz)
1291 * @cpu: CPU number
1292 *
1293  * Get the current (static) CPU frequency
1294 */
1295 unsigned int cpufreq_get(unsigned int cpu)
1296 {
1297 unsigned int ret_freq = 0;
1298 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1299
1300 if (!policy)
1301 goto out;
1302
1303 if (unlikely(lock_policy_rwsem_read(cpu)))
1304 goto out_policy;
1305
1306 ret_freq = __cpufreq_get(cpu);
1307
1308 unlock_policy_rwsem_read(cpu);
1309
1310 out_policy:
1311 cpufreq_cpu_put(policy);
1312 out:
1313 return ret_freq;
1314 }
1315 EXPORT_SYMBOL(cpufreq_get);
1316
1317 static struct subsys_interface cpufreq_interface = {
1318 .name = "cpufreq",
1319 .subsys = &cpu_subsys,
1320 .add_dev = cpufreq_add_dev,
1321 .remove_dev = cpufreq_remove_dev,
1322 };
1323
1324
1325 /**
1326 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1327 *
1328 * This function is only executed for the boot processor. The other CPUs
1329 * have been put offline by means of CPU hotplug.
1330 */
1331 static int cpufreq_bp_suspend(void)
1332 {
1333 int ret = 0;
1334
1335 int cpu = smp_processor_id();
1336 struct cpufreq_policy *cpu_policy;
1337
1338 pr_debug("suspending cpu %u\n", cpu);
1339
1340 /* If there's no policy for the boot CPU, we have nothing to do. */
1341 cpu_policy = cpufreq_cpu_get(cpu);
1342 if (!cpu_policy)
1343 return 0;
1344
1345 if (cpufreq_driver->suspend) {
1346 ret = cpufreq_driver->suspend(cpu_policy);
1347 if (ret)
1348 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1349 "step on CPU %u\n", cpu_policy->cpu);
1350 }
1351
1352 cpufreq_cpu_put(cpu_policy);
1353 return ret;
1354 }
1355
1356 /**
1357 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1358 *
1359 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1360  *	2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1361 * restored. It will verify that the current freq is in sync with
1362 * what we believe it to be. This is a bit later than when it
1363  *	    should be, but nonetheless it's better than calling
1364 * cpufreq_driver->get() here which might re-enable interrupts...
1365 *
1366 * This function is only executed for the boot CPU. The other CPUs have not
1367 * been turned on yet.
1368 */
1369 static void cpufreq_bp_resume(void)
1370 {
1371 int ret = 0;
1372
1373 int cpu = smp_processor_id();
1374 struct cpufreq_policy *cpu_policy;
1375
1376 pr_debug("resuming cpu %u\n", cpu);
1377
1378 /* If there's no policy for the boot CPU, we have nothing to do. */
1379 cpu_policy = cpufreq_cpu_get(cpu);
1380 if (!cpu_policy)
1381 return;
1382
1383 if (cpufreq_driver->resume) {
1384 ret = cpufreq_driver->resume(cpu_policy);
1385 if (ret) {
1386 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1387 "step on CPU %u\n", cpu_policy->cpu);
1388 goto fail;
1389 }
1390 }
1391
1392 schedule_work(&cpu_policy->update);
1393
1394 fail:
1395 cpufreq_cpu_put(cpu_policy);
1396 }
1397
1398 static struct syscore_ops cpufreq_syscore_ops = {
1399 .suspend = cpufreq_bp_suspend,
1400 .resume = cpufreq_bp_resume,
1401 };
1402
1403 /**
1404 * cpufreq_get_current_driver - return current driver's name
1405 *
1406 * Return the name string of the currently loaded cpufreq driver
1407 * or NULL, if none.
1408 */
1409 const char *cpufreq_get_current_driver(void)
1410 {
1411 if (cpufreq_driver)
1412 return cpufreq_driver->name;
1413
1414 return NULL;
1415 }
1416 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1417
1418 /*********************************************************************
1419 * NOTIFIER LISTS INTERFACE *
1420 *********************************************************************/
1421
1422 /**
1423 * cpufreq_register_notifier - register a driver with cpufreq
1424 * @nb: notifier function to register
1425 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1426 *
1427 * Add a driver to one of two lists: either a list of drivers that
1428 * are notified about clock rate changes (once before and once after
1429 * the transition), or a list of drivers that are notified about
1430 * changes in cpufreq policy.
1431 *
1432 * This function may sleep, and has the same return conditions as
1433 * blocking_notifier_chain_register.
1434 */
1435 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1436 {
1437 int ret;
1438
1439 if (cpufreq_disabled())
1440 return -EINVAL;
1441
1442 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1443
1444 switch (list) {
1445 case CPUFREQ_TRANSITION_NOTIFIER:
1446 ret = srcu_notifier_chain_register(
1447 &cpufreq_transition_notifier_list, nb);
1448 break;
1449 case CPUFREQ_POLICY_NOTIFIER:
1450 ret = blocking_notifier_chain_register(
1451 &cpufreq_policy_notifier_list, nb);
1452 break;
1453 default:
1454 ret = -EINVAL;
1455 }
1456
1457 return ret;
1458 }
1459 EXPORT_SYMBOL(cpufreq_register_notifier);
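
/*
 * Illustrative aside: a minimal transition-notifier client.  The callback is
 * invoked twice per frequency change (CPUFREQ_PRECHANGE, then
 * CPUFREQ_POSTCHANGE) with a struct cpufreq_freqs payload; all names here are
 * hypothetical.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/*
 * registration, typically from a module init path:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */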
1460
1461
1462 /**
1463 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1464 * @nb: notifier block to be unregistered
1465 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1466 *
1467 * Remove a driver from the CPU frequency notifier list.
1468 *
1469 * This function may sleep, and has the same return conditions as
1470 * blocking_notifier_chain_unregister.
1471 */
1472 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1473 {
1474 int ret;
1475
1476 if (cpufreq_disabled())
1477 return -EINVAL;
1478
1479 switch (list) {
1480 case CPUFREQ_TRANSITION_NOTIFIER:
1481 ret = srcu_notifier_chain_unregister(
1482 &cpufreq_transition_notifier_list, nb);
1483 break;
1484 case CPUFREQ_POLICY_NOTIFIER:
1485 ret = blocking_notifier_chain_unregister(
1486 &cpufreq_policy_notifier_list, nb);
1487 break;
1488 default:
1489 ret = -EINVAL;
1490 }
1491
1492 return ret;
1493 }
1494 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1495
1496
1497 /*********************************************************************
1498 * GOVERNORS *
1499 *********************************************************************/
1500
1501
1502 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1503 unsigned int target_freq,
1504 unsigned int relation)
1505 {
1506 int retval = -EINVAL;
1507 unsigned int old_target_freq = target_freq;
1508
1509 if (cpufreq_disabled())
1510 return -ENODEV;
1511
1512 /* Make sure that target_freq is within supported range */
1513 if (target_freq > policy->max)
1514 target_freq = policy->max;
1515 if (target_freq < policy->min)
1516 target_freq = policy->min;
1517
1518 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1519 policy->cpu, target_freq, relation, old_target_freq);
1520
1521 if (target_freq == policy->cur)
1522 return 0;
1523
1524 if (cpufreq_driver->target)
1525 retval = cpufreq_driver->target(policy, target_freq, relation);
1526
1527 return retval;
1528 }
1529 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1530
1531 int cpufreq_driver_target(struct cpufreq_policy *policy,
1532 unsigned int target_freq,
1533 unsigned int relation)
1534 {
1535 int ret = -EINVAL;
1536
1537 policy = cpufreq_cpu_get(policy->cpu);
1538 if (!policy)
1539 goto no_policy;
1540
1541 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1542 goto fail;
1543
1544 ret = __cpufreq_driver_target(policy, target_freq, relation);
1545
1546 unlock_policy_rwsem_write(policy->cpu);
1547
1548 fail:
1549 cpufreq_cpu_put(policy);
1550 no_policy:
1551 return ret;
1552 }
1553 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
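
/*
 * Illustrative aside: governors call __cpufreq_driver_target() with the policy
 * rwsem already held (see the locking rules at the top of this file), whereas
 * other contexts go through cpufreq_driver_target(), which takes the lock
 * itself.  A simple governor-style helper, hypothetical name included:
 */
static void example_gov_set_speed(struct cpufreq_policy *policy,
				  unsigned int freq)
{
	/* __cpufreq_driver_target() clamps to policy->min/max as well */
	__cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}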
1554
1555 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1556 {
1557 int ret = 0;
1558
1559 if (cpufreq_disabled())
1560 return ret;
1561
1562 if (!cpufreq_driver->getavg)
1563 return 0;
1564
1565 policy = cpufreq_cpu_get(policy->cpu);
1566 if (!policy)
1567 return -EINVAL;
1568
1569 ret = cpufreq_driver->getavg(policy, cpu);
1570
1571 cpufreq_cpu_put(policy);
1572 return ret;
1573 }
1574 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1575
1576 /*
1577 * when "event" is CPUFREQ_GOV_LIMITS
1578 */
1579
1580 static int __cpufreq_governor(struct cpufreq_policy *policy,
1581 unsigned int event)
1582 {
1583 int ret;
1584
1585          /* This must only be defined when the default governor is known to have
1586             latency restrictions, e.g. conservative or ondemand.
1587             That this is the case is already ensured in Kconfig
1588          */
1589 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1590 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1591 #else
1592 struct cpufreq_governor *gov = NULL;
1593 #endif
1594
1595 if (policy->governor->max_transition_latency &&
1596 policy->cpuinfo.transition_latency >
1597 policy->governor->max_transition_latency) {
1598 if (!gov)
1599 return -EINVAL;
1600 else {
1601 printk(KERN_WARNING "%s governor failed, too long"
1602 " transition latency of HW, fallback"
1603 " to %s governor\n",
1604 policy->governor->name,
1605 gov->name);
1606 policy->governor = gov;
1607 }
1608 }
1609
1610 if (!try_module_get(policy->governor->owner))
1611 return -EINVAL;
1612
1613 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1614 policy->cpu, event);
1615
1616 mutex_lock(&cpufreq_governor_lock);
1617 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1618 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1619 mutex_unlock(&cpufreq_governor_lock);
1620 return -EBUSY;
1621 }
1622
1623 if (event == CPUFREQ_GOV_STOP)
1624 policy->governor_enabled = false;
1625 else if (event == CPUFREQ_GOV_START)
1626 policy->governor_enabled = true;
1627
1628 mutex_unlock(&cpufreq_governor_lock);
1629
1630 ret = policy->governor->governor(policy, event);
1631
1632 if (!ret) {
1633 if (event == CPUFREQ_GOV_POLICY_INIT)
1634 policy->governor->initialized++;
1635 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1636 policy->governor->initialized--;
1637 } else {
1638 /* Restore original values */
1639 mutex_lock(&cpufreq_governor_lock);
1640 if (event == CPUFREQ_GOV_STOP)
1641 policy->governor_enabled = true;
1642 else if (event == CPUFREQ_GOV_START)
1643 policy->governor_enabled = false;
1644 mutex_unlock(&cpufreq_governor_lock);
1645 }
1646
1647          /* we keep one module reference alive for
1648                          each CPU governed by this governor */
1649 if ((event != CPUFREQ_GOV_START) || ret)
1650 module_put(policy->governor->owner);
1651 if ((event == CPUFREQ_GOV_STOP) && !ret)
1652 module_put(policy->governor->owner);
1653
1654 return ret;
1655 }
1656
1657
1658 int cpufreq_register_governor(struct cpufreq_governor *governor)
1659 {
1660 int err;
1661
1662 if (!governor)
1663 return -EINVAL;
1664
1665 if (cpufreq_disabled())
1666 return -ENODEV;
1667
1668 mutex_lock(&cpufreq_governor_mutex);
1669
1670 governor->initialized = 0;
1671 err = -EBUSY;
1672 if (__find_governor(governor->name) == NULL) {
1673 err = 0;
1674 list_add(&governor->governor_list, &cpufreq_governor_list);
1675 }
1676
1677 mutex_unlock(&cpufreq_governor_mutex);
1678 return err;
1679 }
1680 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
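
/*
 * Illustrative aside: the skeleton of a minimal governor as this core expects
 * it - a single ->governor() callback multiplexed over the CPUFREQ_GOV_*
 * events, registered once at module init.  "example" is a hypothetical
 * governor that simply pins the policy to its maximum.
 */
static int example_governor_fn(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/*
 * a module would call cpufreq_register_governor(&example_governor) from its
 * init function and cpufreq_unregister_governor() from its exit function.
 */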
1681
1682
1683 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1684 {
1685 #ifdef CONFIG_HOTPLUG_CPU
1686 int cpu;
1687 #endif
1688
1689 if (!governor)
1690 return;
1691
1692 if (cpufreq_disabled())
1693 return;
1694
1695 #ifdef CONFIG_HOTPLUG_CPU
1696 for_each_present_cpu(cpu) {
1697 if (cpu_online(cpu))
1698 continue;
1699 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1700 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1701 }
1702 #endif
1703
1704 mutex_lock(&cpufreq_governor_mutex);
1705 list_del(&governor->governor_list);
1706 mutex_unlock(&cpufreq_governor_mutex);
1707 return;
1708 }
1709 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1710
1711
1712
1713 /*********************************************************************
1714 * POLICY INTERFACE *
1715 *********************************************************************/
1716
1717 /**
1718 * cpufreq_get_policy - get the current cpufreq_policy
1719 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1720 * is written
1721 *
1722 * Reads the current cpufreq policy.
1723 */
1724 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1725 {
1726 struct cpufreq_policy *cpu_policy;
1727 if (!policy)
1728 return -EINVAL;
1729
1730 cpu_policy = cpufreq_cpu_get(cpu);
1731 if (!cpu_policy)
1732 return -EINVAL;
1733
1734 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1735
1736 cpufreq_cpu_put(cpu_policy);
1737 return 0;
1738 }
1739 EXPORT_SYMBOL(cpufreq_get_policy);
1740
1741
1742 /*
1743 * data : current policy.
1744 * policy : policy to be set.
1745 */
1746 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1747 struct cpufreq_policy *policy)
1748 {
1749 int ret = 0, failed = 1;
1750
1751 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1752 policy->min, policy->max);
1753
1754 memcpy(&policy->cpuinfo, &data->cpuinfo,
1755 sizeof(struct cpufreq_cpuinfo));
1756
1757 if (policy->min > data->max || policy->max < data->min) {
1758 ret = -EINVAL;
1759 goto error_out;
1760 }
1761
1762 /* verify the cpu speed can be set within this limit */
1763 ret = cpufreq_driver->verify(policy);
1764 if (ret)
1765 goto error_out;
1766
1767 /* adjust if necessary - all reasons */
1768 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1769 CPUFREQ_ADJUST, policy);
1770
1771 /* adjust if necessary - hardware incompatibility*/
1772 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1773 CPUFREQ_INCOMPATIBLE, policy);
1774
1775 /* verify the cpu speed can be set within this limit,
1776 which might be different to the first one */
1777 ret = cpufreq_driver->verify(policy);
1778 if (ret)
1779 goto error_out;
1780
1781 /* notification of the new policy */
1782 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1783 CPUFREQ_NOTIFY, policy);
1784
1785 data->min = policy->min;
1786 data->max = policy->max;
1787
1788 pr_debug("new min and max freqs are %u - %u kHz\n",
1789 data->min, data->max);
1790
1791 if (cpufreq_driver->setpolicy) {
1792 data->policy = policy->policy;
1793 pr_debug("setting range\n");
1794 ret = cpufreq_driver->setpolicy(policy);
1795 } else {
1796 if (policy->governor != data->governor) {
1797 /* save old, working values */
1798 struct cpufreq_governor *old_gov = data->governor;
1799
1800 pr_debug("governor switch\n");
1801
1802 /* end old governor */
1803 if (data->governor) {
1804 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1805 unlock_policy_rwsem_write(policy->cpu);
1806 __cpufreq_governor(data,
1807 CPUFREQ_GOV_POLICY_EXIT);
1808 lock_policy_rwsem_write(policy->cpu);
1809 }
1810
1811 /* start new governor */
1812 data->governor = policy->governor;
1813 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1814 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1815 failed = 0;
1816 } else {
1817 unlock_policy_rwsem_write(policy->cpu);
1818 __cpufreq_governor(data,
1819 CPUFREQ_GOV_POLICY_EXIT);
1820 lock_policy_rwsem_write(policy->cpu);
1821 }
1822 }
1823
1824 if (failed) {
1825 /* new governor failed, so re-start old one */
1826 pr_debug("starting governor %s failed\n",
1827 data->governor->name);
1828 if (old_gov) {
1829 data->governor = old_gov;
1830 __cpufreq_governor(data,
1831 CPUFREQ_GOV_POLICY_INIT);
1832 __cpufreq_governor(data,
1833 CPUFREQ_GOV_START);
1834 }
1835 ret = -EINVAL;
1836 goto error_out;
1837 }
1838 /* might be a policy change, too, so fall through */
1839 }
1840 pr_debug("governor: change or update limits\n");
1841 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1842 }
1843
1844 error_out:
1845 return ret;
1846 }
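
/*
 * Illustrative aside: a CPUFREQ_POLICY_NOTIFIER client can narrow a policy
 * from the CPUFREQ_ADJUST step that __cpufreq_set_policy() above drives; a
 * thermal-style cap might look like this (the 1000000 kHz ceiling is an
 * arbitrary, hypothetical value):
 */
static int example_policy_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, 1000000);
	return NOTIFY_OK;
}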
1847
1848 /**
1849 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1850 * @cpu: CPU which shall be re-evaluated
1851 *
1852 * Useful for policy notifiers which have different necessities
1853 * at different times.
1854 */
1855 int cpufreq_update_policy(unsigned int cpu)
1856 {
1857 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1858 struct cpufreq_policy policy;
1859 int ret;
1860
1861 if (!data) {
1862 ret = -ENODEV;
1863 goto no_policy;
1864 }
1865
1866 if (unlikely(lock_policy_rwsem_write(cpu))) {
1867 ret = -EINVAL;
1868 goto fail;
1869 }
1870
1871 pr_debug("updating policy for CPU %u\n", cpu);
1872 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1873 policy.min = data->user_policy.min;
1874 policy.max = data->user_policy.max;
1875 policy.policy = data->user_policy.policy;
1876 policy.governor = data->user_policy.governor;
1877
1878 /* BIOS might change freq behind our back
1879 -> ask driver for current freq and notify governors about a change */
1880 if (cpufreq_driver->get) {
1881 policy.cur = cpufreq_driver->get(cpu);
1882 if (!data->cur) {
1883 pr_debug("Driver did not initialize current freq");
1884 data->cur = policy.cur;
1885 } else {
1886 if (data->cur != policy.cur && cpufreq_driver->target)
1887 cpufreq_out_of_sync(cpu, data->cur,
1888 policy.cur);
1889 }
1890 }
1891
1892 ret = __cpufreq_set_policy(data, &policy);
1893
1894 unlock_policy_rwsem_write(cpu);
1895
1896 fail:
1897 cpufreq_cpu_put(data);
1898 no_policy:
1899 return ret;
1900 }
1901 EXPORT_SYMBOL(cpufreq_update_policy);
1902
1903 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1904 unsigned long action, void *hcpu)
1905 {
1906 unsigned int cpu = (unsigned long)hcpu;
1907 struct device *dev;
1908
1909 dev = get_cpu_device(cpu);
1910 if (dev) {
1911 switch (action) {
1912 case CPU_ONLINE:
1913 case CPU_ONLINE_FROZEN:
1914 cpufreq_add_dev(dev, NULL);
1915 break;
1916 case CPU_DOWN_PREPARE:
1917 case CPU_DOWN_PREPARE_FROZEN:
1918 __cpufreq_remove_dev(dev, NULL);
1919 break;
1920 case CPU_DOWN_FAILED:
1921 case CPU_DOWN_FAILED_FROZEN:
1922 cpufreq_add_dev(dev, NULL);
1923 break;
1924 }
1925 }
1926 return NOTIFY_OK;
1927 }
1928
1929 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1930 .notifier_call = cpufreq_cpu_callback,
1931 };
1932
1933 /*********************************************************************
1934 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1935 *********************************************************************/
1936
1937 /**
1938 * cpufreq_register_driver - register a CPU Frequency driver
1939  * @driver_data: A struct cpufreq_driver containing the values
1940 * submitted by the CPU Frequency driver.
1941 *
1942 * Registers a CPU Frequency driver to this core code. This code
1943 * returns zero on success, -EBUSY when another driver got here first
1944 * (and isn't unregistered in the meantime).
1945 *
1946 */
1947 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1948 {
1949 unsigned long flags;
1950 int ret;
1951
1952 if (cpufreq_disabled())
1953 return -ENODEV;
1954
1955 if (!driver_data || !driver_data->verify || !driver_data->init ||
1956 ((!driver_data->setpolicy) && (!driver_data->target)))
1957 return -EINVAL;
1958
1959 pr_debug("trying to register driver %s\n", driver_data->name);
1960
1961 if (driver_data->setpolicy)
1962 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1963
1964 write_lock_irqsave(&cpufreq_driver_lock, flags);
1965 if (cpufreq_driver) {
1966 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1967 return -EBUSY;
1968 }
1969 cpufreq_driver = driver_data;
1970 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1971
1972 ret = subsys_interface_register(&cpufreq_interface);
1973 if (ret)
1974 goto err_null_driver;
1975
1976 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1977 int i;
1978 ret = -ENODEV;
1979
1980 /* check for at least one working CPU */
1981 for (i = 0; i < nr_cpu_ids; i++)
1982 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1983 ret = 0;
1984 break;
1985 }
1986
1987 /* if all ->init() calls failed, unregister */
1988 if (ret) {
1989 pr_debug("no CPU initialized for driver %s\n",
1990 driver_data->name);
1991 goto err_if_unreg;
1992 }
1993 }
1994
1995 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1996 pr_debug("driver %s up and running\n", driver_data->name);
1997
1998 return 0;
1999 err_if_unreg:
2000 subsys_interface_unregister(&cpufreq_interface);
2001 err_null_driver:
2002 write_lock_irqsave(&cpufreq_driver_lock, flags);
2003 cpufreq_driver = NULL;
2004 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2005 return ret;
2006 }
2007 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
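
/*
 * Illustrative aside: the minimum a scaling driver supplies before calling
 * cpufreq_register_driver() - ->init(), ->verify() and either ->target() or
 * ->setpolicy().  A frequency-table based sketch with hypothetical hardware
 * hooks and made-up operating points:
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency =  500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_drv_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 100 * 1000;   /* 100 us, hypothetical */
	return cpufreq_frequency_table_cpuinfo(policy, example_freq_table);
}

static int example_drv_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, example_freq_table);
}

static int example_drv_target(struct cpufreq_policy *policy,
			      unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	if (cpufreq_frequency_table_target(policy, example_freq_table,
					   target_freq, relation, &idx))
		return -EINVAL;
	/* program clocks/regulators for example_freq_table[idx].frequency here */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_drv_init,
	.verify	= example_drv_verify,
	.target	= example_drv_target,
};

/* registered from module init:  cpufreq_register_driver(&example_cpufreq_driver); */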
2008
2009
2010 /**
2011 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2012 *
2013 * Unregister the current CPUFreq driver. Only call this if you have
2014 * the right to do so, i.e. if you have succeeded in initialising before!
2015 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2016 * currently not initialised.
2017 */
2018 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2019 {
2020 unsigned long flags;
2021
2022 if (!cpufreq_driver || (driver != cpufreq_driver))
2023 return -EINVAL;
2024
2025 pr_debug("unregistering driver %s\n", driver->name);
2026
2027 subsys_interface_unregister(&cpufreq_interface);
2028 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2029
2030 write_lock_irqsave(&cpufreq_driver_lock, flags);
2031 cpufreq_driver = NULL;
2032 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2033
2034 return 0;
2035 }
2036 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2037
2038 static int __init cpufreq_core_init(void)
2039 {
2040 int cpu;
2041
2042 if (cpufreq_disabled())
2043 return -ENODEV;
2044
2045 for_each_possible_cpu(cpu) {
2046 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2047 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2048 }
2049
2050 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2051 BUG_ON(!cpufreq_global_kobject);
2052 register_syscore_ops(&cpufreq_syscore_ops);
2053
2054 return 0;
2055 }
2056 core_initcall(cpufreq_core_init);