/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the per-CPU cpufreq_cpu_data pointers.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per-CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc. related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in the cpufreq hotplug path must
 *   not take this semaphore, as the top-level hotplug notifier handler
 *   already takes it.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

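/*
 * The macro below generates lock_policy_rwsem_read() and
 * lock_policy_rwsem_write(): each takes the rwsem of the CPU that owns
 * @cpu's policy and returns 0 with the semaphore held, or -1 (with the
 * semaphore released again) if the CPU went offline in the meantime.
 */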
#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode(int cpu)					\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);


/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

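/**
 * cpufreq_cpu_get - acquire a reference to a CPU's policy
 * @cpu: CPU number
 *
 * Takes a reference on the driver module and a kobject reference on the
 * policy. Returns the policy on success, NULL on failure; every
 * successful call must be balanced by cpufreq_cpu_put().
 */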
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


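/**
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get()
 * @data: policy returned by a previous cpufreq_cpu_get() call
 */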
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);


/*********************************************************************
 *                    UNIFIED DEBUG HELPERS                          *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output rate-limited using printk_ratelimit? Users can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is printk rate-limiting enabled? It is enabled after a cpufreq driver
 * loads successfully, temporarily disabled when a new policy is set,
 * and disabled again upon cpufreq driver removal.
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

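/**
 * cpufreq_debug_printk - print a cpufreq debug message
 * @type: one of the CPUFREQ_DEBUG_* bits (core/drivers/governors)
 * @prefix: string prepended to the message
 * @fmt: printf-style format string
 *
 * Output is emitted only if @type is enabled in the "debug" module
 * parameter, and may be rate-limited (see "debug_ratelimit" above).
 */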
void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		/* never pass the assembled buffer as the format string */
		printk("%s", s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
			" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */


/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);



/*********************************************************************
 *                        SYSFS INTERFACE                            *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
				    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

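/*
 * show_cpus - format a cpumask as a space-separated list of CPU numbers,
 * truncated so that the result always fits in one PAGE_SIZE sysfs buffer.
 */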
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

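/*
 * store_scaling_setspeed/show_scaling_setspeed - pass a user-requested
 * speed straight through to governors (such as "userspace") that
 * implement the store_setspeed/show_setspeed hooks.
 */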
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
define_one_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

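/*
 * show/store - sysfs read/write dispatchers. Both take a reference on
 * the policy and the policy rwsem (read or write mode respectively)
 * before calling into the individual attribute handler.
 */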
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static struct kobj_type ktype_empty_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.release	= cpufreq_sysfs_release,
};


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}
	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
		kfree(policy);
		ret = -ENOMEM;
		goto nomem_out;
	}
	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
		free_cpumask_var(policy->cpus);
		kfree(policy);
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	lock_policy_rwsem_write(cpu);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_out;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	if (per_cpu(cpufreq_cpu_governor, cpu)) {
		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
		dprintk("Restoring governor %s for cpu %d\n",
			policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs. They may not be aware
		 * of it due to CPU Hotplug.
		 */
		/* FIXME: Where is this released? What about error paths? */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0)
				goto err_out_driver_exit;

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				goto err_out_driver_exit;

			cpufreq_debug_enable_ratelimit();
			ret = 0;
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	if (!cpufreq_driver->hide_interface) {
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &sys_dev->kobj, "cpufreq");
		if (ret)
			goto err_out_driver_exit;

		/* set up files for this cpu device */
		drv_attr = cpufreq_driver->attr;
		while ((drv_attr) && (*drv_attr)) {
			ret = sysfs_create_file(&policy->kobj,
						&((*drv_attr)->attr));
			if (ret)
				goto err_out_driver_exit;
			drv_attr++;
		}
		if (cpufreq_driver->get) {
			ret = sysfs_create_file(&policy->kobj,
						&cpuinfo_cur_freq.attr);
			if (ret)
				goto err_out_driver_exit;
		}
		if (cpufreq_driver->target) {
			ret = sysfs_create_file(&policy->kobj,
						&scaling_cur_freq.attr);
			if (ret)
				goto err_out_driver_exit;
		}
	} else {
		ret = kobject_init_and_add(&policy->kobj, &ktype_empty_cpufreq,
					   &sys_dev->kobj, "cpufreq");
		if (ret)
			goto err_out_driver_exit;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			goto err_out_unregister;
	}

	policy->governor = NULL; /* to ensure that the starting sequence is
				  * run in cpufreq_set_policy */

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	unlock_policy_rwsem_write(cpu);
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * The caller must already hold the policy rwsem in write mode for this CPU.
 * This routine releases the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			per_cpu(cpufreq_cpu_governor, j) = data->governor;
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	unlock_policy_rwsem_write(cpu);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	cpufreq_debug_enable_ratelimit();
	return 0;
}


static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}


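/*
 * handle_update - deferred work used to re-evaluate a CPU's policy from
 * process context; scheduled via policy->update whenever the core detects
 * that the hardware frequency has changed behind its back.
 */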
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - if the actual and the saved CPU frequency differ,
 *	we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
		"core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);


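/*
 * __cpufreq_get - read the current frequency from the driver; the caller
 * must hold the policy rwsem. If the value disagrees with policy->cur,
 * report it via cpufreq_out_of_sync() and schedule a policy update.
 */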
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the given CPU.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);


/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, so as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			goto out;
		}
	}

	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
				"cpufreq assumed %u kHz.\n",
				cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 * 3.) schedule a call to cpufreq_update_policy() ASAP once interrupts
 *     are restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, so as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
					"is %u, cpufreq assumed %u kHz.\n",
					cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                    NOTIFIER LISTS INTERFACE                       *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
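/*
 * Illustrative use only (hypothetical client code, not part of this file):
 * a driver that wants to react to frequency transitions might do
 *
 *	static int my_transition(struct notifier_block *nb,
 *				 unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */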
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);


/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                             GOVERNORS                             *
 *********************************************************************/


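/*
 * __cpufreq_driver_target - ask the driver to switch @policy's CPU to
 * @target_freq (in kHz) according to @relation (CPUFREQ_RELATION_L/H).
 * The caller must hold the policy rwsem in write mode;
 * cpufreq_driver_target() below is the locked wrapper.
 */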
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * __cpufreq_governor - forward a governor event (e.g. CPUFREQ_GOV_START,
 * CPUFREQ_GOV_STOP or CPUFREQ_GOV_LIMITS) to the policy's governor.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* gov only needs to be defined when the default governor is known
	 * to have latency restrictions, as e.g. conservative or ondemand
	 * do. That this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed: HW "
			       "transition latency too long, falling back"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this governor */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                         POLICY INTERFACE                          *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
	       sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different from the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
		data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
					data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different needs
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

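/*
 * cpufreq_cpu_callback - CPU hotplug notifier: (re-)adds the cpufreq
 * interface when a CPU comes online (or a planned removal fails), and
 * tears it down when a CPU is about to go offline.
 */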
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
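/*
 * Illustrative use only (hypothetical driver code, not part of this file):
 * a minimal ->target-style driver might register itself like this
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 */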
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
				driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						 &cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

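/*
 * cpufreq_core_init - early init: mark every possible CPU's policy owner
 * as unknown (-1) and initialize the per-CPU policy rwsems.
 */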
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
	return 0;
}

core_initcall(cpufreq_core_init);