/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with are online after they get the lock.
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
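
/*
 * The lock_policy_rwsem() macro above generates lock_policy_rwsem_read(int cpu)
 * and lock_policy_rwsem_write(int cpu). Both return 0 with the per-policy
 * rwsem held, or -1 (after releasing the semaphore again) if the CPU went
 * offline while the lock was being taken.
 */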

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);


/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
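
/*
 * Illustrative sketch (not part of the original file): the typical pairing of
 * cpufreq_cpu_get() and cpufreq_cpu_put(). The helper name is hypothetical.
 */
static unsigned int __maybe_unused example_read_cached_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;		/* no driver, or no policy for this CPU yet */

	freq = policy->cur;		/* last known frequency in kHz */
	cpufreq_cpu_put(policy);	/* drop the kobject and module references */
	return freq;
}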
180
181
182/*********************************************************************
183 * UNIFIED DEBUG HELPERS *
184 *********************************************************************/
185#ifdef CONFIG_CPU_FREQ_DEBUG
186
187/* what part(s) of the CPUfreq subsystem are debugged? */
188static unsigned int debug;
189
190/* is the debug output ratelimit'ed using printk_ratelimit? User can
191 * set or modify this value.
192 */
193static unsigned int debug_ratelimit = 1;
194
195/* is the printk_ratelimit'ing enabled? It's enabled after a successful
196 * loading of a cpufreq driver, temporarily disabled when a new policy
197 * is set, and disabled upon cpufreq driver removal
198 */
199static unsigned int disable_ratelimit = 1;
200static DEFINE_SPINLOCK(disable_ratelimit_lock);
201
858119e1 202static void cpufreq_debug_enable_ratelimit(void)
1da177e4
LT
203{
204 unsigned long flags;
205
206 spin_lock_irqsave(&disable_ratelimit_lock, flags);
207 if (disable_ratelimit)
208 disable_ratelimit--;
209 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
210}
211
858119e1 212static void cpufreq_debug_disable_ratelimit(void)
1da177e4
LT
213{
214 unsigned long flags;
215
216 spin_lock_irqsave(&disable_ratelimit_lock, flags);
217 disable_ratelimit++;
218 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
219}
220
e08f5f5b 221void cpufreq_debug_printk(unsigned int type, const char *prefix,
905d77cd 222 const char *fmt, ...)
1da177e4
LT
223{
224 char s[256];
225 va_list args;
226 unsigned int len;
227 unsigned long flags;
32ee8c3e 228
1da177e4
LT
229 WARN_ON(!prefix);
230 if (type & debug) {
231 spin_lock_irqsave(&disable_ratelimit_lock, flags);
e08f5f5b
GS
232 if (!disable_ratelimit && debug_ratelimit
233 && !printk_ratelimit()) {
1da177e4
LT
234 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
235 return;
236 }
237 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
238
239 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
240
241 va_start(args, fmt);
242 len += vsnprintf(&s[len], (256 - len), fmt, args);
243 va_end(args);
244
245 printk(s);
246
247 WARN_ON(len < 5);
248 }
249}
250EXPORT_SYMBOL(cpufreq_debug_printk);
251
252
253module_param(debug, uint, 0644);
e08f5f5b
GS
254MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
255 " 2 to debug drivers, and 4 to debug governors.");
1da177e4
LT
256
257module_param(debug_ratelimit, uint, 0644);
e08f5f5b
GS
258MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
259 " set to 0 to disable ratelimiting.");
1da177e4
LT
260
261#else /* !CONFIG_CPU_FREQ_DEBUG */
262
263static inline void cpufreq_debug_enable_ratelimit(void) { return; }
264static inline void cpufreq_debug_disable_ratelimit(void) { return; }
265
266#endif /* CONFIG_CPU_FREQ_DEBUG */
267
268
269/*********************************************************************
270 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
271 *********************************************************************/
272
273/**
274 * adjust_jiffies - adjust the system "loops_per_jiffy"
275 *
276 * This function alters the system "loops_per_jiffy" for the clock
277 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 278 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
279 * per-CPU loops_per_jiffy value wherever possible.
280 */
281#ifndef CONFIG_SMP
282static unsigned long l_p_j_ref;
283static unsigned int l_p_j_ref_freq;
284
858119e1 285static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
286{
287 if (ci->flags & CPUFREQ_CONST_LOOPS)
288 return;
289
290 if (!l_p_j_ref_freq) {
291 l_p_j_ref = loops_per_jiffy;
292 l_p_j_ref_freq = ci->old;
a4a9df58 293 dprintk("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 294 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4
LT
295 }
296 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
297 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
42d4dc3f 298 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
299 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
300 ci->new);
a4a9df58 301 dprintk("scaling loops_per_jiffy to %lu "
e08f5f5b 302 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
303 }
304}
305#else
e08f5f5b
GS
306static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
307{
308 return;
309}
1da177e4
LT
310#endif
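
/*
 * Note on the !CONFIG_SMP adjust_jiffies() above: cpufreq_scale(old, div, mult)
 * from <linux/cpufreq.h> effectively returns old * mult / div, so
 * loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq, i.e. it is scaled
 * linearly with the new frequency.
 */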
311
312
313/**
e4472cb3
DJ
314 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
315 * on frequency transition.
1da177e4 316 *
e4472cb3
DJ
317 * This function calls the transition notifiers and the "adjust_jiffies"
318 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 319 * external effects.
1da177e4
LT
320 */
321void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
322{
e4472cb3
DJ
323 struct cpufreq_policy *policy;
324
1da177e4
LT
325 BUG_ON(irqs_disabled());
326
327 freqs->flags = cpufreq_driver->flags;
e4472cb3
DJ
328 dprintk("notification %u of frequency transition to %u kHz\n",
329 state, freqs->new);
1da177e4 330
7a6aedfa 331 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
1da177e4 332 switch (state) {
e4472cb3 333
1da177e4 334 case CPUFREQ_PRECHANGE:
32ee8c3e 335 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
336 * which is not equal to what the cpufreq core thinks is
337 * "old frequency".
1da177e4
LT
338 */
339 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
340 if ((policy) && (policy->cpu == freqs->cpu) &&
341 (policy->cur) && (policy->cur != freqs->old)) {
b10eec22 342 dprintk("Warning: CPU frequency is"
e4472cb3
DJ
343 " %u, cpufreq assumed %u kHz.\n",
344 freqs->old, policy->cur);
345 freqs->old = policy->cur;
1da177e4
LT
346 }
347 }
b4dfdbb3 348 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 349 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
350 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
351 break;
e4472cb3 352
1da177e4
LT
353 case CPUFREQ_POSTCHANGE:
354 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
b4dfdbb3 355 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 356 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
357 if (likely(policy) && likely(policy->cpu == freqs->cpu))
358 policy->cur = freqs->new;
1da177e4
LT
359 break;
360 }
1da177e4
LT
361}
362EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
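
/*
 * Illustrative sketch (not part of the original file): how a scaling driver is
 * expected to bracket a hardware frequency change with the two notifications
 * above. The helper name and the hardware write are hypothetical.
 */
static void __maybe_unused example_do_transition(struct cpufreq_policy *policy,
						 unsigned int new_khz)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = new_khz;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware to new_khz here ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}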
363
364
365
366/*********************************************************************
367 * SYSFS INTERFACE *
368 *********************************************************************/
369
3bcb09a3
JF
370static struct cpufreq_governor *__find_governor(const char *str_governor)
371{
372 struct cpufreq_governor *t;
373
374 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 375 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
376 return t;
377
378 return NULL;
379}
380
1da177e4
LT
381/**
382 * cpufreq_parse_governor - parse a governor string
383 */
905d77cd 384static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
385 struct cpufreq_governor **governor)
386{
3bcb09a3
JF
387 int err = -EINVAL;
388
1da177e4 389 if (!cpufreq_driver)
3bcb09a3
JF
390 goto out;
391
1da177e4
LT
392 if (cpufreq_driver->setpolicy) {
393 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
394 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 395 err = 0;
e08f5f5b
GS
396 } else if (!strnicmp(str_governor, "powersave",
397 CPUFREQ_NAME_LEN)) {
1da177e4 398 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 399 err = 0;
1da177e4 400 }
3bcb09a3 401 } else if (cpufreq_driver->target) {
1da177e4 402 struct cpufreq_governor *t;
3bcb09a3 403
3fc54d37 404 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
405
406 t = __find_governor(str_governor);
407
ea714970 408 if (t == NULL) {
e08f5f5b
GS
409 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
410 str_governor);
ea714970
JF
411
412 if (name) {
413 int ret;
414
415 mutex_unlock(&cpufreq_governor_mutex);
326f6a5c 416 ret = request_module("%s", name);
ea714970
JF
417 mutex_lock(&cpufreq_governor_mutex);
418
419 if (ret == 0)
420 t = __find_governor(str_governor);
421 }
422
423 kfree(name);
424 }
425
3bcb09a3
JF
426 if (t != NULL) {
427 *governor = t;
428 err = 0;
1da177e4 429 }
3bcb09a3 430
3fc54d37 431 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 432 }
29464f28 433out:
3bcb09a3 434 return err;
1da177e4 435}
1da177e4
LT
436
437
1da177e4 438/**
e08f5f5b
GS
439 * cpufreq_per_cpu_attr_read() / show_##file_name() -
440 * print out cpufreq information
1da177e4
LT
441 *
442 * Write out information from cpufreq_driver->policy[cpu]; object must be
443 * "unsigned int".
444 */
445
32ee8c3e
DJ
446#define show_one(file_name, object) \
447static ssize_t show_##file_name \
905d77cd 448(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 449{ \
29464f28 450 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
451}
452
453show_one(cpuinfo_min_freq, cpuinfo.min_freq);
454show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 455show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
456show_one(scaling_min_freq, min);
457show_one(scaling_max_freq, max);
458show_one(scaling_cur_freq, cur);
459
e08f5f5b
GS
460static int __cpufreq_set_policy(struct cpufreq_policy *data,
461 struct cpufreq_policy *policy);
7970e08b 462
1da177e4
LT
463/**
464 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
465 */
466#define store_one(file_name, object) \
467static ssize_t store_##file_name \
905d77cd 468(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4
LT
469{ \
470 unsigned int ret = -EINVAL; \
471 struct cpufreq_policy new_policy; \
472 \
473 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
474 if (ret) \
475 return -EINVAL; \
476 \
29464f28 477 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
478 if (ret != 1) \
479 return -EINVAL; \
480 \
7970e08b
TR
481 ret = __cpufreq_set_policy(policy, &new_policy); \
482 policy->user_policy.object = policy->object; \
1da177e4
LT
483 \
484 return ret ? ret : count; \
485}
486
29464f28
DJ
487store_one(scaling_min_freq, min);
488store_one(scaling_max_freq, max);
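/*
 * The two store_one() uses above expand into store_scaling_min_freq() and
 * store_scaling_max_freq(): each parses the sysfs input, copies the current
 * policy, overwrites .min or .max, and applies it via __cpufreq_set_policy().
 */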
1da177e4
LT
489
490/**
491 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
492 */
905d77cd
DJ
493static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
494 char *buf)
1da177e4 495{
5a01f2e8 496 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
497 if (!cur_freq)
498 return sprintf(buf, "<unknown>");
499 return sprintf(buf, "%u\n", cur_freq);
500}
501
502
503/**
504 * show_scaling_governor - show the current policy for the specified CPU
505 */
905d77cd 506static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 507{
29464f28 508 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
509 return sprintf(buf, "powersave\n");
510 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
511 return sprintf(buf, "performance\n");
512 else if (policy->governor)
29464f28
DJ
513 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
514 policy->governor->name);
1da177e4
LT
515 return -EINVAL;
516}
517
518
519/**
520 * store_scaling_governor - store policy for the specified CPU
521 */
905d77cd
DJ
522static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
523 const char *buf, size_t count)
1da177e4
LT
524{
525 unsigned int ret = -EINVAL;
526 char str_governor[16];
527 struct cpufreq_policy new_policy;
528
529 ret = cpufreq_get_policy(&new_policy, policy->cpu);
530 if (ret)
531 return ret;
532
29464f28 533 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
534 if (ret != 1)
535 return -EINVAL;
536
e08f5f5b
GS
537 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
538 &new_policy.governor))
1da177e4
LT
539 return -EINVAL;
540
7970e08b
TR
541 /* Do not use cpufreq_set_policy here or the user_policy.max
542 will be wrongly overridden */
7970e08b
TR
543 ret = __cpufreq_set_policy(policy, &new_policy);
544
545 policy->user_policy.policy = policy->policy;
546 policy->user_policy.governor = policy->governor;
7970e08b 547
e08f5f5b
GS
548 if (ret)
549 return ret;
550 else
551 return count;
1da177e4
LT
552}
553
554/**
555 * show_scaling_driver - show the cpufreq driver currently loaded
556 */
905d77cd 557static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4
LT
558{
559 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
560}
561
562/**
563 * show_scaling_available_governors - show the available CPUfreq governors
564 */
905d77cd
DJ
565static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
566 char *buf)
1da177e4
LT
567{
568 ssize_t i = 0;
569 struct cpufreq_governor *t;
570
571 if (!cpufreq_driver->target) {
572 i += sprintf(buf, "performance powersave");
573 goto out;
574 }
575
576 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
577 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
578 - (CPUFREQ_NAME_LEN + 2)))
1da177e4
LT
579 goto out;
580 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
581 }
7d5e350f 582out:
1da177e4
LT
583 i += sprintf(&buf[i], "\n");
584 return i;
585}
e8628dd0 586
835481d9 587static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
588{
589 ssize_t i = 0;
590 unsigned int cpu;
591
835481d9 592 for_each_cpu(cpu, mask) {
1da177e4
LT
593 if (i)
594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
596 if (i >= (PAGE_SIZE - 5))
29464f28 597 break;
1da177e4
LT
598 }
599 i += sprintf(&buf[i], "\n");
600 return i;
601}
602
e8628dd0
DW
603/**
604 * show_related_cpus - show the CPUs affected by each transition even if
605 * hw coordination is in use
606 */
607static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
608{
835481d9 609 if (cpumask_empty(policy->related_cpus))
e8628dd0
DW
610 return show_cpus(policy->cpus, buf);
611 return show_cpus(policy->related_cpus, buf);
612}
613
614/**
615 * show_affected_cpus - show the CPUs affected by each transition
616 */
617static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
618{
619 return show_cpus(policy->cpus, buf);
620}
621
9e76988e 622static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 623 const char *buf, size_t count)
9e76988e
VP
624{
625 unsigned int freq = 0;
626 unsigned int ret;
627
879000f9 628 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
629 return -EINVAL;
630
631 ret = sscanf(buf, "%u", &freq);
632 if (ret != 1)
633 return -EINVAL;
634
635 policy->governor->store_setspeed(policy, freq);
636
637 return count;
638}
639
640static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
641{
879000f9 642 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
643 return sprintf(buf, "<unsupported>\n");
644
645 return policy->governor->show_setspeed(policy, buf);
646}
1da177e4
LT
647
648#define define_one_ro(_name) \
649static struct freq_attr _name = \
650__ATTR(_name, 0444, show_##_name, NULL)
651
652#define define_one_ro0400(_name) \
653static struct freq_attr _name = \
654__ATTR(_name, 0400, show_##_name, NULL)
655
656#define define_one_rw(_name) \
657static struct freq_attr _name = \
658__ATTR(_name, 0644, show_##_name, store_##_name)
659
660define_one_ro0400(cpuinfo_cur_freq);
661define_one_ro(cpuinfo_min_freq);
662define_one_ro(cpuinfo_max_freq);
ed129784 663define_one_ro(cpuinfo_transition_latency);
1da177e4
LT
664define_one_ro(scaling_available_governors);
665define_one_ro(scaling_driver);
666define_one_ro(scaling_cur_freq);
e8628dd0 667define_one_ro(related_cpus);
1da177e4
LT
668define_one_ro(affected_cpus);
669define_one_rw(scaling_min_freq);
670define_one_rw(scaling_max_freq);
671define_one_rw(scaling_governor);
9e76988e 672define_one_rw(scaling_setspeed);
1da177e4 673
905d77cd 674static struct attribute *default_attrs[] = {
1da177e4
LT
675 &cpuinfo_min_freq.attr,
676 &cpuinfo_max_freq.attr,
ed129784 677 &cpuinfo_transition_latency.attr,
1da177e4
LT
678 &scaling_min_freq.attr,
679 &scaling_max_freq.attr,
680 &affected_cpus.attr,
e8628dd0 681 &related_cpus.attr,
1da177e4
LT
682 &scaling_governor.attr,
683 &scaling_driver.attr,
684 &scaling_available_governors.attr,
9e76988e 685 &scaling_setspeed.attr,
1da177e4
LT
686 NULL
687};
688
29464f28
DJ
689#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
690#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 691
29464f28 692static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 693{
905d77cd
DJ
694 struct cpufreq_policy *policy = to_policy(kobj);
695 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 696 ssize_t ret = -EINVAL;
1da177e4
LT
697 policy = cpufreq_cpu_get(policy->cpu);
698 if (!policy)
0db4a8a9 699 goto no_policy;
5a01f2e8
VP
700
701 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 702 goto fail;
5a01f2e8 703
e08f5f5b
GS
704 if (fattr->show)
705 ret = fattr->show(policy, buf);
706 else
707 ret = -EIO;
708
5a01f2e8 709 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 710fail:
1da177e4 711 cpufreq_cpu_put(policy);
0db4a8a9 712no_policy:
1da177e4
LT
713 return ret;
714}
715
905d77cd
DJ
716static ssize_t store(struct kobject *kobj, struct attribute *attr,
717 const char *buf, size_t count)
1da177e4 718{
905d77cd
DJ
719 struct cpufreq_policy *policy = to_policy(kobj);
720 struct freq_attr *fattr = to_attr(attr);
a07530b4 721 ssize_t ret = -EINVAL;
1da177e4
LT
722 policy = cpufreq_cpu_get(policy->cpu);
723 if (!policy)
a07530b4 724 goto no_policy;
5a01f2e8
VP
725
726 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 727 goto fail;
5a01f2e8 728
e08f5f5b
GS
729 if (fattr->store)
730 ret = fattr->store(policy, buf, count);
731 else
732 ret = -EIO;
733
5a01f2e8 734 unlock_policy_rwsem_write(policy->cpu);
a07530b4 735fail:
1da177e4 736 cpufreq_cpu_put(policy);
a07530b4 737no_policy:
1da177e4
LT
738 return ret;
739}
740
905d77cd 741static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 742{
905d77cd 743 struct cpufreq_policy *policy = to_policy(kobj);
1da177e4
LT
744 dprintk("last reference is dropped\n");
745 complete(&policy->kobj_unregister);
746}
747
748static struct sysfs_ops sysfs_ops = {
749 .show = show,
750 .store = store,
751};
752
753static struct kobj_type ktype_cpufreq = {
754 .sysfs_ops = &sysfs_ops,
755 .default_attrs = default_attrs,
756 .release = cpufreq_sysfs_release,
757};
758
759
760/**
761 * cpufreq_add_dev - add a CPU device
762 *
32ee8c3e 763 * Adds the cpufreq interface for a CPU device.
1da177e4 764 */
905d77cd 765static int cpufreq_add_dev(struct sys_device *sys_dev)
1da177e4
LT
766{
767 unsigned int cpu = sys_dev->id;
768 int ret = 0;
769 struct cpufreq_policy new_policy;
770 struct cpufreq_policy *policy;
771 struct freq_attr **drv_attr;
8ff69732 772 struct sys_device *cpu_sys_dev;
1da177e4
LT
773 unsigned long flags;
774 unsigned int j;
8ff69732
DJ
775#ifdef CONFIG_SMP
776 struct cpufreq_policy *managed_policy;
777#endif
1da177e4 778
c32b6b8e
AR
779 if (cpu_is_offline(cpu))
780 return 0;
781
1da177e4
LT
782 cpufreq_debug_disable_ratelimit();
783 dprintk("adding CPU %u\n", cpu);
784
785#ifdef CONFIG_SMP
786 /* check whether a different CPU already registered this
787 * CPU because it is in the same boat. */
788 policy = cpufreq_cpu_get(cpu);
789 if (unlikely(policy)) {
8ff69732 790 cpufreq_cpu_put(policy);
1da177e4
LT
791 cpufreq_debug_enable_ratelimit();
792 return 0;
793 }
794#endif
795
796 if (!try_module_get(cpufreq_driver->owner)) {
797 ret = -EINVAL;
798 goto module_out;
799 }
800
e98df50c 801 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
1da177e4
LT
802 if (!policy) {
803 ret = -ENOMEM;
804 goto nomem_out;
805 }
835481d9
RR
806 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
807 kfree(policy);
808 ret = -ENOMEM;
809 goto nomem_out;
810 }
eaa95840 811 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
835481d9
RR
812 free_cpumask_var(policy->cpus);
813 kfree(policy);
814 ret = -ENOMEM;
815 goto nomem_out;
816 }
1da177e4
LT
817
818 policy->cpu = cpu;
835481d9 819 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 820
5a01f2e8
VP
821 /* Initially set CPU itself as the policy_cpu */
822 per_cpu(policy_cpu, cpu) = cpu;
823 lock_policy_rwsem_write(cpu);
824
1da177e4 825 init_completion(&policy->kobj_unregister);
65f27f38 826 INIT_WORK(&policy->update, handle_update);
1da177e4 827
8122c6ce
TR
828 /* Set governor before ->init, so that driver could check it */
829 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1da177e4
LT
830 /* call driver. From then on the cpufreq must be able
831 * to accept all calls to ->verify and ->setpolicy for this CPU
832 */
833 ret = cpufreq_driver->init(policy);
834 if (ret) {
835 dprintk("initialization failed\n");
836 goto err_out;
837 }
187d9f4e
MC
838 policy->user_policy.min = policy->min;
839 policy->user_policy.max = policy->max;
1da177e4 840
a1531acd
TR
841 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
842 CPUFREQ_START, policy);
843
8ff69732 844#ifdef CONFIG_SMP
084f3493
TR
845
846#ifdef CONFIG_HOTPLUG_CPU
7a6aedfa
MT
847 if (per_cpu(cpufreq_cpu_governor, cpu)) {
848 policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
084f3493
TR
849 dprintk("Restoring governor %s for cpu %d\n",
850 policy->governor->name, cpu);
851 }
852#endif
853
835481d9 854 for_each_cpu(j, policy->cpus) {
8ff69732
DJ
855 if (cpu == j)
856 continue;
857
29464f28
DJ
858 /* Check for existing affected CPUs.
859 * They may not be aware of it due to CPU Hotplug.
8ff69732 860 */
29464f28 861 managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
8ff69732 862 if (unlikely(managed_policy)) {
5a01f2e8
VP
863
864 /* Set proper policy_cpu */
865 unlock_policy_rwsem_write(cpu);
866 per_cpu(policy_cpu, cpu) = managed_policy->cpu;
867
868 if (lock_policy_rwsem_write(cpu) < 0)
869 goto err_out_driver_exit;
870
8ff69732 871 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 872 cpumask_copy(managed_policy->cpus, policy->cpus);
7a6aedfa 873 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
8ff69732
DJ
874 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
875
876 dprintk("CPU already managed, adding link\n");
0142f9dc
AD
877 ret = sysfs_create_link(&sys_dev->kobj,
878 &managed_policy->kobj,
879 "cpufreq");
45709118 880 if (ret)
0142f9dc 881 goto err_out_driver_exit;
8ff69732
DJ
882
883 cpufreq_debug_enable_ratelimit();
8ff69732
DJ
884 ret = 0;
885 goto err_out_driver_exit; /* call driver->exit() */
886 }
887 }
888#endif
1da177e4
LT
889 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
890
891 /* prepare interface data */
129f8ae9
DJ
892 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
893 "cpufreq");
894 if (ret)
895 goto err_out_driver_exit;
896
897 /* set up files for this cpu device */
898 drv_attr = cpufreq_driver->attr;
899 while ((drv_attr) && (*drv_attr)) {
900 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
45709118 901 if (ret)
0a4b2ccc 902 goto err_out_driver_exit;
129f8ae9
DJ
903 drv_attr++;
904 }
905 if (cpufreq_driver->get) {
906 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
907 if (ret)
908 goto err_out_driver_exit;
909 }
910 if (cpufreq_driver->target) {
911 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
45709118 912 if (ret)
0a4b2ccc
TR
913 goto err_out_driver_exit;
914 }
1da177e4
LT
915
916 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 917 for_each_cpu(j, policy->cpus) {
7a6aedfa 918 per_cpu(cpufreq_cpu_data, j) = policy;
5a01f2e8
VP
919 per_cpu(policy_cpu, j) = policy->cpu;
920 }
1da177e4 921 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
8ff69732
DJ
922
923 /* symlink affected CPUs */
835481d9 924 for_each_cpu(j, policy->cpus) {
8ff69732
DJ
925 if (j == cpu)
926 continue;
927 if (!cpu_online(j))
928 continue;
929
1f8b2c9d 930 dprintk("CPU %u already managed, adding link\n", j);
8ff69732
DJ
931 cpufreq_cpu_get(cpu);
932 cpu_sys_dev = get_cpu_sysdev(j);
0142f9dc
AD
933 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
934 "cpufreq");
45709118 935 if (ret)
0142f9dc 936 goto err_out_unregister;
8ff69732
DJ
937 }
938
1da177e4
LT
939 policy->governor = NULL; /* to assure that the starting sequence is
940 * run in cpufreq_set_policy */
87c32271 941
1da177e4 942 /* set default policy */
22c970f3
TR
943 ret = __cpufreq_set_policy(policy, &new_policy);
944 policy->user_policy.policy = policy->policy;
084f3493 945 policy->user_policy.governor = policy->governor;
22c970f3 946
1da177e4
LT
947 if (ret) {
948 dprintk("setting policy failed\n");
949 goto err_out_unregister;
950 }
951
dca02613
LW
952 unlock_policy_rwsem_write(cpu);
953
038c5b3e 954 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 955 module_put(cpufreq_driver->owner);
1da177e4
LT
956 dprintk("initialization complete\n");
957 cpufreq_debug_enable_ratelimit();
87c32271 958
1da177e4
LT
959 return 0;
960
961
962err_out_unregister:
963 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 964 for_each_cpu(j, policy->cpus)
7a6aedfa 965 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
966 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
967
c10997f6 968 kobject_put(&policy->kobj);
1da177e4
LT
969 wait_for_completion(&policy->kobj_unregister);
970
8085e1f1
VP
971err_out_driver_exit:
972 if (cpufreq_driver->exit)
973 cpufreq_driver->exit(policy);
974
1da177e4 975err_out:
45709118 976 unlock_policy_rwsem_write(cpu);
1da177e4
LT
977 kfree(policy);
978
979nomem_out:
980 module_put(cpufreq_driver->owner);
c32b6b8e 981module_out:
1da177e4
LT
982 cpufreq_debug_enable_ratelimit();
983 return ret;
984}
985
986
987/**
5a01f2e8 988 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
989 *
990 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
991 * Caller should already have policy_rwsem in write mode for this CPU.
992 * This routine frees the rwsem before returning.
1da177e4 993 */
905d77cd 994static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1da177e4
LT
995{
996 unsigned int cpu = sys_dev->id;
997 unsigned long flags;
998 struct cpufreq_policy *data;
999#ifdef CONFIG_SMP
e738cf6d 1000 struct sys_device *cpu_sys_dev;
1da177e4
LT
1001 unsigned int j;
1002#endif
1003
1004 cpufreq_debug_disable_ratelimit();
1005 dprintk("unregistering CPU %u\n", cpu);
1006
1007 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 1008 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
1009
1010 if (!data) {
1011 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1012 cpufreq_debug_enable_ratelimit();
5a01f2e8 1013 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1014 return -EINVAL;
1015 }
7a6aedfa 1016 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1017
1018
1019#ifdef CONFIG_SMP
1020 /* if this isn't the CPU which is the parent of the kobj, we
32ee8c3e 1021 * only need to unlink, put and exit
1da177e4
LT
1022 */
1023 if (unlikely(cpu != data->cpu)) {
1024 dprintk("removing link\n");
835481d9 1025 cpumask_clear_cpu(cpu, data->cpus);
1da177e4
LT
1026 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1027 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
1da177e4
LT
1028 cpufreq_cpu_put(data);
1029 cpufreq_debug_enable_ratelimit();
5a01f2e8 1030 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1031 return 0;
1032 }
1033#endif
1034
1da177e4 1035#ifdef CONFIG_SMP
084f3493
TR
1036
1037#ifdef CONFIG_HOTPLUG_CPU
7a6aedfa 1038 per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
084f3493
TR
1039#endif
1040
1da177e4
LT
1041 /* if we have other CPUs still registered, we need to unlink them,
1042 * or else wait_for_completion below will lock up. Clean the
7a6aedfa
MT
1043 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1044 * the sysfs links afterwards.
1da177e4 1045 */
835481d9
RR
1046 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1047 for_each_cpu(j, data->cpus) {
1da177e4
LT
1048 if (j == cpu)
1049 continue;
7a6aedfa 1050 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1051 }
1052 }
1053
1054 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1055
835481d9
RR
1056 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1057 for_each_cpu(j, data->cpus) {
1da177e4
LT
1058 if (j == cpu)
1059 continue;
1060 dprintk("removing link for cpu %u\n", j);
084f3493 1061#ifdef CONFIG_HOTPLUG_CPU
7a6aedfa 1062 per_cpu(cpufreq_cpu_governor, j) = data->governor;
084f3493 1063#endif
d434fca7
AR
1064 cpu_sys_dev = get_cpu_sysdev(j);
1065 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
1da177e4
LT
1066 cpufreq_cpu_put(data);
1067 }
1068 }
1069#else
1070 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1071#endif
1072
42a06f21
MD
1073 unlock_policy_rwsem_write(cpu);
1074
1da177e4
LT
1075 if (cpufreq_driver->target)
1076 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 1077
1da177e4
LT
1078 kobject_put(&data->kobj);
1079
1080 /* we need to make sure that the underlying kobj is actually
32ee8c3e 1081 * not referenced anymore by anybody before we proceed with
1da177e4
LT
1082 * unloading.
1083 */
1084 dprintk("waiting for dropping of refcount\n");
1085 wait_for_completion(&data->kobj_unregister);
1086 dprintk("wait complete\n");
1087
1088 if (cpufreq_driver->exit)
1089 cpufreq_driver->exit(data);
1090
835481d9
RR
1091 free_cpumask_var(data->related_cpus);
1092 free_cpumask_var(data->cpus);
1da177e4 1093 kfree(data);
835481d9 1094 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1095
1096 cpufreq_debug_enable_ratelimit();
1da177e4
LT
1097 return 0;
1098}
1099
1100
905d77cd 1101static int cpufreq_remove_dev(struct sys_device *sys_dev)
5a01f2e8
VP
1102{
1103 unsigned int cpu = sys_dev->id;
1104 int retval;
ec28297a
VP
1105
1106 if (cpu_is_offline(cpu))
1107 return 0;
1108
5a01f2e8
VP
1109 if (unlikely(lock_policy_rwsem_write(cpu)))
1110 BUG();
1111
1112 retval = __cpufreq_remove_dev(sys_dev);
1113 return retval;
1114}
1115
1116
65f27f38 1117static void handle_update(struct work_struct *work)
1da177e4 1118{
65f27f38
DH
1119 struct cpufreq_policy *policy =
1120 container_of(work, struct cpufreq_policy, update);
1121 unsigned int cpu = policy->cpu;
1da177e4
LT
1122 dprintk("handle_update for cpu %u called\n", cpu);
1123 cpufreq_update_policy(cpu);
1124}
1125
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
e08f5f5b
GS
1135static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1136 unsigned int new_freq)
1da177e4
LT
1137{
1138 struct cpufreq_freqs freqs;
1139
b10eec22 1140 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1141 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1142
1143 freqs.cpu = cpu;
1144 freqs.old = old_freq;
1145 freqs.new = new_freq;
1146 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1147 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1148}
1149
1150
32ee8c3e 1151/**
4ab70df4 1152 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1153 * @cpu: CPU number
1154 *
1155 * This is the last known freq, without actually getting it from the driver.
1156 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1157 */
1158unsigned int cpufreq_quick_get(unsigned int cpu)
1159{
1160 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1161 unsigned int ret_freq = 0;
95235ca2
VP
1162
1163 if (policy) {
e08f5f5b 1164 ret_freq = policy->cur;
95235ca2
VP
1165 cpufreq_cpu_put(policy);
1166 }
1167
4d34a67d 1168 return ret_freq;
95235ca2
VP
1169}
1170EXPORT_SYMBOL(cpufreq_quick_get);
1171
1172
5a01f2e8 1173static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1174{
7a6aedfa 1175 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1176 unsigned int ret_freq = 0;
1da177e4 1177
1da177e4 1178 if (!cpufreq_driver->get)
4d34a67d 1179 return ret_freq;
1da177e4 1180
e08f5f5b 1181 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1182
e08f5f5b
GS
1183 if (ret_freq && policy->cur &&
1184 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1185 /* verify no discrepancy between actual and
1186 saved value exists */
1187 if (unlikely(ret_freq != policy->cur)) {
1188 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1189 schedule_work(&policy->update);
1190 }
1191 }
1192
4d34a67d 1193 return ret_freq;
5a01f2e8 1194}
1da177e4 1195
5a01f2e8
VP
1196/**
1197 * cpufreq_get - get the current CPU frequency (in kHz)
1198 * @cpu: CPU number
1199 *
1200 * Get the CPU current (static) CPU frequency
1201 */
1202unsigned int cpufreq_get(unsigned int cpu)
1203{
1204 unsigned int ret_freq = 0;
1205 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1206
1207 if (!policy)
1208 goto out;
1209
1210 if (unlikely(lock_policy_rwsem_read(cpu)))
1211 goto out_policy;
1212
1213 ret_freq = __cpufreq_get(cpu);
1214
1215 unlock_policy_rwsem_read(cpu);
1da177e4 1216
5a01f2e8
VP
1217out_policy:
1218 cpufreq_cpu_put(policy);
1219out:
4d34a67d 1220 return ret_freq;
1da177e4
LT
1221}
1222EXPORT_SYMBOL(cpufreq_get);
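
/*
 * Illustrative sketch (not part of the original file): the difference between
 * the two getters above. cpufreq_quick_get() only reports the cached
 * policy->cur, while cpufreq_get() may query the hardware via the driver's
 * ->get() callback. The helper name is hypothetical.
 */
static void __maybe_unused example_compare_freqs(unsigned int cpu)
{
	unsigned int cached = cpufreq_quick_get(cpu);	/* policy->cur */
	unsigned int actual = cpufreq_get(cpu);		/* driver ->get() */

	if (cached != actual)
		pr_debug("cpu %u: cached %u kHz, hardware reports %u kHz\n",
			 cpu, cached, actual);
}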
1223
1224
42d4dc3f
BH
1225/**
1226 * cpufreq_suspend - let the low level driver prepare for suspend
1227 */
1228
905d77cd 1229static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
42d4dc3f
BH
1230{
1231 int cpu = sysdev->id;
e08f5f5b 1232 int ret = 0;
42d4dc3f
BH
1233 unsigned int cur_freq = 0;
1234 struct cpufreq_policy *cpu_policy;
1235
0e37b159 1236 dprintk("suspending cpu %u\n", cpu);
42d4dc3f
BH
1237
1238 if (!cpu_online(cpu))
1239 return 0;
1240
1241 /* we may be lax here as interrupts are off. Nonetheless
1242 * we need to grab the correct cpu policy, as to check
1243 * whether we really run on this CPU.
1244 */
1245
1246 cpu_policy = cpufreq_cpu_get(cpu);
1247 if (!cpu_policy)
1248 return -EINVAL;
1249
1250 /* only handle each CPU group once */
c9060494
DJ
1251 if (unlikely(cpu_policy->cpu != cpu))
1252 goto out;
42d4dc3f
BH
1253
1254 if (cpufreq_driver->suspend) {
e00d9967 1255 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
42d4dc3f
BH
1256 if (ret) {
1257 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1258 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1259 goto out;
42d4dc3f
BH
1260 }
1261 }
1262
42d4dc3f
BH
1263 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
1264 goto out;
1265
1266 if (cpufreq_driver->get)
1267 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1268
1269 if (!cur_freq || !cpu_policy->cur) {
1270 printk(KERN_ERR "cpufreq: suspend failed to assert current "
1271 "frequency is what timing core thinks it is.\n");
1272 goto out;
1273 }
1274
1275 if (unlikely(cur_freq != cpu_policy->cur)) {
1276 struct cpufreq_freqs freqs;
1277
1278 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
b10eec22 1279 dprintk("Warning: CPU frequency is %u, "
42d4dc3f
BH
1280 "cpufreq assumed %u kHz.\n",
1281 cur_freq, cpu_policy->cur);
1282
1283 freqs.cpu = cpu;
1284 freqs.old = cpu_policy->cur;
1285 freqs.new = cur_freq;
1286
b4dfdbb3 1287 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
42d4dc3f
BH
1288 CPUFREQ_SUSPENDCHANGE, &freqs);
1289 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1290
1291 cpu_policy->cur = cur_freq;
1292 }
1293
7d5e350f 1294out:
42d4dc3f 1295 cpufreq_cpu_put(cpu_policy);
c9060494 1296 return ret;
42d4dc3f
BH
1297}
1298
1da177e4
LT
1299/**
1300 * cpufreq_resume - restore proper CPU frequency handling after resume
1301 *
1302 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1303 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
42d4dc3f
BH
1304 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
1305 * restored.
1da177e4 1306 */
905d77cd 1307static int cpufreq_resume(struct sys_device *sysdev)
1da177e4
LT
1308{
1309 int cpu = sysdev->id;
e08f5f5b 1310 int ret = 0;
1da177e4
LT
1311 struct cpufreq_policy *cpu_policy;
1312
1313 dprintk("resuming cpu %u\n", cpu);
1314
1315 if (!cpu_online(cpu))
1316 return 0;
1317
1318 /* we may be lax here as interrupts are off. Nonetheless
1319 * we need to grab the correct cpu policy, as to check
1320 * whether we really run on this CPU.
1321 */
1322
1323 cpu_policy = cpufreq_cpu_get(cpu);
1324 if (!cpu_policy)
1325 return -EINVAL;
1326
1327 /* only handle each CPU group once */
c9060494
DJ
1328 if (unlikely(cpu_policy->cpu != cpu))
1329 goto fail;
1da177e4
LT
1330
1331 if (cpufreq_driver->resume) {
1332 ret = cpufreq_driver->resume(cpu_policy);
1333 if (ret) {
1334 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1335 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1336 goto fail;
1da177e4
LT
1337 }
1338 }
1339
1340 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1341 unsigned int cur_freq = 0;
1342
1343 if (cpufreq_driver->get)
1344 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1345
1346 if (!cur_freq || !cpu_policy->cur) {
42d4dc3f
BH
1347 printk(KERN_ERR "cpufreq: resume failed to assert "
1348 "current frequency is what timing core "
1349 "thinks it is.\n");
1da177e4
LT
1350 goto out;
1351 }
1352
1353 if (unlikely(cur_freq != cpu_policy->cur)) {
1354 struct cpufreq_freqs freqs;
1355
ac09f698 1356 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
a4a9df58 1357 dprintk("Warning: CPU frequency "
ac09f698
BH
1358 "is %u, cpufreq assumed %u kHz.\n",
1359 cur_freq, cpu_policy->cur);
1da177e4
LT
1360
1361 freqs.cpu = cpu;
1362 freqs.old = cpu_policy->cur;
1363 freqs.new = cur_freq;
1364
b4dfdbb3 1365 srcu_notifier_call_chain(
e041c683 1366 &cpufreq_transition_notifier_list,
42d4dc3f 1367 CPUFREQ_RESUMECHANGE, &freqs);
1da177e4
LT
1368 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1369
1370 cpu_policy->cur = cur_freq;
1371 }
1372 }
1373
1374out:
1375 schedule_work(&cpu_policy->update);
c9060494 1376fail:
1da177e4
LT
1377 cpufreq_cpu_put(cpu_policy);
1378 return ret;
1379}
1380
1381static struct sysdev_driver cpufreq_sysdev_driver = {
1382 .add = cpufreq_add_dev,
1383 .remove = cpufreq_remove_dev,
42d4dc3f 1384 .suspend = cpufreq_suspend,
1da177e4
LT
1385 .resume = cpufreq_resume,
1386};
1387
1388
1389/*********************************************************************
1390 * NOTIFIER LISTS INTERFACE *
1391 *********************************************************************/
1392
1393/**
1394 * cpufreq_register_notifier - register a driver with cpufreq
1395 * @nb: notifier function to register
1396 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1397 *
32ee8c3e 1398 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1399 * are notified about clock rate changes (once before and once after
1400 * the transition), or a list of drivers that are notified about
1401 * changes in cpufreq policy.
1402 *
1403 * This function may sleep, and has the same return conditions as
e041c683 1404 * blocking_notifier_chain_register.
1da177e4
LT
1405 */
1406int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1407{
1408 int ret;
1409
74212ca4
CEB
1410 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1411
1da177e4
LT
1412 switch (list) {
1413 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1414 ret = srcu_notifier_chain_register(
e041c683 1415 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1416 break;
1417 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1418 ret = blocking_notifier_chain_register(
1419 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1420 break;
1421 default:
1422 ret = -EINVAL;
1423 }
1da177e4
LT
1424
1425 return ret;
1426}
1427EXPORT_SYMBOL(cpufreq_register_notifier);
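
/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier with the API above. The callback and notifier_block
 * names are hypothetical.
 */
static int __maybe_unused example_transition_notify(struct notifier_block *nb,
						    unsigned long val,
						    void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu %u switched from %u to %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb __maybe_unused = {
	.notifier_call = example_transition_notify,
};

/* somewhere in module init:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */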
1428
1429
1430/**
1431 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1432 * @nb: notifier block to be unregistered
1433 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1434 *
1435 * Remove a driver from the CPU frequency notifier list.
1436 *
1437 * This function may sleep, and has the same return conditions as
e041c683 1438 * blocking_notifier_chain_unregister.
1da177e4
LT
1439 */
1440int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1441{
1442 int ret;
1443
1da177e4
LT
1444 switch (list) {
1445 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1446 ret = srcu_notifier_chain_unregister(
e041c683 1447 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1448 break;
1449 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1450 ret = blocking_notifier_chain_unregister(
1451 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1452 break;
1453 default:
1454 ret = -EINVAL;
1455 }
1da177e4
LT
1456
1457 return ret;
1458}
1459EXPORT_SYMBOL(cpufreq_unregister_notifier);
1460
1461
1462/*********************************************************************
1463 * GOVERNORS *
1464 *********************************************************************/
1465
1466
1467int __cpufreq_driver_target(struct cpufreq_policy *policy,
1468 unsigned int target_freq,
1469 unsigned int relation)
1470{
1471 int retval = -EINVAL;
c32b6b8e 1472
1da177e4
LT
1473 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1474 target_freq, relation);
1475 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1476 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1477
1da177e4
LT
1478 return retval;
1479}
1480EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1481
1da177e4
LT
1482int cpufreq_driver_target(struct cpufreq_policy *policy,
1483 unsigned int target_freq,
1484 unsigned int relation)
1485{
f1829e4a 1486 int ret = -EINVAL;
1da177e4
LT
1487
1488 policy = cpufreq_cpu_get(policy->cpu);
1489 if (!policy)
f1829e4a 1490 goto no_policy;
1da177e4 1491
5a01f2e8 1492 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1493 goto fail;
1da177e4
LT
1494
1495 ret = __cpufreq_driver_target(policy, target_freq, relation);
1496
5a01f2e8 1497 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1498
f1829e4a 1499fail:
1da177e4 1500 cpufreq_cpu_put(policy);
f1829e4a 1501no_policy:
1da177e4
LT
1502 return ret;
1503}
1504EXPORT_SYMBOL_GPL(cpufreq_driver_target);
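
/*
 * Illustrative sketch (not part of the original file): a caller asking for the
 * lowest available frequency that is at least 800 MHz. The value is
 * hypothetical; target_freq is always given in kHz.
 */
static void __maybe_unused example_request_min_800mhz(struct cpufreq_policy *policy)
{
	cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
}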
1505
bf0b90e3 1506int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1507{
1508 int ret = 0;
1509
1510 policy = cpufreq_cpu_get(policy->cpu);
1511 if (!policy)
1512 return -EINVAL;
1513
bf0b90e3 1514 if (cpu_online(cpu) && cpufreq_driver->getavg)
1515 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1516
dfde5d62
VP
1517 cpufreq_cpu_put(policy);
1518 return ret;
1519}
5a01f2e8 1520EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1521
153d7f3f 1522/*
153d7f3f
AV
1523 * when "event" is CPUFREQ_GOV_LIMITS
1524 */
1da177e4 1525
e08f5f5b
GS
1526static int __cpufreq_governor(struct cpufreq_policy *policy,
1527 unsigned int event)
1da177e4 1528{
cc993cab 1529 int ret;
6afde10c
TR
1530
1531 /* Only must be defined when default governor is known to have latency
1532 restrictions, like e.g. conservative or ondemand.
1533 That this is the case is already ensured in Kconfig
1534 */
1535#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1536 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1537#else
1538 struct cpufreq_governor *gov = NULL;
1539#endif
1c256245
TR
1540
1541 if (policy->governor->max_transition_latency &&
1542 policy->cpuinfo.transition_latency >
1543 policy->governor->max_transition_latency) {
6afde10c
TR
1544 if (!gov)
1545 return -EINVAL;
1546 else {
1547 printk(KERN_WARNING "%s governor failed, too long"
1548 " transition latency of HW, fallback"
1549 " to %s governor\n",
1550 policy->governor->name,
1551 gov->name);
1552 policy->governor = gov;
1553 }
1c256245 1554 }
1da177e4
LT
1555
1556 if (!try_module_get(policy->governor->owner))
1557 return -EINVAL;
1558
e08f5f5b
GS
1559 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1560 policy->cpu, event);
1da177e4
LT
1561 ret = policy->governor->governor(policy, event);
1562
e08f5f5b
GS
1563 /* we keep one module reference alive for
1564 each CPU governed by this CPU */
1da177e4
LT
1565 if ((event != CPUFREQ_GOV_START) || ret)
1566 module_put(policy->governor->owner);
1567 if ((event == CPUFREQ_GOV_STOP) && !ret)
1568 module_put(policy->governor->owner);
1569
1570 return ret;
1571}
1572
1573
1da177e4
LT
1574int cpufreq_register_governor(struct cpufreq_governor *governor)
1575{
3bcb09a3 1576 int err;
1da177e4
LT
1577
1578 if (!governor)
1579 return -EINVAL;
1580
3fc54d37 1581 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1582
3bcb09a3
JF
1583 err = -EBUSY;
1584 if (__find_governor(governor->name) == NULL) {
1585 err = 0;
1586 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1587 }
1da177e4 1588
32ee8c3e 1589 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1590 return err;
1da177e4
LT
1591}
1592EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1593
1594
1595void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1596{
1597 if (!governor)
1598 return;
1599
3fc54d37 1600 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1601 list_del(&governor->governor_list);
3fc54d37 1602 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1603 return;
1604}
1605EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
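
/*
 * Illustrative sketch (not part of the original file): the minimal shape of a
 * governor registered through the API above. The names and the trivial
 * "always run at maximum" policy are hypothetical.
 */
static int __maybe_unused example_governor_fn(struct cpufreq_policy *policy,
					      unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	}
	return 0;
}

static struct cpufreq_governor example_governor __maybe_unused = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* registered with: cpufreq_register_governor(&example_governor); */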
1606
1607
1608
1609/*********************************************************************
1610 * POLICY INTERFACE *
1611 *********************************************************************/
1612
1613/**
1614 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1615 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1616 * is written
1da177e4
LT
1617 *
1618 * Reads the current cpufreq policy.
1619 */
1620int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1621{
1622 struct cpufreq_policy *cpu_policy;
1623 if (!policy)
1624 return -EINVAL;
1625
1626 cpu_policy = cpufreq_cpu_get(cpu);
1627 if (!cpu_policy)
1628 return -EINVAL;
1629
1da177e4 1630 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1631
1632 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1633 return 0;
1634}
1635EXPORT_SYMBOL(cpufreq_get_policy);
1636
1637
153d7f3f 1638/*
e08f5f5b
GS
1639 * data : current policy.
1640 * policy : policy to be set.
153d7f3f 1641 */
e08f5f5b
GS
1642static int __cpufreq_set_policy(struct cpufreq_policy *data,
1643 struct cpufreq_policy *policy)
1da177e4
LT
1644{
1645 int ret = 0;
1646
1647 cpufreq_debug_disable_ratelimit();
1648 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1649 policy->min, policy->max);
1650
e08f5f5b
GS
1651 memcpy(&policy->cpuinfo, &data->cpuinfo,
1652 sizeof(struct cpufreq_cpuinfo));
1da177e4 1653
53391fa2 1654 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1655 ret = -EINVAL;
1656 goto error_out;
1657 }
1658
1da177e4
LT
1659 /* verify the cpu speed can be set within this limit */
1660 ret = cpufreq_driver->verify(policy);
1661 if (ret)
1662 goto error_out;
1663
1da177e4 1664 /* adjust if necessary - all reasons */
e041c683
AS
1665 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1666 CPUFREQ_ADJUST, policy);
1da177e4
LT
1667
1668 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1669 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1670 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1671
1672 /* verify the cpu speed can be set within this limit,
1673 which might be different to the first one */
1674 ret = cpufreq_driver->verify(policy);
e041c683 1675 if (ret)
1da177e4 1676 goto error_out;
1da177e4
LT
1677
1678 /* notification of the new policy */
e041c683
AS
1679 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1680 CPUFREQ_NOTIFY, policy);
1da177e4 1681
7d5e350f
DJ
1682 data->min = policy->min;
1683 data->max = policy->max;
1da177e4 1684
e08f5f5b
GS
1685 dprintk("new min and max freqs are %u - %u kHz\n",
1686 data->min, data->max);
1da177e4
LT
1687
1688 if (cpufreq_driver->setpolicy) {
1689 data->policy = policy->policy;
1690 dprintk("setting range\n");
1691 ret = cpufreq_driver->setpolicy(policy);
1692 } else {
1693 if (policy->governor != data->governor) {
1694 /* save old, working values */
1695 struct cpufreq_governor *old_gov = data->governor;
1696
1697 dprintk("governor switch\n");
1698
1699 /* end old governor */
1700 if (data->governor)
1701 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1702
1703 /* start new governor */
1704 data->governor = policy->governor;
1705 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1706 /* new governor failed, so re-start old one */
e08f5f5b
GS
1707 dprintk("starting governor %s failed\n",
1708 data->governor->name);
1da177e4
LT
1709 if (old_gov) {
1710 data->governor = old_gov;
e08f5f5b
GS
1711 __cpufreq_governor(data,
1712 CPUFREQ_GOV_START);
1da177e4
LT
1713 }
1714 ret = -EINVAL;
1715 goto error_out;
1716 }
1717 /* might be a policy change, too, so fall through */
1718 }
1719 dprintk("governor: change or update limits\n");
1720 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1721 }
1722
7d5e350f 1723error_out:
1da177e4
LT
1724 cpufreq_debug_enable_ratelimit();
1725 return ret;
1726}
1727
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different requirements
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
1736{
1737 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1738 struct cpufreq_policy policy;
f1829e4a 1739 int ret;
1da177e4 1740
f1829e4a
JL
1741 if (!data) {
1742 ret = -ENODEV;
1743 goto no_policy;
1744 }
1da177e4 1745
f1829e4a
JL
1746 if (unlikely(lock_policy_rwsem_write(cpu))) {
1747 ret = -EINVAL;
1748 goto fail;
1749 }
1da177e4
LT
1750
1751 dprintk("updating policy for CPU %u\n", cpu);
7d5e350f 1752 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1753 policy.min = data->user_policy.min;
1754 policy.max = data->user_policy.max;
1755 policy.policy = data->user_policy.policy;
1756 policy.governor = data->user_policy.governor;
1757
0961dd0d
TR
1758 /* BIOS might change freq behind our back
1759 -> ask driver for current freq and notify governors about a change */
1760 if (cpufreq_driver->get) {
1761 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3
TR
1762 if (!data->cur) {
1763 dprintk("Driver did not initialize current freq");
1764 data->cur = policy.cur;
1765 } else {
1766 if (data->cur != policy.cur)
e08f5f5b
GS
1767 cpufreq_out_of_sync(cpu, data->cur,
1768 policy.cur);
a85f7bd3 1769 }
0961dd0d
TR
1770 }
1771
1da177e4
LT
1772 ret = __cpufreq_set_policy(data, &policy);
1773
5a01f2e8
VP
1774 unlock_policy_rwsem_write(cpu);
1775
f1829e4a 1776fail:
1da177e4 1777 cpufreq_cpu_put(data);
f1829e4a 1778no_policy:
1da177e4
LT
1779 return ret;
1780}
1781EXPORT_SYMBOL(cpufreq_update_policy);
1782
dd184a01 1783static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1784 unsigned long action, void *hcpu)
1785{
1786 unsigned int cpu = (unsigned long)hcpu;
c32b6b8e
AR
1787 struct sys_device *sys_dev;
1788
1789 sys_dev = get_cpu_sysdev(cpu);
c32b6b8e
AR
1790 if (sys_dev) {
1791 switch (action) {
1792 case CPU_ONLINE:
8bb78442 1793 case CPU_ONLINE_FROZEN:
c32b6b8e
AR
1794 cpufreq_add_dev(sys_dev);
1795 break;
1796 case CPU_DOWN_PREPARE:
8bb78442 1797 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1798 if (unlikely(lock_policy_rwsem_write(cpu)))
1799 BUG();
1800
5a01f2e8 1801 __cpufreq_remove_dev(sys_dev);
c32b6b8e 1802 break;
5a01f2e8 1803 case CPU_DOWN_FAILED:
8bb78442 1804 case CPU_DOWN_FAILED_FROZEN:
5a01f2e8 1805 cpufreq_add_dev(sys_dev);
c32b6b8e
AR
1806 break;
1807 }
1808 }
1809 return NOTIFY_OK;
1810}
1811
f6ebef30 1812static struct notifier_block __refdata cpufreq_cpu_notifier =
c32b6b8e
AR
1813{
1814 .notifier_call = cpufreq_cpu_callback,
1815};
1da177e4
LT
1816
1817/*********************************************************************
1818 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1819 *********************************************************************/
1820
1821/**
1822 * cpufreq_register_driver - register a CPU Frequency driver
1823 * @driver_data: A struct cpufreq_driver containing the values#
1824 * submitted by the CPU Frequency driver.
1825 *
32ee8c3e 1826 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1827 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1828 * (and isn't unregistered in the meantime).
1da177e4
LT
1829 *
1830 */
221dee28 1831int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1832{
1833 unsigned long flags;
1834 int ret;
1835
1836 if (!driver_data || !driver_data->verify || !driver_data->init ||
1837 ((!driver_data->setpolicy) && (!driver_data->target)))
1838 return -EINVAL;
1839
1840 dprintk("trying to register driver %s\n", driver_data->name);
1841
1842 if (driver_data->setpolicy)
1843 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1844
1845 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1846 if (cpufreq_driver) {
1847 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1848 return -EBUSY;
1849 }
1850 cpufreq_driver = driver_data;
1851 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1852
7a6aedfa
MT
1853 ret = sysdev_driver_register(&cpu_sysdev_class,
1854 &cpufreq_sysdev_driver);
1da177e4
LT
1855
1856 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1857 int i;
1858 ret = -ENODEV;
1859
1860 /* check for at least one working CPU */
7a6aedfa
MT
1861 for (i = 0; i < nr_cpu_ids; i++)
1862 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1863 ret = 0;
7a6aedfa
MT
1864 break;
1865 }
1da177e4
LT
1866
1867 /* if all ->init() calls failed, unregister */
1868 if (ret) {
e08f5f5b
GS
1869 dprintk("no CPU initialized for driver %s\n",
1870 driver_data->name);
1871 sysdev_driver_unregister(&cpu_sysdev_class,
1872 &cpufreq_sysdev_driver);
1da177e4
LT
1873
1874 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1875 cpufreq_driver = NULL;
1876 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1877 }
1878 }
1879
1880 if (!ret) {
65edc68c 1881 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1882 dprintk("driver %s up and running\n", driver_data->name);
1883 cpufreq_debug_enable_ratelimit();
1884 }
1885
4d34a67d 1886 return ret;
1da177e4
LT
1887}
1888EXPORT_SYMBOL_GPL(cpufreq_register_driver);
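
/*
 * Illustrative sketch (not part of the original file): the minimal fields a
 * scaling driver fills in before calling cpufreq_register_driver(). All names
 * and frequencies are hypothetical; a real driver must also provide either
 * ->target or ->setpolicy, as checked above.
 */
static int __maybe_unused example_drv_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = policy->min = 600000;	/* kHz */
	policy->cpuinfo.max_freq = policy->max = 1200000;	/* kHz */
	policy->cpuinfo.transition_latency = 100000;		/* ns */
	policy->cur = 1200000;
	return 0;
}

static int __maybe_unused example_drv_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver __maybe_unused = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_drv_init,
	.verify	= example_drv_verify,
};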
1889
1890
1891/**
1892 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1893 *
32ee8c3e 1894 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1895 * the right to do so, i.e. if you have succeeded in initialising before!
1896 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1897 * currently not initialised.
1898 */
221dee28 1899int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1900{
1901 unsigned long flags;
1902
1903 cpufreq_debug_disable_ratelimit();
1904
1905 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1906 cpufreq_debug_enable_ratelimit();
1907 return -EINVAL;
1908 }
1909
1910 dprintk("unregistering driver %s\n", driver->name);
1911
1912 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
65edc68c 1913 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1914
1915 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1916 cpufreq_driver = NULL;
1917 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1918
1919 return 0;
1920}
1921EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1922
1923static int __init cpufreq_core_init(void)
1924{
1925 int cpu;
1926
1927 for_each_possible_cpu(cpu) {
1928 per_cpu(policy_cpu, cpu) = -1;
1929 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1930 }
1931 return 0;
1932}
1933
1934core_initcall(cpufreq_core_init);