Merge branch 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
1da177e4
LT
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/notifier.h>
22#include <linux/cpufreq.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/completion.h>
3fc54d37 30#include <linux/mutex.h>
e00e56df 31#include <linux/syscore_ops.h>
1da177e4 32
6f4f2723
TR
33#include <trace/events/power.h>
34
e08f5f5b
GS
35#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
36 "cpufreq-core", msg)
1da177e4
LT
37
38/**
cd878479 39 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
40 * level driver of CPUFreq support, and its spinlock. This lock
41 * also protects the cpufreq_cpu_data array.
42 */
7d5e350f 43static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 44static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4
LT
49static DEFINE_SPINLOCK(cpufreq_driver_lock);
50
5a01f2e8
VP
51/*
52 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
53 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 *
55 * The rules for this semaphore:
56 * - Any routine that wants to read from the policy structure will
57 * do a down_read on this semaphore.
58 * - Any routine that will write to the policy structure and/or may take away
59 * the policy altogether (eg. CPU hotplug), will hold this lock in write
60 * mode before doing so.
61 *
62 * Additional rules:
63 * - All holders of the lock should check to make sure that the CPU they
64 * are concerned with are online after they get the lock.
65 * - Governor routines that can be called in cpufreq hotplug path should not
66 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
67 * - Lock should not be held across
68 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 69 */
f1625066 70static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
71static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
72
73#define lock_policy_rwsem(mode, cpu) \
226528c6 74static int lock_policy_rwsem_##mode \
5a01f2e8
VP
75(int cpu) \
76{ \
f1625066 77 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
78 BUG_ON(policy_cpu == -1); \
79 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
80 if (unlikely(!cpu_online(cpu))) { \
81 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
82 return -1; \
83 } \
84 \
85 return 0; \
86}
87
88lock_policy_rwsem(read, cpu);
5a01f2e8
VP
89
90lock_policy_rwsem(write, cpu);
5a01f2e8 91
226528c6 92static void unlock_policy_rwsem_read(int cpu)
5a01f2e8 93{
f1625066 94 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
95 BUG_ON(policy_cpu == -1);
96 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
97}
5a01f2e8 98
226528c6 99static void unlock_policy_rwsem_write(int cpu)
5a01f2e8 100{
f1625066 101 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
102 BUG_ON(policy_cpu == -1);
103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104}
5a01f2e8
VP
105
106
1da177e4 107/* internal prototypes */
29464f28
DJ
108static int __cpufreq_governor(struct cpufreq_policy *policy,
109 unsigned int event);
5a01f2e8 110static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 111static void handle_update(struct work_struct *work);
1da177e4
LT
112
113/**
32ee8c3e
DJ
114 * Two notifier lists: the "policy" list is involved in the
115 * validation process for a new CPU frequency policy; the
1da177e4
LT
116 * "transition" list for kernel code that needs to handle
117 * changes to devices when the CPU clock speed changes.
118 * The mutex locks both lists.
119 */
e041c683 120static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 121static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 122
74212ca4 123static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
124static int __init init_cpufreq_transition_notifier_list(void)
125{
126 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 127 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
128 return 0;
129}
b3438f82 130pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4
LT
131
132static LIST_HEAD(cpufreq_governor_list);
29464f28 133static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 134
7d5e350f 135struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4
LT
136{
137 struct cpufreq_policy *data;
138 unsigned long flags;
139
7a6aedfa 140 if (cpu >= nr_cpu_ids)
1da177e4
LT
141 goto err_out;
142
143 /* get the cpufreq driver */
144 spin_lock_irqsave(&cpufreq_driver_lock, flags);
145
146 if (!cpufreq_driver)
147 goto err_out_unlock;
148
149 if (!try_module_get(cpufreq_driver->owner))
150 goto err_out_unlock;
151
152
153 /* get the CPU */
7a6aedfa 154 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
155
156 if (!data)
157 goto err_out_put_module;
158
159 if (!kobject_get(&data->kobj))
160 goto err_out_put_module;
161
1da177e4 162 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
163 return data;
164
7d5e350f 165err_out_put_module:
1da177e4 166 module_put(cpufreq_driver->owner);
7d5e350f 167err_out_unlock:
1da177e4 168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 169err_out:
1da177e4
LT
170 return NULL;
171}
172EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
173
7d5e350f 174
1da177e4
LT
175void cpufreq_cpu_put(struct cpufreq_policy *data)
176{
177 kobject_put(&data->kobj);
178 module_put(cpufreq_driver->owner);
179}
180EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
181
182
183/*********************************************************************
184 * UNIFIED DEBUG HELPERS *
185 *********************************************************************/
186#ifdef CONFIG_CPU_FREQ_DEBUG
187
188/* what part(s) of the CPUfreq subsystem are debugged? */
189static unsigned int debug;
190
191/* is the debug output ratelimit'ed using printk_ratelimit? User can
192 * set or modify this value.
193 */
194static unsigned int debug_ratelimit = 1;
195
196/* is the printk_ratelimit'ing enabled? It's enabled after a successful
197 * loading of a cpufreq driver, temporarily disabled when a new policy
198 * is set, and disabled upon cpufreq driver removal
199 */
200static unsigned int disable_ratelimit = 1;
201static DEFINE_SPINLOCK(disable_ratelimit_lock);
202
858119e1 203static void cpufreq_debug_enable_ratelimit(void)
1da177e4
LT
204{
205 unsigned long flags;
206
207 spin_lock_irqsave(&disable_ratelimit_lock, flags);
208 if (disable_ratelimit)
209 disable_ratelimit--;
210 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
211}
212
858119e1 213static void cpufreq_debug_disable_ratelimit(void)
1da177e4
LT
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&disable_ratelimit_lock, flags);
218 disable_ratelimit++;
219 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
220}
221
e08f5f5b 222void cpufreq_debug_printk(unsigned int type, const char *prefix,
905d77cd 223 const char *fmt, ...)
1da177e4
LT
224{
225 char s[256];
226 va_list args;
227 unsigned int len;
228 unsigned long flags;
32ee8c3e 229
1da177e4
LT
230 WARN_ON(!prefix);
231 if (type & debug) {
232 spin_lock_irqsave(&disable_ratelimit_lock, flags);
e08f5f5b
GS
233 if (!disable_ratelimit && debug_ratelimit
234 && !printk_ratelimit()) {
1da177e4
LT
235 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
236 return;
237 }
238 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
239
240 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
241
242 va_start(args, fmt);
243 len += vsnprintf(&s[len], (256 - len), fmt, args);
244 va_end(args);
245
246 printk(s);
247
248 WARN_ON(len < 5);
249 }
250}
251EXPORT_SYMBOL(cpufreq_debug_printk);
252
253
254module_param(debug, uint, 0644);
e08f5f5b
GS
255MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
256 " 2 to debug drivers, and 4 to debug governors.");
1da177e4
LT
257
258module_param(debug_ratelimit, uint, 0644);
e08f5f5b
GS
259MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
260 " set to 0 to disable ratelimiting.");
1da177e4
LT
261
262#else /* !CONFIG_CPU_FREQ_DEBUG */
263
264static inline void cpufreq_debug_enable_ratelimit(void) { return; }
265static inline void cpufreq_debug_disable_ratelimit(void) { return; }
266
267#endif /* CONFIG_CPU_FREQ_DEBUG */
268
269
270/*********************************************************************
271 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
272 *********************************************************************/
273
274/**
275 * adjust_jiffies - adjust the system "loops_per_jiffy"
276 *
277 * This function alters the system "loops_per_jiffy" for the clock
278 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 279 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
280 * per-CPU loops_per_jiffy value wherever possible.
281 */
282#ifndef CONFIG_SMP
283static unsigned long l_p_j_ref;
284static unsigned int l_p_j_ref_freq;
285
858119e1 286static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
287{
288 if (ci->flags & CPUFREQ_CONST_LOOPS)
289 return;
290
291 if (!l_p_j_ref_freq) {
292 l_p_j_ref = loops_per_jiffy;
293 l_p_j_ref_freq = ci->old;
a4a9df58 294 dprintk("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 295 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4
LT
296 }
297 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
298 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
42d4dc3f 299 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
300 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
301 ci->new);
a4a9df58 302 dprintk("scaling loops_per_jiffy to %lu "
e08f5f5b 303 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
304 }
305}
306#else
e08f5f5b
GS
307static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
308{
309 return;
310}
1da177e4
LT
311#endif
312
313
314/**
e4472cb3
DJ
315 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
316 * on frequency transition.
1da177e4 317 *
e4472cb3
DJ
318 * This function calls the transition notifiers and the "adjust_jiffies"
319 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 320 * external effects.
1da177e4
LT
321 */
322void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
323{
e4472cb3
DJ
324 struct cpufreq_policy *policy;
325
1da177e4
LT
326 BUG_ON(irqs_disabled());
327
328 freqs->flags = cpufreq_driver->flags;
e4472cb3
DJ
329 dprintk("notification %u of frequency transition to %u kHz\n",
330 state, freqs->new);
1da177e4 331
7a6aedfa 332 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
1da177e4 333 switch (state) {
e4472cb3 334
1da177e4 335 case CPUFREQ_PRECHANGE:
32ee8c3e 336 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
337 * which is not equal to what the cpufreq core thinks is
338 * "old frequency".
1da177e4
LT
339 */
340 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
341 if ((policy) && (policy->cpu == freqs->cpu) &&
342 (policy->cur) && (policy->cur != freqs->old)) {
b10eec22 343 dprintk("Warning: CPU frequency is"
e4472cb3
DJ
344 " %u, cpufreq assumed %u kHz.\n",
345 freqs->old, policy->cur);
346 freqs->old = policy->cur;
1da177e4
LT
347 }
348 }
b4dfdbb3 349 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 350 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
351 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
352 break;
e4472cb3 353
1da177e4
LT
354 case CPUFREQ_POSTCHANGE:
355 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
6f4f2723
TR
356 dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
357 (unsigned long)freqs->cpu);
358 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
25e41933 359 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 360 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 361 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
362 if (likely(policy) && likely(policy->cpu == freqs->cpu))
363 policy->cur = freqs->new;
1da177e4
LT
364 break;
365 }
1da177e4
LT
366}
367EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
368
369
370
371/*********************************************************************
372 * SYSFS INTERFACE *
373 *********************************************************************/
374
3bcb09a3
JF
375static struct cpufreq_governor *__find_governor(const char *str_governor)
376{
377 struct cpufreq_governor *t;
378
379 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 380 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
381 return t;
382
383 return NULL;
384}
385
1da177e4
LT
386/**
387 * cpufreq_parse_governor - parse a governor string
388 */
905d77cd 389static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
390 struct cpufreq_governor **governor)
391{
3bcb09a3
JF
392 int err = -EINVAL;
393
1da177e4 394 if (!cpufreq_driver)
3bcb09a3
JF
395 goto out;
396
1da177e4
LT
397 if (cpufreq_driver->setpolicy) {
398 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
399 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 400 err = 0;
e08f5f5b
GS
401 } else if (!strnicmp(str_governor, "powersave",
402 CPUFREQ_NAME_LEN)) {
1da177e4 403 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 404 err = 0;
1da177e4 405 }
3bcb09a3 406 } else if (cpufreq_driver->target) {
1da177e4 407 struct cpufreq_governor *t;
3bcb09a3 408
3fc54d37 409 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
410
411 t = __find_governor(str_governor);
412
ea714970 413 if (t == NULL) {
e08f5f5b
GS
414 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
415 str_governor);
ea714970
JF
416
417 if (name) {
418 int ret;
419
420 mutex_unlock(&cpufreq_governor_mutex);
326f6a5c 421 ret = request_module("%s", name);
ea714970
JF
422 mutex_lock(&cpufreq_governor_mutex);
423
424 if (ret == 0)
425 t = __find_governor(str_governor);
426 }
427
428 kfree(name);
429 }
430
3bcb09a3
JF
431 if (t != NULL) {
432 *governor = t;
433 err = 0;
1da177e4 434 }
3bcb09a3 435
3fc54d37 436 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 437 }
29464f28 438out:
3bcb09a3 439 return err;
1da177e4 440}
1da177e4
LT
441
442
1da177e4 443/**
e08f5f5b
GS
444 * cpufreq_per_cpu_attr_read() / show_##file_name() -
445 * print out cpufreq information
1da177e4
LT
446 *
447 * Write out information from cpufreq_driver->policy[cpu]; object must be
448 * "unsigned int".
449 */
450
32ee8c3e
DJ
451#define show_one(file_name, object) \
452static ssize_t show_##file_name \
905d77cd 453(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 454{ \
29464f28 455 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
456}
457
458show_one(cpuinfo_min_freq, cpuinfo.min_freq);
459show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 460show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
461show_one(scaling_min_freq, min);
462show_one(scaling_max_freq, max);
463show_one(scaling_cur_freq, cur);
464
e08f5f5b
GS
465static int __cpufreq_set_policy(struct cpufreq_policy *data,
466 struct cpufreq_policy *policy);
7970e08b 467
1da177e4
LT
468/**
469 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
470 */
471#define store_one(file_name, object) \
472static ssize_t store_##file_name \
905d77cd 473(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4
LT
474{ \
475 unsigned int ret = -EINVAL; \
476 struct cpufreq_policy new_policy; \
477 \
478 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
479 if (ret) \
480 return -EINVAL; \
481 \
29464f28 482 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
483 if (ret != 1) \
484 return -EINVAL; \
485 \
7970e08b
TR
486 ret = __cpufreq_set_policy(policy, &new_policy); \
487 policy->user_policy.object = policy->object; \
1da177e4
LT
488 \
489 return ret ? ret : count; \
490}
491
29464f28
DJ
492store_one(scaling_min_freq, min);
493store_one(scaling_max_freq, max);
1da177e4
LT
494
495/**
496 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
497 */
905d77cd
DJ
498static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
499 char *buf)
1da177e4 500{
5a01f2e8 501 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
502 if (!cur_freq)
503 return sprintf(buf, "<unknown>");
504 return sprintf(buf, "%u\n", cur_freq);
505}
506
507
508/**
509 * show_scaling_governor - show the current policy for the specified CPU
510 */
905d77cd 511static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 512{
29464f28 513 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
514 return sprintf(buf, "powersave\n");
515 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
516 return sprintf(buf, "performance\n");
517 else if (policy->governor)
29464f28
DJ
518 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
519 policy->governor->name);
1da177e4
LT
520 return -EINVAL;
521}
522
523
524/**
525 * store_scaling_governor - store policy for the specified CPU
526 */
905d77cd
DJ
527static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
528 const char *buf, size_t count)
1da177e4
LT
529{
530 unsigned int ret = -EINVAL;
531 char str_governor[16];
532 struct cpufreq_policy new_policy;
533
534 ret = cpufreq_get_policy(&new_policy, policy->cpu);
535 if (ret)
536 return ret;
537
29464f28 538 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
539 if (ret != 1)
540 return -EINVAL;
541
e08f5f5b
GS
542 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
543 &new_policy.governor))
1da177e4
LT
544 return -EINVAL;
545
7970e08b
TR
546 /* Do not use cpufreq_set_policy here or the user_policy.max
547 will be wrongly overridden */
7970e08b
TR
548 ret = __cpufreq_set_policy(policy, &new_policy);
549
550 policy->user_policy.policy = policy->policy;
551 policy->user_policy.governor = policy->governor;
7970e08b 552
e08f5f5b
GS
553 if (ret)
554 return ret;
555 else
556 return count;
1da177e4
LT
557}
558
559/**
560 * show_scaling_driver - show the cpufreq driver currently loaded
561 */
905d77cd 562static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4
LT
563{
564 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
565}
566
567/**
568 * show_scaling_available_governors - show the available CPUfreq governors
569 */
905d77cd
DJ
570static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
571 char *buf)
1da177e4
LT
572{
573 ssize_t i = 0;
574 struct cpufreq_governor *t;
575
576 if (!cpufreq_driver->target) {
577 i += sprintf(buf, "performance powersave");
578 goto out;
579 }
580
581 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
582 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
583 - (CPUFREQ_NAME_LEN + 2)))
1da177e4
LT
584 goto out;
585 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
586 }
7d5e350f 587out:
1da177e4
LT
588 i += sprintf(&buf[i], "\n");
589 return i;
590}
e8628dd0 591
835481d9 592static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
593{
594 ssize_t i = 0;
595 unsigned int cpu;
596
835481d9 597 for_each_cpu(cpu, mask) {
1da177e4
LT
598 if (i)
599 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
600 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
601 if (i >= (PAGE_SIZE - 5))
29464f28 602 break;
1da177e4
LT
603 }
604 i += sprintf(&buf[i], "\n");
605 return i;
606}
607
e8628dd0
DW
608/**
609 * show_related_cpus - show the CPUs affected by each transition even if
610 * hw coordination is in use
611 */
612static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
613{
835481d9 614 if (cpumask_empty(policy->related_cpus))
e8628dd0
DW
615 return show_cpus(policy->cpus, buf);
616 return show_cpus(policy->related_cpus, buf);
617}
618
619/**
620 * show_affected_cpus - show the CPUs affected by each transition
621 */
622static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
623{
624 return show_cpus(policy->cpus, buf);
625}
626
9e76988e 627static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 628 const char *buf, size_t count)
9e76988e
VP
629{
630 unsigned int freq = 0;
631 unsigned int ret;
632
879000f9 633 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
634 return -EINVAL;
635
636 ret = sscanf(buf, "%u", &freq);
637 if (ret != 1)
638 return -EINVAL;
639
640 policy->governor->store_setspeed(policy, freq);
641
642 return count;
643}
644
645static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
646{
879000f9 647 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
648 return sprintf(buf, "<unsupported>\n");
649
650 return policy->governor->show_setspeed(policy, buf);
651}
1da177e4 652
e2f74f35
TR
653/**
654 * show_scaling_driver - show the current cpufreq HW/BIOS limitation
655 */
656static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
657{
658 unsigned int limit;
659 int ret;
660 if (cpufreq_driver->bios_limit) {
661 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
662 if (!ret)
663 return sprintf(buf, "%u\n", limit);
664 }
665 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
666}
667
6dad2a29
BP
668cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
669cpufreq_freq_attr_ro(cpuinfo_min_freq);
670cpufreq_freq_attr_ro(cpuinfo_max_freq);
671cpufreq_freq_attr_ro(cpuinfo_transition_latency);
672cpufreq_freq_attr_ro(scaling_available_governors);
673cpufreq_freq_attr_ro(scaling_driver);
674cpufreq_freq_attr_ro(scaling_cur_freq);
675cpufreq_freq_attr_ro(bios_limit);
676cpufreq_freq_attr_ro(related_cpus);
677cpufreq_freq_attr_ro(affected_cpus);
678cpufreq_freq_attr_rw(scaling_min_freq);
679cpufreq_freq_attr_rw(scaling_max_freq);
680cpufreq_freq_attr_rw(scaling_governor);
681cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 682
905d77cd 683static struct attribute *default_attrs[] = {
1da177e4
LT
684 &cpuinfo_min_freq.attr,
685 &cpuinfo_max_freq.attr,
ed129784 686 &cpuinfo_transition_latency.attr,
1da177e4
LT
687 &scaling_min_freq.attr,
688 &scaling_max_freq.attr,
689 &affected_cpus.attr,
e8628dd0 690 &related_cpus.attr,
1da177e4
LT
691 &scaling_governor.attr,
692 &scaling_driver.attr,
693 &scaling_available_governors.attr,
9e76988e 694 &scaling_setspeed.attr,
1da177e4
LT
695 NULL
696};
697
8aa84ad8
TR
698struct kobject *cpufreq_global_kobject;
699EXPORT_SYMBOL(cpufreq_global_kobject);
700
29464f28
DJ
701#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
702#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 703
29464f28 704static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 705{
905d77cd
DJ
706 struct cpufreq_policy *policy = to_policy(kobj);
707 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 708 ssize_t ret = -EINVAL;
1da177e4
LT
709 policy = cpufreq_cpu_get(policy->cpu);
710 if (!policy)
0db4a8a9 711 goto no_policy;
5a01f2e8
VP
712
713 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 714 goto fail;
5a01f2e8 715
e08f5f5b
GS
716 if (fattr->show)
717 ret = fattr->show(policy, buf);
718 else
719 ret = -EIO;
720
5a01f2e8 721 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 722fail:
1da177e4 723 cpufreq_cpu_put(policy);
0db4a8a9 724no_policy:
1da177e4
LT
725 return ret;
726}
727
905d77cd
DJ
728static ssize_t store(struct kobject *kobj, struct attribute *attr,
729 const char *buf, size_t count)
1da177e4 730{
905d77cd
DJ
731 struct cpufreq_policy *policy = to_policy(kobj);
732 struct freq_attr *fattr = to_attr(attr);
a07530b4 733 ssize_t ret = -EINVAL;
1da177e4
LT
734 policy = cpufreq_cpu_get(policy->cpu);
735 if (!policy)
a07530b4 736 goto no_policy;
5a01f2e8
VP
737
738 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 739 goto fail;
5a01f2e8 740
e08f5f5b
GS
741 if (fattr->store)
742 ret = fattr->store(policy, buf, count);
743 else
744 ret = -EIO;
745
5a01f2e8 746 unlock_policy_rwsem_write(policy->cpu);
a07530b4 747fail:
1da177e4 748 cpufreq_cpu_put(policy);
a07530b4 749no_policy:
1da177e4
LT
750 return ret;
751}
752
905d77cd 753static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 754{
905d77cd 755 struct cpufreq_policy *policy = to_policy(kobj);
1da177e4
LT
756 dprintk("last reference is dropped\n");
757 complete(&policy->kobj_unregister);
758}
759
52cf25d0 760static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
761 .show = show,
762 .store = store,
763};
764
765static struct kobj_type ktype_cpufreq = {
766 .sysfs_ops = &sysfs_ops,
767 .default_attrs = default_attrs,
768 .release = cpufreq_sysfs_release,
769};
770
4bfa042c
TR
771/*
772 * Returns:
773 * Negative: Failure
774 * 0: Success
775 * Positive: When we have a managed CPU and the sysfs got symlinked
776 */
cf3289d0
AC
777static int cpufreq_add_dev_policy(unsigned int cpu,
778 struct cpufreq_policy *policy,
779 struct sys_device *sys_dev)
ecf7e461
DJ
780{
781 int ret = 0;
782#ifdef CONFIG_SMP
783 unsigned long flags;
784 unsigned int j;
ecf7e461 785#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
786 struct cpufreq_governor *gov;
787
788 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
789 if (gov) {
790 policy->governor = gov;
ecf7e461
DJ
791 dprintk("Restoring governor %s for cpu %d\n",
792 policy->governor->name, cpu);
793 }
794#endif
795
796 for_each_cpu(j, policy->cpus) {
797 struct cpufreq_policy *managed_policy;
798
799 if (cpu == j)
800 continue;
801
802 /* Check for existing affected CPUs.
803 * They may not be aware of it due to CPU Hotplug.
804 * cpufreq_cpu_put is called when the device is removed
805 * in __cpufreq_remove_dev()
806 */
807 managed_policy = cpufreq_cpu_get(j);
808 if (unlikely(managed_policy)) {
809
810 /* Set proper policy_cpu */
811 unlock_policy_rwsem_write(cpu);
f1625066 812 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
ecf7e461
DJ
813
814 if (lock_policy_rwsem_write(cpu) < 0) {
815 /* Should not go through policy unlock path */
816 if (cpufreq_driver->exit)
817 cpufreq_driver->exit(policy);
818 cpufreq_cpu_put(managed_policy);
819 return -EBUSY;
820 }
821
822 spin_lock_irqsave(&cpufreq_driver_lock, flags);
823 cpumask_copy(managed_policy->cpus, policy->cpus);
824 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
825 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
826
827 dprintk("CPU already managed, adding link\n");
828 ret = sysfs_create_link(&sys_dev->kobj,
829 &managed_policy->kobj,
830 "cpufreq");
831 if (ret)
832 cpufreq_cpu_put(managed_policy);
833 /*
834 * Success. We only needed to be added to the mask.
835 * Call driver->exit() because only the cpu parent of
836 * the kobj needed to call init().
837 */
838 if (cpufreq_driver->exit)
839 cpufreq_driver->exit(policy);
4bfa042c
TR
840
841 if (!ret)
842 return 1;
843 else
844 return ret;
ecf7e461
DJ
845 }
846 }
847#endif
848 return ret;
849}
850
851
19d6f7ec 852/* symlink affected CPUs */
cf3289d0
AC
853static int cpufreq_add_dev_symlink(unsigned int cpu,
854 struct cpufreq_policy *policy)
19d6f7ec
DJ
855{
856 unsigned int j;
857 int ret = 0;
858
859 for_each_cpu(j, policy->cpus) {
860 struct cpufreq_policy *managed_policy;
861 struct sys_device *cpu_sys_dev;
862
863 if (j == cpu)
864 continue;
865 if (!cpu_online(j))
866 continue;
867
868 dprintk("CPU %u already managed, adding link\n", j);
869 managed_policy = cpufreq_cpu_get(cpu);
870 cpu_sys_dev = get_cpu_sysdev(j);
871 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
872 "cpufreq");
873 if (ret) {
874 cpufreq_cpu_put(managed_policy);
875 return ret;
876 }
877 }
878 return ret;
879}
880
cf3289d0
AC
881static int cpufreq_add_dev_interface(unsigned int cpu,
882 struct cpufreq_policy *policy,
883 struct sys_device *sys_dev)
909a694e 884{
ecf7e461 885 struct cpufreq_policy new_policy;
909a694e
DJ
886 struct freq_attr **drv_attr;
887 unsigned long flags;
888 int ret = 0;
889 unsigned int j;
890
891 /* prepare interface data */
892 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
893 &sys_dev->kobj, "cpufreq");
894 if (ret)
895 return ret;
896
897 /* set up files for this cpu device */
898 drv_attr = cpufreq_driver->attr;
899 while ((drv_attr) && (*drv_attr)) {
900 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
901 if (ret)
902 goto err_out_kobj_put;
903 drv_attr++;
904 }
905 if (cpufreq_driver->get) {
906 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
907 if (ret)
908 goto err_out_kobj_put;
909 }
910 if (cpufreq_driver->target) {
911 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
912 if (ret)
913 goto err_out_kobj_put;
914 }
e2f74f35
TR
915 if (cpufreq_driver->bios_limit) {
916 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
917 if (ret)
918 goto err_out_kobj_put;
919 }
909a694e
DJ
920
921 spin_lock_irqsave(&cpufreq_driver_lock, flags);
922 for_each_cpu(j, policy->cpus) {
bec037aa
JL
923 if (!cpu_online(j))
924 continue;
909a694e 925 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 926 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e
DJ
927 }
928 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
929
930 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
931 if (ret)
932 goto err_out_kobj_put;
933
934 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
935 /* assure that the starting sequence is run in __cpufreq_set_policy */
936 policy->governor = NULL;
937
938 /* set default policy */
939 ret = __cpufreq_set_policy(policy, &new_policy);
940 policy->user_policy.policy = policy->policy;
941 policy->user_policy.governor = policy->governor;
942
943 if (ret) {
944 dprintk("setting policy failed\n");
945 if (cpufreq_driver->exit)
946 cpufreq_driver->exit(policy);
947 }
909a694e
DJ
948 return ret;
949
950err_out_kobj_put:
951 kobject_put(&policy->kobj);
952 wait_for_completion(&policy->kobj_unregister);
953 return ret;
954}
955
1da177e4
LT
956
957/**
958 * cpufreq_add_dev - add a CPU device
959 *
32ee8c3e 960 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
961 *
962 * The Oracle says: try running cpufreq registration/unregistration concurrently
963 * with with cpu hotplugging and all hell will break loose. Tried to clean this
964 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 965 */
905d77cd 966static int cpufreq_add_dev(struct sys_device *sys_dev)
1da177e4
LT
967{
968 unsigned int cpu = sys_dev->id;
90e41bac 969 int ret = 0, found = 0;
1da177e4 970 struct cpufreq_policy *policy;
1da177e4
LT
971 unsigned long flags;
972 unsigned int j;
90e41bac
PB
973#ifdef CONFIG_HOTPLUG_CPU
974 int sibling;
975#endif
1da177e4 976
c32b6b8e
AR
977 if (cpu_is_offline(cpu))
978 return 0;
979
1da177e4
LT
980 cpufreq_debug_disable_ratelimit();
981 dprintk("adding CPU %u\n", cpu);
982
983#ifdef CONFIG_SMP
984 /* check whether a different CPU already registered this
985 * CPU because it is in the same boat. */
986 policy = cpufreq_cpu_get(cpu);
987 if (unlikely(policy)) {
8ff69732 988 cpufreq_cpu_put(policy);
1da177e4
LT
989 cpufreq_debug_enable_ratelimit();
990 return 0;
991 }
992#endif
993
994 if (!try_module_get(cpufreq_driver->owner)) {
995 ret = -EINVAL;
996 goto module_out;
997 }
998
059019a3 999 ret = -ENOMEM;
e98df50c 1000 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 1001 if (!policy)
1da177e4 1002 goto nomem_out;
059019a3
DJ
1003
1004 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 1005 goto err_free_policy;
059019a3
DJ
1006
1007 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 1008 goto err_free_cpumask;
1da177e4
LT
1009
1010 policy->cpu = cpu;
835481d9 1011 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1012
5a01f2e8 1013 /* Initially set CPU itself as the policy_cpu */
f1625066 1014 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
3f4a782b
MD
1015 ret = (lock_policy_rwsem_write(cpu) < 0);
1016 WARN_ON(ret);
5a01f2e8 1017
1da177e4 1018 init_completion(&policy->kobj_unregister);
65f27f38 1019 INIT_WORK(&policy->update, handle_update);
1da177e4 1020
8122c6ce 1021 /* Set governor before ->init, so that driver could check it */
90e41bac
PB
1022#ifdef CONFIG_HOTPLUG_CPU
1023 for_each_online_cpu(sibling) {
1024 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1025 if (cp && cp->governor &&
1026 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1027 policy->governor = cp->governor;
1028 found = 1;
1029 break;
1030 }
1031 }
1032#endif
1033 if (!found)
1034 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1da177e4
LT
1035 /* call driver. From then on the cpufreq must be able
1036 * to accept all calls to ->verify and ->setpolicy for this CPU
1037 */
1038 ret = cpufreq_driver->init(policy);
1039 if (ret) {
1040 dprintk("initialization failed\n");
3f4a782b 1041 goto err_unlock_policy;
1da177e4 1042 }
187d9f4e
MC
1043 policy->user_policy.min = policy->min;
1044 policy->user_policy.max = policy->max;
1da177e4 1045
a1531acd
TR
1046 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1047 CPUFREQ_START, policy);
1048
ecf7e461 1049 ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
4bfa042c
TR
1050 if (ret) {
1051 if (ret > 0)
1052 /* This is a managed cpu, symlink created,
1053 exit with 0 */
1054 ret = 0;
ecf7e461 1055 goto err_unlock_policy;
4bfa042c 1056 }
1da177e4 1057
909a694e 1058 ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
19d6f7ec
DJ
1059 if (ret)
1060 goto err_out_unregister;
8ff69732 1061
dca02613
LW
1062 unlock_policy_rwsem_write(cpu);
1063
038c5b3e 1064 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 1065 module_put(cpufreq_driver->owner);
1da177e4
LT
1066 dprintk("initialization complete\n");
1067 cpufreq_debug_enable_ratelimit();
87c32271 1068
1da177e4
LT
1069 return 0;
1070
1071
1072err_out_unregister:
1073 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1074 for_each_cpu(j, policy->cpus)
7a6aedfa 1075 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1076 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1077
c10997f6 1078 kobject_put(&policy->kobj);
1da177e4
LT
1079 wait_for_completion(&policy->kobj_unregister);
1080
3f4a782b 1081err_unlock_policy:
45709118 1082 unlock_policy_rwsem_write(cpu);
cad70a6a 1083 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1084err_free_cpumask:
1085 free_cpumask_var(policy->cpus);
1086err_free_policy:
1da177e4 1087 kfree(policy);
1da177e4
LT
1088nomem_out:
1089 module_put(cpufreq_driver->owner);
c32b6b8e 1090module_out:
1da177e4
LT
1091 cpufreq_debug_enable_ratelimit();
1092 return ret;
1093}
1094
1095
1096/**
5a01f2e8 1097 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1098 *
1099 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1100 * Caller should already have policy_rwsem in write mode for this CPU.
1101 * This routine frees the rwsem before returning.
1da177e4 1102 */
905d77cd 1103static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1da177e4
LT
1104{
1105 unsigned int cpu = sys_dev->id;
1106 unsigned long flags;
1107 struct cpufreq_policy *data;
499bca9b
AW
1108 struct kobject *kobj;
1109 struct completion *cmp;
1da177e4 1110#ifdef CONFIG_SMP
e738cf6d 1111 struct sys_device *cpu_sys_dev;
1da177e4
LT
1112 unsigned int j;
1113#endif
1114
1115 cpufreq_debug_disable_ratelimit();
1116 dprintk("unregistering CPU %u\n", cpu);
1117
1118 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 1119 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
1120
1121 if (!data) {
1122 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1123 cpufreq_debug_enable_ratelimit();
5a01f2e8 1124 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1125 return -EINVAL;
1126 }
7a6aedfa 1127 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1128
1129
1130#ifdef CONFIG_SMP
1131 /* if this isn't the CPU which is the parent of the kobj, we
32ee8c3e 1132 * only need to unlink, put and exit
1da177e4
LT
1133 */
1134 if (unlikely(cpu != data->cpu)) {
1135 dprintk("removing link\n");
835481d9 1136 cpumask_clear_cpu(cpu, data->cpus);
1da177e4 1137 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
499bca9b 1138 kobj = &sys_dev->kobj;
1da177e4
LT
1139 cpufreq_cpu_put(data);
1140 cpufreq_debug_enable_ratelimit();
5a01f2e8 1141 unlock_policy_rwsem_write(cpu);
499bca9b 1142 sysfs_remove_link(kobj, "cpufreq");
1da177e4
LT
1143 return 0;
1144 }
1145#endif
1146
1da177e4 1147#ifdef CONFIG_SMP
084f3493
TR
1148
1149#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1150 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1151 CPUFREQ_NAME_LEN);
084f3493
TR
1152#endif
1153
1da177e4
LT
1154 /* if we have other CPUs still registered, we need to unlink them,
1155 * or else wait_for_completion below will lock up. Clean the
7a6aedfa
MT
1156 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1157 * the sysfs links afterwards.
1da177e4 1158 */
835481d9
RR
1159 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1160 for_each_cpu(j, data->cpus) {
1da177e4
LT
1161 if (j == cpu)
1162 continue;
7a6aedfa 1163 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1164 }
1165 }
1166
1167 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1168
835481d9
RR
1169 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1170 for_each_cpu(j, data->cpus) {
1da177e4
LT
1171 if (j == cpu)
1172 continue;
1173 dprintk("removing link for cpu %u\n", j);
084f3493 1174#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1175 strncpy(per_cpu(cpufreq_cpu_governor, j),
1176 data->governor->name, CPUFREQ_NAME_LEN);
084f3493 1177#endif
d434fca7 1178 cpu_sys_dev = get_cpu_sysdev(j);
499bca9b
AW
1179 kobj = &cpu_sys_dev->kobj;
1180 unlock_policy_rwsem_write(cpu);
1181 sysfs_remove_link(kobj, "cpufreq");
1182 lock_policy_rwsem_write(cpu);
1da177e4
LT
1183 cpufreq_cpu_put(data);
1184 }
1185 }
1186#else
1187 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1188#endif
1189
1da177e4
LT
1190 if (cpufreq_driver->target)
1191 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 1192
499bca9b
AW
1193 kobj = &data->kobj;
1194 cmp = &data->kobj_unregister;
1195 unlock_policy_rwsem_write(cpu);
1196 kobject_put(kobj);
1da177e4
LT
1197
1198 /* we need to make sure that the underlying kobj is actually
32ee8c3e 1199 * not referenced anymore by anybody before we proceed with
1da177e4
LT
1200 * unloading.
1201 */
1202 dprintk("waiting for dropping of refcount\n");
499bca9b 1203 wait_for_completion(cmp);
1da177e4
LT
1204 dprintk("wait complete\n");
1205
499bca9b 1206 lock_policy_rwsem_write(cpu);
1da177e4
LT
1207 if (cpufreq_driver->exit)
1208 cpufreq_driver->exit(data);
7d26e2d5 1209 unlock_policy_rwsem_write(cpu);
1210
835481d9
RR
1211 free_cpumask_var(data->related_cpus);
1212 free_cpumask_var(data->cpus);
1da177e4 1213 kfree(data);
835481d9 1214 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1215
1216 cpufreq_debug_enable_ratelimit();
1da177e4
LT
1217 return 0;
1218}
1219
1220
905d77cd 1221static int cpufreq_remove_dev(struct sys_device *sys_dev)
5a01f2e8
VP
1222{
1223 unsigned int cpu = sys_dev->id;
1224 int retval;
ec28297a
VP
1225
1226 if (cpu_is_offline(cpu))
1227 return 0;
1228
5a01f2e8
VP
1229 if (unlikely(lock_policy_rwsem_write(cpu)))
1230 BUG();
1231
1232 retval = __cpufreq_remove_dev(sys_dev);
1233 return retval;
1234}
1235
1236
65f27f38 1237static void handle_update(struct work_struct *work)
1da177e4 1238{
65f27f38
DH
1239 struct cpufreq_policy *policy =
1240 container_of(work, struct cpufreq_policy, update);
1241 unsigned int cpu = policy->cpu;
1da177e4
LT
1242 dprintk("handle_update for cpu %u called\n", cpu);
1243 cpufreq_update_policy(cpu);
1244}
1245
1246/**
1247 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1248 * @cpu: cpu number
1249 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1250 * @new_freq: CPU frequency the CPU actually runs at
1251 *
29464f28
DJ
1252 * We adjust to current frequency first, and need to clean up later.
1253 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1254 */
e08f5f5b
GS
1255static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1256 unsigned int new_freq)
1da177e4
LT
1257{
1258 struct cpufreq_freqs freqs;
1259
b10eec22 1260 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1261 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1262
1263 freqs.cpu = cpu;
1264 freqs.old = old_freq;
1265 freqs.new = new_freq;
1266 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1267 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1268}
1269
1270
32ee8c3e 1271/**
4ab70df4 1272 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1273 * @cpu: CPU number
1274 *
1275 * This is the last known freq, without actually getting it from the driver.
1276 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1277 */
1278unsigned int cpufreq_quick_get(unsigned int cpu)
1279{
1280 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1281 unsigned int ret_freq = 0;
95235ca2
VP
1282
1283 if (policy) {
e08f5f5b 1284 ret_freq = policy->cur;
95235ca2
VP
1285 cpufreq_cpu_put(policy);
1286 }
1287
4d34a67d 1288 return ret_freq;
95235ca2
VP
1289}
1290EXPORT_SYMBOL(cpufreq_quick_get);
1291
1292
5a01f2e8 1293static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1294{
7a6aedfa 1295 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1296 unsigned int ret_freq = 0;
1da177e4 1297
1da177e4 1298 if (!cpufreq_driver->get)
4d34a67d 1299 return ret_freq;
1da177e4 1300
e08f5f5b 1301 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1302
e08f5f5b
GS
1303 if (ret_freq && policy->cur &&
1304 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1305 /* verify no discrepancy between actual and
1306 saved value exists */
1307 if (unlikely(ret_freq != policy->cur)) {
1308 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1309 schedule_work(&policy->update);
1310 }
1311 }
1312
4d34a67d 1313 return ret_freq;
5a01f2e8 1314}
1da177e4 1315
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.
 * Returns 0 if no policy exists or the rwsem cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1343
e00e56df
RW
1344static struct sysdev_driver cpufreq_sysdev_driver = {
1345 .add = cpufreq_add_dev,
1346 .remove = cpufreq_remove_dev,
1347};
1348
1da177e4 1349
42d4dc3f 1350/**
e00e56df
RW
1351 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1352 *
1353 * This function is only executed for the boot processor. The other CPUs
1354 * have been put offline by means of CPU hotplug.
42d4dc3f 1355 */
e00e56df 1356static int cpufreq_bp_suspend(void)
42d4dc3f 1357{
e08f5f5b 1358 int ret = 0;
4bc5d341 1359
e00e56df 1360 int cpu = smp_processor_id();
42d4dc3f
BH
1361 struct cpufreq_policy *cpu_policy;
1362
0e37b159 1363 dprintk("suspending cpu %u\n", cpu);
42d4dc3f 1364
e00e56df 1365 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1366 cpu_policy = cpufreq_cpu_get(cpu);
1367 if (!cpu_policy)
e00e56df 1368 return 0;
42d4dc3f
BH
1369
1370 if (cpufreq_driver->suspend) {
7ca64e2d 1371 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1372 if (ret)
42d4dc3f
BH
1373 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1374 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1375 }
1376
42d4dc3f 1377 cpufreq_cpu_put(cpu_policy);
c9060494 1378 return ret;
42d4dc3f
BH
1379}
1380
1da177e4 1381/**
e00e56df 1382 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1383 *
1384 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1385 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1386 * restored. It will verify that the current freq is in sync with
1387 * what we believe it to be. This is a bit later than when it
1388 * should be, but nonethteless it's better than calling
1389 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1390 *
1391 * This function is only executed for the boot CPU. The other CPUs have not
1392 * been turned on yet.
1da177e4 1393 */
e00e56df 1394static void cpufreq_bp_resume(void)
1da177e4 1395{
e08f5f5b 1396 int ret = 0;
4bc5d341 1397
e00e56df 1398 int cpu = smp_processor_id();
1da177e4
LT
1399 struct cpufreq_policy *cpu_policy;
1400
1401 dprintk("resuming cpu %u\n", cpu);
1402
e00e56df 1403 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1404 cpu_policy = cpufreq_cpu_get(cpu);
1405 if (!cpu_policy)
e00e56df 1406 return;
1da177e4
LT
1407
1408 if (cpufreq_driver->resume) {
1409 ret = cpufreq_driver->resume(cpu_policy);
1410 if (ret) {
1411 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1412 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1413 goto fail;
1da177e4
LT
1414 }
1415 }
1416
1da177e4 1417 schedule_work(&cpu_policy->update);
ce6c3997 1418
c9060494 1419fail:
1da177e4 1420 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1421}
1422
e00e56df
RW
1423static struct syscore_ops cpufreq_syscore_ops = {
1424 .suspend = cpufreq_bp_suspend,
1425 .resume = cpufreq_bp_resume,
1da177e4
LT
1426};
1427
1428
1429/*********************************************************************
1430 * NOTIFIER LISTS INTERFACE *
1431 *********************************************************************/
1432
1433/**
1434 * cpufreq_register_notifier - register a driver with cpufreq
1435 * @nb: notifier function to register
1436 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1437 *
32ee8c3e 1438 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1439 * are notified about clock rate changes (once before and once after
1440 * the transition), or a list of drivers that are notified about
1441 * changes in cpufreq policy.
1442 *
1443 * This function may sleep, and has the same return conditions as
e041c683 1444 * blocking_notifier_chain_register.
1da177e4
LT
1445 */
1446int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1447{
1448 int ret;
1449
74212ca4
CEB
1450 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1451
1da177e4
LT
1452 switch (list) {
1453 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1454 ret = srcu_notifier_chain_register(
e041c683 1455 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1456 break;
1457 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1458 ret = blocking_notifier_chain_register(
1459 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1460 break;
1461 default:
1462 ret = -EINVAL;
1463 }
1da177e4
LT
1464
1465 return ret;
1466}
1467EXPORT_SYMBOL(cpufreq_register_notifier);
1468
1469
1470/**
1471 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1472 * @nb: notifier block to be unregistered
1473 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1474 *
1475 * Remove a driver from the CPU frequency notifier list.
1476 *
1477 * This function may sleep, and has the same return conditions as
e041c683 1478 * blocking_notifier_chain_unregister.
1da177e4
LT
1479 */
1480int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1481{
1482 int ret;
1483
1da177e4
LT
1484 switch (list) {
1485 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1486 ret = srcu_notifier_chain_unregister(
e041c683 1487 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1488 break;
1489 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1490 ret = blocking_notifier_chain_unregister(
1491 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1492 break;
1493 default:
1494 ret = -EINVAL;
1495 }
1da177e4
LT
1496
1497 return ret;
1498}
1499EXPORT_SYMBOL(cpufreq_unregister_notifier);
1500
1501
1502/*********************************************************************
1503 * GOVERNORS *
1504 *********************************************************************/
1505
1506
1507int __cpufreq_driver_target(struct cpufreq_policy *policy,
1508 unsigned int target_freq,
1509 unsigned int relation)
1510{
1511 int retval = -EINVAL;
c32b6b8e 1512
1da177e4
LT
1513 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1514 target_freq, relation);
1515 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1516 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1517
1da177e4
LT
1518 return retval;
1519}
1520EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1521
1da177e4
LT
1522int cpufreq_driver_target(struct cpufreq_policy *policy,
1523 unsigned int target_freq,
1524 unsigned int relation)
1525{
f1829e4a 1526 int ret = -EINVAL;
1da177e4
LT
1527
1528 policy = cpufreq_cpu_get(policy->cpu);
1529 if (!policy)
f1829e4a 1530 goto no_policy;
1da177e4 1531
5a01f2e8 1532 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1533 goto fail;
1da177e4
LT
1534
1535 ret = __cpufreq_driver_target(policy, target_freq, relation);
1536
5a01f2e8 1537 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1538
f1829e4a 1539fail:
1da177e4 1540 cpufreq_cpu_put(policy);
f1829e4a 1541no_policy:
1da177e4
LT
1542 return ret;
1543}
1544EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1545
bf0b90e3 1546int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1547{
1548 int ret = 0;
1549
1550 policy = cpufreq_cpu_get(policy->cpu);
1551 if (!policy)
1552 return -EINVAL;
1553
bf0b90e3 1554 if (cpu_online(cpu) && cpufreq_driver->getavg)
1555 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1556
dfde5d62
VP
1557 cpufreq_cpu_put(policy);
1558 return ret;
1559}
5a01f2e8 1560EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1561
153d7f3f 1562/*
153d7f3f
AV
1563 * when "event" is CPUFREQ_GOV_LIMITS
1564 */
1da177e4 1565
e08f5f5b
GS
1566static int __cpufreq_governor(struct cpufreq_policy *policy,
1567 unsigned int event)
1da177e4 1568{
cc993cab 1569 int ret;
6afde10c
TR
1570
1571 /* Only must be defined when default governor is known to have latency
1572 restrictions, like e.g. conservative or ondemand.
1573 That this is the case is already ensured in Kconfig
1574 */
1575#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1576 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1577#else
1578 struct cpufreq_governor *gov = NULL;
1579#endif
1c256245
TR
1580
1581 if (policy->governor->max_transition_latency &&
1582 policy->cpuinfo.transition_latency >
1583 policy->governor->max_transition_latency) {
6afde10c
TR
1584 if (!gov)
1585 return -EINVAL;
1586 else {
1587 printk(KERN_WARNING "%s governor failed, too long"
1588 " transition latency of HW, fallback"
1589 " to %s governor\n",
1590 policy->governor->name,
1591 gov->name);
1592 policy->governor = gov;
1593 }
1c256245 1594 }
1da177e4
LT
1595
1596 if (!try_module_get(policy->governor->owner))
1597 return -EINVAL;
1598
e08f5f5b
GS
1599 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1600 policy->cpu, event);
1da177e4
LT
1601 ret = policy->governor->governor(policy, event);
1602
e08f5f5b
GS
1603 /* we keep one module reference alive for
1604 each CPU governed by this CPU */
1da177e4
LT
1605 if ((event != CPUFREQ_GOV_START) || ret)
1606 module_put(policy->governor->owner);
1607 if ((event == CPUFREQ_GOV_STOP) && !ret)
1608 module_put(policy->governor->owner);
1609
1610 return ret;
1611}
1612
1613
1da177e4
LT
1614int cpufreq_register_governor(struct cpufreq_governor *governor)
1615{
3bcb09a3 1616 int err;
1da177e4
LT
1617
1618 if (!governor)
1619 return -EINVAL;
1620
3fc54d37 1621 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1622
3bcb09a3
JF
1623 err = -EBUSY;
1624 if (__find_governor(governor->name) == NULL) {
1625 err = 0;
1626 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1627 }
1da177e4 1628
32ee8c3e 1629 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1630 return err;
1da177e4
LT
1631}
1632EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1633
1634
1635void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1636{
90e41bac
PB
1637#ifdef CONFIG_HOTPLUG_CPU
1638 int cpu;
1639#endif
1640
1da177e4
LT
1641 if (!governor)
1642 return;
1643
90e41bac
PB
1644#ifdef CONFIG_HOTPLUG_CPU
1645 for_each_present_cpu(cpu) {
1646 if (cpu_online(cpu))
1647 continue;
1648 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1649 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1650 }
1651#endif
1652
3fc54d37 1653 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1654 list_del(&governor->governor_list);
3fc54d37 1655 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1656 return;
1657}
1658EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1659
1660
1661
1662/*********************************************************************
1663 * POLICY INTERFACE *
1664 *********************************************************************/
1665
1666/**
1667 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1668 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1669 * is written
1da177e4
LT
1670 *
1671 * Reads the current cpufreq policy.
1672 */
1673int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1674{
1675 struct cpufreq_policy *cpu_policy;
1676 if (!policy)
1677 return -EINVAL;
1678
1679 cpu_policy = cpufreq_cpu_get(cpu);
1680 if (!cpu_policy)
1681 return -EINVAL;
1682
1da177e4 1683 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1684
1685 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1686 return 0;
1687}
1688EXPORT_SYMBOL(cpufreq_get_policy);
1689
1690
153d7f3f 1691/*
e08f5f5b
GS
1692 * data : current policy.
1693 * policy : policy to be set.
153d7f3f 1694 */
e08f5f5b
GS
1695static int __cpufreq_set_policy(struct cpufreq_policy *data,
1696 struct cpufreq_policy *policy)
1da177e4
LT
1697{
1698 int ret = 0;
1699
1700 cpufreq_debug_disable_ratelimit();
1701 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1702 policy->min, policy->max);
1703
e08f5f5b
GS
1704 memcpy(&policy->cpuinfo, &data->cpuinfo,
1705 sizeof(struct cpufreq_cpuinfo));
1da177e4 1706
53391fa2 1707 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1708 ret = -EINVAL;
1709 goto error_out;
1710 }
1711
1da177e4
LT
1712 /* verify the cpu speed can be set within this limit */
1713 ret = cpufreq_driver->verify(policy);
1714 if (ret)
1715 goto error_out;
1716
1da177e4 1717 /* adjust if necessary - all reasons */
e041c683
AS
1718 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1719 CPUFREQ_ADJUST, policy);
1da177e4
LT
1720
1721 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1722 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1723 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1724
1725 /* verify the cpu speed can be set within this limit,
1726 which might be different to the first one */
1727 ret = cpufreq_driver->verify(policy);
e041c683 1728 if (ret)
1da177e4 1729 goto error_out;
1da177e4
LT
1730
1731 /* notification of the new policy */
e041c683
AS
1732 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1733 CPUFREQ_NOTIFY, policy);
1da177e4 1734
7d5e350f
DJ
1735 data->min = policy->min;
1736 data->max = policy->max;
1da177e4 1737
e08f5f5b
GS
1738 dprintk("new min and max freqs are %u - %u kHz\n",
1739 data->min, data->max);
1da177e4
LT
1740
1741 if (cpufreq_driver->setpolicy) {
1742 data->policy = policy->policy;
1743 dprintk("setting range\n");
1744 ret = cpufreq_driver->setpolicy(policy);
1745 } else {
1746 if (policy->governor != data->governor) {
1747 /* save old, working values */
1748 struct cpufreq_governor *old_gov = data->governor;
1749
1750 dprintk("governor switch\n");
1751
1752 /* end old governor */
ffe6275f 1753 if (data->governor)
1da177e4
LT
1754 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1755
1756 /* start new governor */
1757 data->governor = policy->governor;
1758 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1759 /* new governor failed, so re-start old one */
e08f5f5b
GS
1760 dprintk("starting governor %s failed\n",
1761 data->governor->name);
1da177e4
LT
1762 if (old_gov) {
1763 data->governor = old_gov;
e08f5f5b
GS
1764 __cpufreq_governor(data,
1765 CPUFREQ_GOV_START);
1da177e4
LT
1766 }
1767 ret = -EINVAL;
1768 goto error_out;
1769 }
1770 /* might be a policy change, too, so fall through */
1771 }
1772 dprintk("governor: change or update limits\n");
1773 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1774 }
1775
7d5e350f 1776error_out:
1da177e4
LT
1777 cpufreq_debug_enable_ratelimit();
1778 return ret;
1779}
1780
1da177e4
LT
1781/**
1782 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1783 * @cpu: CPU which shall be re-evaluated
1784 *
1785 * Usefull for policy notifiers which have different necessities
1786 * at different times.
1787 */
1788int cpufreq_update_policy(unsigned int cpu)
1789{
1790 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1791 struct cpufreq_policy policy;
f1829e4a 1792 int ret;
1da177e4 1793
f1829e4a
JL
1794 if (!data) {
1795 ret = -ENODEV;
1796 goto no_policy;
1797 }
1da177e4 1798
f1829e4a
JL
1799 if (unlikely(lock_policy_rwsem_write(cpu))) {
1800 ret = -EINVAL;
1801 goto fail;
1802 }
1da177e4
LT
1803
1804 dprintk("updating policy for CPU %u\n", cpu);
7d5e350f 1805 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1806 policy.min = data->user_policy.min;
1807 policy.max = data->user_policy.max;
1808 policy.policy = data->user_policy.policy;
1809 policy.governor = data->user_policy.governor;
1810
0961dd0d
TR
1811 /* BIOS might change freq behind our back
1812 -> ask driver for current freq and notify governors about a change */
1813 if (cpufreq_driver->get) {
1814 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3
TR
1815 if (!data->cur) {
1816 dprintk("Driver did not initialize current freq");
1817 data->cur = policy.cur;
1818 } else {
1819 if (data->cur != policy.cur)
e08f5f5b
GS
1820 cpufreq_out_of_sync(cpu, data->cur,
1821 policy.cur);
a85f7bd3 1822 }
0961dd0d
TR
1823 }
1824
1da177e4
LT
1825 ret = __cpufreq_set_policy(data, &policy);
1826
5a01f2e8
VP
1827 unlock_policy_rwsem_write(cpu);
1828
f1829e4a 1829fail:
1da177e4 1830 cpufreq_cpu_put(data);
f1829e4a 1831no_policy:
1da177e4
LT
1832 return ret;
1833}
1834EXPORT_SYMBOL(cpufreq_update_policy);
1835
dd184a01 1836static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1837 unsigned long action, void *hcpu)
1838{
1839 unsigned int cpu = (unsigned long)hcpu;
c32b6b8e
AR
1840 struct sys_device *sys_dev;
1841
1842 sys_dev = get_cpu_sysdev(cpu);
c32b6b8e
AR
1843 if (sys_dev) {
1844 switch (action) {
1845 case CPU_ONLINE:
8bb78442 1846 case CPU_ONLINE_FROZEN:
c32b6b8e
AR
1847 cpufreq_add_dev(sys_dev);
1848 break;
1849 case CPU_DOWN_PREPARE:
8bb78442 1850 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1851 if (unlikely(lock_policy_rwsem_write(cpu)))
1852 BUG();
1853
5a01f2e8 1854 __cpufreq_remove_dev(sys_dev);
c32b6b8e 1855 break;
5a01f2e8 1856 case CPU_DOWN_FAILED:
8bb78442 1857 case CPU_DOWN_FAILED_FROZEN:
5a01f2e8 1858 cpufreq_add_dev(sys_dev);
c32b6b8e
AR
1859 break;
1860 }
1861 }
1862 return NOTIFY_OK;
1863}
1864
9c36f746 1865static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1866 .notifier_call = cpufreq_cpu_callback,
1867};
1da177e4
LT
1868
1869/*********************************************************************
1870 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1871 *********************************************************************/
1872
1873/**
1874 * cpufreq_register_driver - register a CPU Frequency driver
1875 * @driver_data: A struct cpufreq_driver containing the values#
1876 * submitted by the CPU Frequency driver.
1877 *
32ee8c3e 1878 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1879 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1880 * (and isn't unregistered in the meantime).
1da177e4
LT
1881 *
1882 */
221dee28 1883int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1884{
1885 unsigned long flags;
1886 int ret;
1887
1888 if (!driver_data || !driver_data->verify || !driver_data->init ||
1889 ((!driver_data->setpolicy) && (!driver_data->target)))
1890 return -EINVAL;
1891
1892 dprintk("trying to register driver %s\n", driver_data->name);
1893
1894 if (driver_data->setpolicy)
1895 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1896
1897 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1898 if (cpufreq_driver) {
1899 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1900 return -EBUSY;
1901 }
1902 cpufreq_driver = driver_data;
1903 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1904
7a6aedfa
MT
1905 ret = sysdev_driver_register(&cpu_sysdev_class,
1906 &cpufreq_sysdev_driver);
8f5bc2ab
JS
1907 if (ret)
1908 goto err_null_driver;
1da177e4 1909
8f5bc2ab 1910 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1911 int i;
1912 ret = -ENODEV;
1913
1914 /* check for at least one working CPU */
7a6aedfa
MT
1915 for (i = 0; i < nr_cpu_ids; i++)
1916 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1917 ret = 0;
7a6aedfa
MT
1918 break;
1919 }
1da177e4
LT
1920
1921 /* if all ->init() calls failed, unregister */
1922 if (ret) {
e08f5f5b
GS
1923 dprintk("no CPU initialized for driver %s\n",
1924 driver_data->name);
8f5bc2ab 1925 goto err_sysdev_unreg;
1da177e4
LT
1926 }
1927 }
1928
8f5bc2ab
JS
1929 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1930 dprintk("driver %s up and running\n", driver_data->name);
1931 cpufreq_debug_enable_ratelimit();
1da177e4 1932
8f5bc2ab
JS
1933 return 0;
1934err_sysdev_unreg:
1935 sysdev_driver_unregister(&cpu_sysdev_class,
1936 &cpufreq_sysdev_driver);
1937err_null_driver:
1938 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1939 cpufreq_driver = NULL;
1940 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 1941 return ret;
1da177e4
LT
1942}
1943EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1944
1945
1946/**
1947 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1948 *
32ee8c3e 1949 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1950 * the right to do so, i.e. if you have succeeded in initialising before!
1951 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1952 * currently not initialised.
1953 */
221dee28 1954int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1955{
1956 unsigned long flags;
1957
1958 cpufreq_debug_disable_ratelimit();
1959
1960 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1961 cpufreq_debug_enable_ratelimit();
1962 return -EINVAL;
1963 }
1964
1965 dprintk("unregistering driver %s\n", driver->name);
1966
1967 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
65edc68c 1968 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1969
1970 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1971 cpufreq_driver = NULL;
1972 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1973
1974 return 0;
1975}
1976EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1977
1978static int __init cpufreq_core_init(void)
1979{
1980 int cpu;
1981
1982 for_each_possible_cpu(cpu) {
f1625066 1983 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1984 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1985 }
8aa84ad8
TR
1986
1987 cpufreq_global_kobject = kobject_create_and_add("cpufreq",
1988 &cpu_sysdev_class.kset.kobj);
1989 BUG_ON(!cpufreq_global_kobject);
e00e56df 1990 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1991
5a01f2e8
VP
1992 return 0;
1993}
5a01f2e8 1994core_initcall(cpufreq_core_init);