cpufreq: Fix governor start/stop race condition
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
1c3d85dd 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
ba17ca46 49static DEFINE_MUTEX(cpufreq_governor_lock);
1da177e4 50
5a01f2e8
VP
51/*
52 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
53 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 *
55 * The rules for this semaphore:
56 * - Any routine that wants to read from the policy structure will
57 * do a down_read on this semaphore.
58 * - Any routine that will write to the policy structure and/or may take away
59 * the policy altogether (eg. CPU hotplug), will hold this lock in write
60 * mode before doing so.
61 *
62 * Additional rules:
5a01f2e8
VP
63 * - Governor routines that can be called in cpufreq hotplug path should not
64 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
65 * - Lock should not be held across
66 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 67 */
f1625066 68static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
69static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
70
/*
 * Generate lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolve
 * the policy-owning CPU for @cpu via cpufreq_policy_cpu and take that
 * policy's rwsem in the requested mode.  Always returns 0.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	/* -1 means the CPU is not (or no longer) managed by cpufreq */	\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 83
fa1d8af4
VK
/*
 * Counterparts of the lock_policy_rwsem_* helpers above: release the
 * per-policy rwsem of @cpu's policy-owning CPU in the given mode.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 94
1da177e4 95/* internal prototypes */
29464f28
DJ
96static int __cpufreq_governor(struct cpufreq_policy *policy,
97 unsigned int event);
5a01f2e8 98static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 99static void handle_update(struct work_struct *work);
1da177e4
LT
100
101/**
32ee8c3e
DJ
102 * Two notifier lists: the "policy" list is involved in the
103 * validation process for a new CPU frequency policy; the
1da177e4
LT
104 * "transition" list for kernel code that needs to handle
105 * changes to devices when the CPU clock speed changes.
106 * The mutex locks both lists.
107 */
e041c683 108static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 109static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 110
/* Set once the SRCU head below has been initialized; checked before
 * anyone is allowed to register a transition notifier. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: runs before any driver/device initcalls can register */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 119
/* Global "cpufreq is administratively off" flag; set once, never cleared. */
static int off __read_mostly;
/* Return non-zero when cpufreq has been disabled via disable_cpufreq(). */
static int cpufreq_disabled(void)
{
	return off;
}
/* Permanently disable the cpufreq core (no locking: simple one-way flag). */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 129static LIST_HEAD(cpufreq_governor_list);
29464f28 130static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 131
4d5dcc42
VK
/*
 * have_governor_per_policy - whether the registered driver keeps separate
 * governor tunables per policy rather than system-wide.
 * NOTE(review): dereferences cpufreq_driver unconditionally — callers are
 * presumably only reached with a driver registered; confirm.
 */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
136
/*
 * __cpufreq_cpu_get - look up and pin the cpufreq policy of @cpu.
 *
 * On success takes a reference on the driver module and, unless @sysfs
 * is true, on the policy kobject (sysfs callers are already protected
 * by the kobject's own lifetime).  Returns the policy or NULL.
 * cpufreq_driver_lock is held (read side) only for the lookup itself;
 * the acquired references keep the objects alive after it is dropped.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* pin the policy kobject for non-sysfs callers */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
174
175struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
176{
d5aaffa9
DB
177 if (cpufreq_disabled())
178 return NULL;
179
a9144436
SB
180 return __cpufreq_cpu_get(cpu, false);
181}
1da177e4
LT
182EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
183
a9144436
SB
/* Sysfs-path variant: pins the driver module but not the policy kobject. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
188
/* Drop the references taken by __cpufreq_cpu_get(); @sysfs must match
 * the value used when the policy was obtained. */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 195
1da177e4
LT
196void cpufreq_cpu_put(struct cpufreq_policy *data)
197{
d5aaffa9
DB
198 if (cpufreq_disabled())
199 return;
200
a9144436 201 __cpufreq_cpu_put(data, false);
1da177e4
LT
202}
203EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
204
a9144436
SB
/* Counterpart of cpufreq_cpu_get_sysfs(): drops only the module ref. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 209
1da177e4
LT
210/*********************************************************************
211 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
212 *********************************************************************/
213
214/**
215 * adjust_jiffies - adjust the system "loops_per_jiffy"
216 *
217 * This function alters the system "loops_per_jiffy" for the clock
218 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 219 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
220 * per-CPU loops_per_jiffy value wherever possible.
221 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was sampled at; set
 * on the first transition seen and used to rescale thereafter. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

/*
 * Rescale the UP system's loops_per_jiffy for a frequency change.
 * Skipped entirely for CPUFREQ_CONST_LOOPS hardware, where delay loops
 * do not depend on the CPU clock.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* rescale after an actual change, and on suspend/resume adjustments */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled elsewhere; nothing to do here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
251
252
b43a7ffb
VK
/*
 * __cpufreq_notify_transition - notify one CPU's transition stage.
 *
 * Called once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE
 * around every frequency change.  Runs the SRCU transition notifier
 * chain, keeps loops_per_jiffy in sync via adjust_jiffies(), and on
 * POSTCHANGE records the new frequency in policy->cur.
 * Must be called with interrupts enabled (SRCU chain may sleep).
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's bookkeeping over the driver */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.  The per-CPU notification is fanned out to every
 * CPU in the policy by iterating freqs->cpu over policy->cpus.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
313
314
315
316/*********************************************************************
317 * SYSFS INTERFACE *
318 *********************************************************************/
319
3bcb09a3
JF
320static struct cpufreq_governor *__find_governor(const char *str_governor)
321{
322 struct cpufreq_governor *t;
323
324 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 325 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
326 return t;
327
328 return NULL;
329}
330
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, maps "performance"/"powersave" to *policy.
 * For target drivers, resolves *governor from the registered governor
 * list, attempting to modprobe "cpufreq_<name>" if it is not yet
 * registered.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_policy **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex: request_module() may sleep and the
			 * loaded module re-takes it to register itself */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
379
380
1da177e4 381/**
e08f5f5b
GS
382 * cpufreq_per_cpu_attr_read() / show_##file_name() -
383 * print out cpufreq information
1da177e4
LT
384 *
385 * Write out information from cpufreq_driver->policy[cpu]; object must be
386 * "unsigned int".
387 */
388
32ee8c3e
DJ
/* Generate a sysfs show_<name>() that prints one unsigned int member of
 * the policy followed by a newline. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
402
e08f5f5b
GS
/* Forward declaration: defined later in this file. */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Parses one unsigned int from @buf into a copy of the current policy,
 * applies it via __cpufreq_set_policy(), and mirrors the applied value
 * into user_policy.
 * NOTE(review): ret is declared unsigned but receives the (possibly
 * negative) return of __cpufreq_set_policy(); the "ret ? ret : count"
 * result is then returned as ssize_t — confirm error propagation.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
432
433/**
434 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
435 */
905d77cd
DJ
436static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
437 char *buf)
1da177e4 438{
5a01f2e8 439 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
440 if (!cur_freq)
441 return sprintf(buf, "<unknown>");
442 return sprintf(buf, "%u\n", cur_freq);
443}
444
445
446/**
447 * show_scaling_governor - show the current policy for the specified CPU
448 */
/* Show the current policy: "powersave"/"performance" for setpolicy
 * drivers, otherwise the active governor's name. */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	/* neither a static policy nor a governor is set */
	return -EINVAL;
}
460
461
462/**
463 * store_scaling_governor - store policy for the specified CPU
464 */
905d77cd
DJ
/*
 * store_scaling_governor - switch policy/governor from sysfs.
 *
 * Parses the governor name, resolves it via cpufreq_parse_governor()
 * and applies it with __cpufreq_set_policy(), then mirrors the result
 * into user_policy.  Returns @count on success or a negative errno.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* record what was actually applied, even on partial failure */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
496
497/**
498 * show_scaling_driver - show the cpufreq driver currently loaded
499 */
/* Show the name of the currently registered cpufreq driver. */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
504
505/**
506 * show_scaling_available_governors - show the available CPUfreq governors
507 */
905d77cd
DJ
/*
 * show_scaling_available_governors - list selectable governors.
 *
 * setpolicy-only drivers expose the two fixed policies; target drivers
 * list every registered governor, space separated, within PAGE_SIZE.
 * NOTE(review): walks cpufreq_governor_list without taking
 * cpufreq_governor_mutex — confirm this is safe against concurrent
 * governor (un)registration.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop early if another name + separator would overflow */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 529
835481d9 530static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
531{
532 ssize_t i = 0;
533 unsigned int cpu;
534
835481d9 535 for_each_cpu(cpu, mask) {
1da177e4
LT
536 if (i)
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
538 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
539 if (i >= (PAGE_SIZE - 5))
29464f28 540 break;
1da177e4
LT
541 }
542 i += sprintf(&buf[i], "\n");
543 return i;
544}
545
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
554
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
562
/*
 * store_scaling_setspeed - forward a target frequency to the governor.
 *
 * Only valid when the active governor implements store_setspeed
 * (e.g. a userspace-controlled governor); otherwise -EINVAL.
 */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
580
/* Show the governor's set speed, or "<unsupported>" when the governor
 * does not implement show_setspeed. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 588
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 *
 * Falls back to cpuinfo.max_freq when the driver has no bios_limit
 * callback or the callback fails.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
603
6dad2a29
BP
/* sysfs attribute definitions; cpuinfo_cur_freq is root-readable only
 * (0400) since reading it may poke hardware. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 618
/* Attributes created for every policy kobject; driver-specific and
 * conditional attributes (cpuinfo_cur_freq, scaling_cur_freq,
 * bios_limit) are added separately in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
633
8aa84ad8
TR
/* Global /sys/devices/system/cpu/cpufreq kobject, shared with governors. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Map a policy kobject / freq attribute back to its container. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 639
/*
 * Generic sysfs show dispatcher for policy attributes: pins the policy,
 * takes the policy rwsem in read mode, then calls the attribute's
 * show() hook.  Teardown strictly mirrors acquisition order.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-resolve through the refcounting helper (module pin) */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
663
905d77cd
DJ
/*
 * Generic sysfs store dispatcher: same structure as show(), but takes
 * the policy rwsem in write mode around the attribute's store() hook.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
688
/* kobject release: wake whoever is waiting in wait_for_completion()
 * during policy teardown. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
695
/* sysfs plumbing for policy kobjects. */
static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
706
/* symlink affected CPUs */
/*
 * For every CPU in the policy other than @cpu, create a "cpufreq"
 * symlink in that CPU's device directory pointing at the shared policy
 * kobject, taking a policy reference per link.
 * NOTE(review): the cpufreq_cpu_get() result is not NULL-checked, the
 * reference is taken on @cpu rather than on @j, and get_cpu_device(j)
 * is assumed non-NULL — confirm these invariants hold for all callers.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			/* drop the reference taken for this failed link */
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
733
cf3289d0
AC
/*
 * cpufreq_add_dev_interface - create the sysfs interface for a new
 * policy and start it.
 *
 * Registers the policy kobject under @dev, creates the driver and
 * conditional attribute files, publishes the policy in the per-CPU
 * tables under cpufreq_driver_lock, links sibling CPUs, and finally
 * applies the default policy via __cpufreq_set_policy().
 * On attribute failure the kobject is released and the caller's
 * kobj_unregister completion is awaited before returning.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for all member CPUs */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	/* wait for cpufreq_sysfs_release() before the caller frees policy */
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
806
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach hotplugged @cpu to @sibling's
 * existing policy.
 *
 * The governor is stopped before the cpumask/table update and
 * restarted (plus GOV_LIMITS) afterwards, so it never runs against a
 * half-updated policy->cpus.  The table update itself is done under
 * both the sibling's policy rwsem and cpufreq_driver_lock.
 * Keeps the cpufreq_cpu_get() reference on success (dropped on
 * symlink failure).
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
846
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * Fast paths: a CPU that already has a policy is a no-op; a CPU whose
 * hot-unplugged sibling's policy covers it is re-attached via
 * cpufreq_add_policy_cpu().  Otherwise a fresh policy is allocated,
 * initialized by the driver, optionally given the governor remembered
 * from before a hot-unplug, and published through
 * cpufreq_add_dev_interface().  Errors unwind in strict reverse order
 * of construction.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* restore the governor this CPU used before it was unplugged */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	/* wait for the release callback before freeing the policy below */
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
984
b8eed8af
VK
/*
 * update_policy_cpu - make @cpu the owner CPU of @policy
 *
 * Records the previous owner in policy->last_cpu, points every sibling's
 * per-cpu policy_cpu entry at the new owner, and notifies listeners so
 * e.g. the frequency table sysfs ownership can follow the policy kobject.
 * NOTE(review): caller is expected to hold the policy rwsem in write mode
 * (see call site in __cpufreq_remove_dev) — confirm before reuse.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* All CPUs sharing this policy resolve their lock/owner via the
	 * per-cpu policy_cpu index; repoint each of them at the new owner. */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1001
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 *
 * If @cpu is the last user of its policy, the governor is fully torn down
 * and the policy freed; otherwise the policy (and its sysfs directory)
 * migrates to the first remaining sibling CPU.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Atomically detach the per-cpu policy pointer under the driver lock
	 * so concurrent lookups no longer see this CPU. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Stop the governor before touching the policy's cpumask. */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored if the CPU comes back. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Non-owner CPU: only its "cpufreq" symlink needs removing. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add this CPU to the policy and restore
			 * the per-cpu pointer dropped above. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target)
			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		/* Snapshot kobj pointers under the read lock, then drop the
		 * last reference outside it. */
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else {
		pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
		cpufreq_cpu_put(data);
		/* Restart the governor for the remaining siblings. */
		if (cpufreq_driver->target) {
			__cpufreq_governor(data, CPUFREQ_GOV_START);
			__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
		}
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1116
1117
8a25a2fd 1118static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1119{
8a25a2fd 1120 unsigned int cpu = dev->id;
5a01f2e8 1121 int retval;
ec28297a
VP
1122
1123 if (cpu_is_offline(cpu))
1124 return 0;
1125
8a25a2fd 1126 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1127 return retval;
1128}
1129
1130
65f27f38 1131static void handle_update(struct work_struct *work)
1da177e4 1132{
65f27f38
DH
1133 struct cpufreq_policy *policy =
1134 container_of(work, struct cpufreq_policy, update);
1135 unsigned int cpu = policy->cpu;
2d06d8c4 1136 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1137 cpufreq_update_policy(cpu);
1138}
1139
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;


	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	/* Look up the policy under the driver lock; it may be NULL during
	 * hotplug — the notifier chain presumably tolerates that (TODO confirm). */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Fake a full transition so governors/stats resync to reality. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1170
1171
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 * Returns 0 if no policy exists for @cpu.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	/* setpolicy drivers manage frequency themselves, so the cached
	 * policy->cur is not meaningful — query the hardware instead. */
	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
1196
3d737108
JB
1197/**
1198 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1199 * @cpu: CPU number
1200 *
1201 * Just return the max possible frequency for a given CPU.
1202 */
1203unsigned int cpufreq_quick_get_max(unsigned int cpu)
1204{
1205 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1206 unsigned int ret_freq = 0;
1207
1208 if (policy) {
1209 ret_freq = policy->max;
1210 cpufreq_cpu_put(policy);
1211 }
1212
1213 return ret_freq;
1214}
1215EXPORT_SYMBOL(cpufreq_quick_get_max);
1216
95235ca2 1217
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu
 *
 * Caller must hold the policy rwsem (see cpufreq_get()).  If the value
 * reported by hardware disagrees with the cached policy->cur (and the
 * driver doesn't set CPUFREQ_CONST_LOOPS), a resync is triggered.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			/* Adjust now, and schedule a full policy
			 * re-evaluation in process context. */
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1240
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.
 * Returns 0 if there is no policy for @cpu or the rwsem cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	/* lock_policy_rwsem_read() fails when the policy is going away. */
	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1268
8a25a2fd
KS
/* Hook cpufreq into the CPU subsystem: add/remove are called for every
 * CPU device registered with the cpu bus. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1275
1da177e4 1276
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Returns 0 on success or when there is nothing to do; otherwise the
 * driver's ->suspend() error code.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1307
1da177e4 1308/**
e00e56df 1309 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1310 *
1311 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1312 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1313 * restored. It will verify that the current freq is in sync with
1314 * what we believe it to be. This is a bit later than when it
1315 * should be, but nonethteless it's better than calling
1316 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1317 *
1318 * This function is only executed for the boot CPU. The other CPUs have not
1319 * been turned on yet.
1da177e4 1320 */
e00e56df 1321static void cpufreq_bp_resume(void)
1da177e4 1322{
e08f5f5b 1323 int ret = 0;
4bc5d341 1324
e00e56df 1325 int cpu = smp_processor_id();
1da177e4
LT
1326 struct cpufreq_policy *cpu_policy;
1327
2d06d8c4 1328 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1329
e00e56df 1330 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1331 cpu_policy = cpufreq_cpu_get(cpu);
1332 if (!cpu_policy)
e00e56df 1333 return;
1da177e4 1334
1c3d85dd
RW
1335 if (cpufreq_driver->resume) {
1336 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1337 if (ret) {
1338 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1339 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1340 goto fail;
1da177e4
LT
1341 }
1342 }
1343
1da177e4 1344 schedule_work(&cpu_policy->update);
ce6c3997 1345
c9060494 1346fail:
1da177e4 1347 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1348}
1349
e00e56df
RW
/* syscore ops run late in suspend / early in resume, boot CPU only,
 * with interrupts disabled. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1354
9d95046e
BP
1355/**
1356 * cpufreq_get_current_driver - return current driver's name
1357 *
1358 * Return the name string of the currently loaded cpufreq driver
1359 * or NULL, if none.
1360 */
1361const char *cpufreq_get_current_driver(void)
1362{
1c3d85dd
RW
1363 if (cpufreq_driver)
1364 return cpufreq_driver->name;
1365
1366 return NULL;
9d95046e
BP
1367}
1368EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1369
1370/*********************************************************************
1371 * NOTIFIER LISTS INTERFACE *
1372 *********************************************************************/
1373
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* Catch registration before the SRCU chain is initialised. */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* SRCU chain: transition notifications may fire from
		 * contexts where blocking chains are unsuitable. */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1412
1413
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1447
1448
1449/*********************************************************************
1450 * GOVERNORS *
1451 *********************************************************************/
1452
1453
/*
 * __cpufreq_driver_target - ask the driver to switch to @target_freq
 *
 * Clamps @target_freq into [policy->min, policy->max], skips the call when
 * the clamped target equals the current frequency, and otherwise forwards
 * to the driver's ->target() hook.  Caller must hold the policy rwsem in
 * write mode (see cpufreq_driver_target()).
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/* Nothing to do if we're already there. */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1482
1da177e4
LT
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target
 *
 * Takes a reference on the policy and the per-CPU policy rwsem (write)
 * before requesting the frequency change, so it is safe to call from
 * contexts that do not already hold the policy lock.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* Re-acquire the policy by CPU to pin it for the duration. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1506
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency estimate
 *
 * Returns 0 when cpufreq is disabled or the driver provides no ->getavg
 * hook, -EINVAL when the policy vanished, otherwise the driver's result.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	/* Pin the policy while calling into the driver. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1527
153d7f3f 1528/*
153d7f3f
AV
1529 * when "event" is CPUFREQ_GOV_LIMITS
1530 */
1da177e4 1531
e08f5f5b
GS
1532static int __cpufreq_governor(struct cpufreq_policy *policy,
1533 unsigned int event)
1da177e4 1534{
cc993cab 1535 int ret;
6afde10c
TR
1536
1537 /* Only must be defined when default governor is known to have latency
1538 restrictions, like e.g. conservative or ondemand.
1539 That this is the case is already ensured in Kconfig
1540 */
1541#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1542 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1543#else
1544 struct cpufreq_governor *gov = NULL;
1545#endif
1c256245
TR
1546
1547 if (policy->governor->max_transition_latency &&
1548 policy->cpuinfo.transition_latency >
1549 policy->governor->max_transition_latency) {
6afde10c
TR
1550 if (!gov)
1551 return -EINVAL;
1552 else {
1553 printk(KERN_WARNING "%s governor failed, too long"
1554 " transition latency of HW, fallback"
1555 " to %s governor\n",
1556 policy->governor->name,
1557 gov->name);
1558 policy->governor = gov;
1559 }
1c256245 1560 }
1da177e4
LT
1561
1562 if (!try_module_get(policy->governor->owner))
1563 return -EINVAL;
1564
2d06d8c4 1565 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1566 policy->cpu, event);
ba17ca46
XC
1567
1568 mutex_lock(&cpufreq_governor_lock);
1569 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1570 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1571 mutex_unlock(&cpufreq_governor_lock);
1572 return -EBUSY;
1573 }
1574
1575 if (event == CPUFREQ_GOV_STOP)
1576 policy->governor_enabled = false;
1577 else if (event == CPUFREQ_GOV_START)
1578 policy->governor_enabled = true;
1579
1580 mutex_unlock(&cpufreq_governor_lock);
1581
1da177e4
LT
1582 ret = policy->governor->governor(policy, event);
1583
4d5dcc42
VK
1584 if (!ret) {
1585 if (event == CPUFREQ_GOV_POLICY_INIT)
1586 policy->governor->initialized++;
1587 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1588 policy->governor->initialized--;
ba17ca46
XC
1589 } else {
1590 /* Restore original values */
1591 mutex_lock(&cpufreq_governor_lock);
1592 if (event == CPUFREQ_GOV_STOP)
1593 policy->governor_enabled = true;
1594 else if (event == CPUFREQ_GOV_START)
1595 policy->governor_enabled = false;
1596 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1597 }
b394058f 1598
e08f5f5b
GS
1599 /* we keep one module reference alive for
1600 each CPU governed by this CPU */
1da177e4
LT
1601 if ((event != CPUFREQ_GOV_START) || ret)
1602 module_put(policy->governor->owner);
1603 if ((event == CPUFREQ_GOV_STOP) && !ret)
1604 module_put(policy->governor->owner);
1605
1606 return ret;
1607}
1608
1609
1da177e4
LT
/*
 * cpufreq_register_governor - add @governor to the global governor list
 *
 * Returns 0 on success, -EINVAL for a NULL governor, -ENODEV when cpufreq
 * is disabled, and -EBUSY when a governor of the same name already exists.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	/* Name must be unique across registered governors. */
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1632EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1633
1634
/*
 * cpufreq_unregister_governor - remove @governor from the governor list
 *
 * Also clears any saved "restore this governor on CPU online" name for
 * currently-offline CPUs so a later hotplug cannot resurrect a governor
 * whose module is gone.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* Offline CPU remembered this governor: forget it. */
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1662
1663
1664
1665/*********************************************************************
1666 * POLICY INTERFACE *
1667 *********************************************************************/
1668
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy is copied
 *
 * Reads the current cpufreq policy.  Returns -EINVAL when @policy is NULL
 * or no policy exists for @cpu, 0 on success.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* Shallow snapshot; the caller gets a copy, not a reference. */
	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
1692
1693
/*
 * __cpufreq_set_policy - apply a new policy to an existing one
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the requested limits against the driver and the policy
 * notifier chain, then either calls the driver's ->setpolicy or performs
 * a governor switch (stop old / init+start new, with rollback to the old
 * governor if the new one fails).  Caller holds the policy rwsem in write
 * mode; it is temporarily dropped around GOV_POLICY_EXIT because governors
 * may take locks of their own there.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Requested range must overlap the currently active range. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				/* Drop the rwsem around POLICY_EXIT to avoid
				 * deadlock with governor-internal locking. */
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: tear the new governor
					 * back down (rwsem dropped as above). */
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1799
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds a candidate policy from the user_policy limits, resyncs the
 * cached current frequency with the hardware (the BIOS may have changed
 * it behind our back) and re-applies the policy.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the live policy, then overlay the user's limits. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1854
/*
 * cpufreq_cpu_callback - CPU hotplug notifier
 *
 * Mirrors device add/remove onto hotplug events: create the cpufreq
 * interface when a CPU comes online (or a failed offline is rolled back),
 * tear it down before a CPU goes offline.
 */
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			/* Offline aborted: restore the interface. */
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}
1880
/* __refdata: legitimately references the __cpuinit callback above. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1884
1885/*********************************************************************
1886 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1887 *********************************************************************/
1888
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must provide ->verify, ->init and one of
	 * ->setpolicy / ->target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* This triggers cpufreq_add_dev() for every online CPU. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1960
1961
1962/**
1963 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1964 *
32ee8c3e 1965 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1966 * the right to do so, i.e. if you have succeeded in initialising before!
1967 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1968 * currently not initialised.
1969 */
221dee28 1970int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1971{
1972 unsigned long flags;
1973
1c3d85dd 1974 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1975 return -EINVAL;
1da177e4 1976
2d06d8c4 1977 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1978
8a25a2fd 1979 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1980 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 1981
0d1857a1 1982 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1983 cpufreq_driver = NULL;
0d1857a1 1984 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1985
1986 return 0;
1987}
1988EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1989
1990static int __init cpufreq_core_init(void)
1991{
1992 int cpu;
1993
a7b422cd
KRW
1994 if (cpufreq_disabled())
1995 return -ENODEV;
1996
5a01f2e8 1997 for_each_possible_cpu(cpu) {
f1625066 1998 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1999 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2000 }
8aa84ad8 2001
8a25a2fd 2002 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2003 BUG_ON(!cpufreq_global_kobject);
e00e56df 2004 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2005
5a01f2e8
VP
2006 return 0;
2007}
5a01f2e8 2008core_initcall(cpufreq_core_init);