cpufreq: cpufreq-cpu0: use the exact frequency for clk_set_rate()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy that manages this CPU (shared for siblings). */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
/* Guards cpufreq_driver and cpufreq_cpu_data; taken with irqs saved. */
static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
/* Maps each CPU to the CPU whose rwsem/policy covers it; -1 = unmanaged. */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

/* Generates lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolves
 * @cpu to its policy-owning CPU and takes that CPU's semaphore. */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 82
/* Generates unlock_policy_rwsem_read()/unlock_policy_rwsem_write():
 * counterpart to lock_policy_rwsem(); releases the policy-owning CPU's sem. */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 93
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
1da177e4
LT
99
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Set once the SRCU head below has been initialized; checked by
 * cpufreq_register_notifier() before use. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: must run before any registration/transition can happen. */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
/* Non-zero once disable_cpufreq() has been called; cpufreq then refuses
 * to hand out policies or deliver notifications. Never cleared. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
/* Registered governors; additions/lookups serialized by the mutex below. */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
/* True when the active driver keeps separate governor tunables per policy
 * rather than one global set. Assumes cpufreq_driver is non-NULL —
 * NOTE(review): no NULL guard here; confirm callers only run with a
 * registered driver. */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
135
a9144436 136static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
137{
138 struct cpufreq_policy *data;
139 unsigned long flags;
140
7a6aedfa 141 if (cpu >= nr_cpu_ids)
1da177e4
LT
142 goto err_out;
143
144 /* get the cpufreq driver */
1c3d85dd 145 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 146
1c3d85dd 147 if (!cpufreq_driver)
1da177e4
LT
148 goto err_out_unlock;
149
1c3d85dd 150 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
151 goto err_out_unlock;
152
153
154 /* get the CPU */
7a6aedfa 155 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
156
157 if (!data)
158 goto err_out_put_module;
159
a9144436 160 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
161 goto err_out_put_module;
162
0d1857a1 163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
164 return data;
165
7d5e350f 166err_out_put_module:
1c3d85dd 167 module_put(cpufreq_driver->owner);
5800043b 168err_out_unlock:
1c3d85dd 169 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 170err_out:
1da177e4
LT
171 return NULL;
172}
a9144436
SB
173
174struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
175{
d5aaffa9
DB
176 if (cpufreq_disabled())
177 return NULL;
178
a9144436
SB
179 return __cpufreq_cpu_get(cpu, false);
180}
1da177e4
LT
181EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
182
a9144436
SB
/* Sysfs variant: pins the driver module but NOT the policy kobject —
 * the sysfs file itself keeps the kobject alive. No disabled() check
 * here, unlike cpufreq_cpu_get(). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
187
/* Release references taken by __cpufreq_cpu_get(): the kobject ref
 * (skipped for sysfs callers) and the driver module ref. */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 194
1da177e4
LT
195void cpufreq_cpu_put(struct cpufreq_policy *data)
196{
d5aaffa9
DB
197 if (cpufreq_disabled())
198 return;
199
a9144436 200 __cpufreq_cpu_put(data, false);
1da177e4
LT
201}
202EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
203
a9144436
SB
/* Counterpart of cpufreq_cpu_get_sysfs(): drops only the module ref. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 208
1da177e4
LT
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference point captured on the first transition; all later scaling
 * is computed relative to this frequency/lpj pair. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Drivers with constant loops-per-jiffy need no adjustment. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after a real change (POSTCHANGE with old != new) or on
	 * suspend/resume transitions, which always resync. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU scaling differs, so the global value is left alone. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
250
251
/*
 * Notify listeners of one PRE- or POSTCHANGE step for a single CPU
 * (freqs->cpu). Must be called from process context (BUG_ON otherwise,
 * since SRCU notifiers may sleep). On POSTCHANGE, also commits the new
 * frequency into policy->cur.
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* record the frequency the policy's CPU now runs at */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* fan out one notification per CPU covered by the policy;
	 * freqs->cpu is reused as the iteration cursor */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
312
313
314
315/*********************************************************************
316 * SYSFS INTERFACE *
317 *********************************************************************/
318
3bcb09a3
JF
319static struct cpufreq_governor *__find_governor(const char *str_governor)
320{
321 struct cpufreq_governor *t;
322
323 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 324 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
325 return t;
326
327 return NULL;
328}
329
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, maps "performance"/"powersave" onto *policy.
 * For target drivers, resolves *governor from the registered list,
 * auto-loading the "cpufreq_<name>" module if needed.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_policy **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex: request_module() may sleep and the
			 * loaded module registers under this same mutex */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
378
379
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler that prints one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
401
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
/* Generates a sysfs store handler: parses one unsigned value into a copy
 * of the current policy, applies it, and records the user's request. */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
431
432/**
433 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
434 */
905d77cd
DJ
435static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
436 char *buf)
1da177e4 437{
5a01f2e8 438 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
439 if (!cur_freq)
440 return sprintf(buf, "<unknown>");
441 return sprintf(buf, "%u\n", cur_freq);
442}
443
444
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * For setpolicy drivers this is "powersave"/"performance"; for governor
 * drivers it is the governor's name.
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	/* no static policy and no governor set */
	return -EINVAL;
}
459
460
461/**
462 * store_scaling_governor - store policy for the specified CPU
463 */
905d77cd
DJ
464static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
465 const char *buf, size_t count)
1da177e4 466{
f55c9c26 467 unsigned int ret;
1da177e4
LT
468 char str_governor[16];
469 struct cpufreq_policy new_policy;
470
471 ret = cpufreq_get_policy(&new_policy, policy->cpu);
472 if (ret)
473 return ret;
474
29464f28 475 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
476 if (ret != 1)
477 return -EINVAL;
478
e08f5f5b
GS
479 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
480 &new_policy.governor))
1da177e4
LT
481 return -EINVAL;
482
7970e08b
TR
483 /* Do not use cpufreq_set_policy here or the user_policy.max
484 will be wrongly overridden */
7970e08b
TR
485 ret = __cpufreq_set_policy(policy, &new_policy);
486
487 policy->user_policy.policy = policy->policy;
488 policy->user_policy.governor = policy->governor;
7970e08b 489
e08f5f5b
GS
490 if (ret)
491 return ret;
492 else
493 return count;
1da177e4
LT
494}
495
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
503
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy drivers always offer "performance powersave"; governor
 * drivers list every registered governor that fits in the page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop while there is still room for one name + " \n\0" */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 528
835481d9 529static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
530{
531 ssize_t i = 0;
532 unsigned int cpu;
533
835481d9 534 for_each_cpu(cpu, mask) {
1da177e4
LT
535 if (i)
536 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
537 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
538 if (i >= (PAGE_SIZE - 5))
29464f28 539 break;
1da177e4
LT
540 }
541 i += sprintf(&buf[i], "\n");
542 return i;
543}
544
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
553
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
561
9e76988e 562static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 563 const char *buf, size_t count)
9e76988e
VP
564{
565 unsigned int freq = 0;
566 unsigned int ret;
567
879000f9 568 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
569 return -EINVAL;
570
571 ret = sscanf(buf, "%u", &freq);
572 if (ret != 1)
573 return -EINVAL;
574
575 policy->governor->store_setspeed(policy, freq);
576
577 return count;
578}
579
/* Show the governor's notion of the set speed, or "<unsupported>" when
 * the active governor has no show_setspeed hook. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 587
e2f74f35 588/**
8bf1ac72 589 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
590 */
591static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
592{
593 unsigned int limit;
594 int ret;
1c3d85dd
RW
595 if (cpufreq_driver->bios_limit) {
596 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
597 if (!ret)
598 return sprintf(buf, "%u\n", limit);
599 }
600 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
601}
602
/* sysfs attribute definitions; cpuinfo_cur_freq is root-readable only
 * because reading it may poke hardware. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject; drivers may add more
 * (see cpufreq_add_dev_interface) and bios_limit/cur_freq are optional. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
632
/* /sys/devices/system/cpu/cpufreq — shared home for global tunables. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Recover the policy / freq_attr from the embedded kobject/attribute. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 638
/* Generic sysfs show dispatcher: pins the policy, takes the policy
 * rwsem for reading, and forwards to the attribute's show handler. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-resolve through the get path to pin the driver module */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
662
/* Generic sysfs store dispatcher: like show(), but takes the policy
 * rwsem in write mode before calling the attribute's store handler. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
687
/* kobject release: wakes whoever is waiting in wait_for_completion()
 * for the policy's sysfs presence to be fully gone. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
694
/* sysfs plumbing tying the dispatchers and attributes to policy kobjects. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
705
/* symlink affected CPUs */
/* For every other CPU in the policy, create a "cpufreq" symlink from that
 * CPU's device directory to the shared policy kobject. A policy reference
 * is taken per link and intentionally kept for the link's lifetime. */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* the owning CPU has the real directory, not a link */
		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		/* NOTE(review): cpu_dev is dereferenced without a NULL
		 * check — get_cpu_device() can return NULL; confirm j is
		 * always a registered CPU here. */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
732
/* Create the policy's sysfs directory and files, publish the policy in
 * the per-CPU tables, add sibling symlinks, and apply the initial policy.
 * On failure the kobject is torn down and its unregister completion
 * awaited before returning. */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* optional files, present only when the driver supports them */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every CPU it covers */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
805
#ifdef CONFIG_HOTPLUG_CPU
/* Attach a newly-onlined @cpu to the existing policy of @sibling: the
 * governor is stopped, the CPU is added to the policy's mask/tables under
 * the write rwsem, the governor restarted, and a sysfs symlink created. */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	/* reference is kept while the CPU stays in the policy */
	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
845
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 *
 * Fast paths: already-managed CPUs return 0 immediately; a CPU related to
 * an online sibling's policy is attached via cpufreq_add_policy_cpu().
 * Otherwise a fresh policy is allocated, initialized by the driver, and
 * exposed through cpufreq_add_dev_interface().
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* restore the governor this CPU used before it was unplugged */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* release triggers cpufreq_sysfs_release -> completion */
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
983
b8eed8af
VK
/*
 * update_policy_cpu - retarget a policy's owner CPU after hotplug
 * @policy: policy whose managing CPU is being changed
 * @cpu: the CPU that takes over ownership of @policy
 *
 * Records the previous owner in policy->last_cpu, repoints the per-cpu
 * policy_cpu bookkeeping for every CPU in policy->cpus at the new owner,
 * and notifies interested parties (frequency table code and policy
 * notifiers) that the owning CPU changed.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	/* remember the old owner so notifier consumers can migrate state */
	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* every sibling CPU now resolves its policy lock through @cpu */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	/* keep the freq-table sysfs bookkeeping in sync with the new owner */
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1000
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 *
 * NOTE(review): the comment in older revisions said the caller must hold
 * policy_rwsem in write mode, but this version takes and releases the
 * rwsem itself at each step — confirm against callers before relying on
 * either locking contract.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* detach the per-cpu policy pointer under the driver lock */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* stop the governor before tearing anything down */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* remember the governor so it can be restored if this CPU returns */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	/* drop this CPU from the policy's mask (unless it is the last one) */
	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* a sibling: only its symlink to the shared kobject goes */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* undo the mask/per-cpu changes made above */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		/* sibling becomes the policy owner */
		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* last user: let the governor release its per-policy state */
	if ((cpus == 1) && (cpufreq_driver->target))
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		/* snapshot kobj pointers under the read lock before put */
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* siblings remain: restart the governor for them */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	/* mark this CPU as having no policy owner any more */
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1114
1115
8a25a2fd 1116static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1117{
8a25a2fd 1118 unsigned int cpu = dev->id;
5a01f2e8 1119 int retval;
ec28297a
VP
1120
1121 if (cpu_is_offline(cpu))
1122 return 0;
1123
8a25a2fd 1124 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1125 return retval;
1126}
1127
1128
65f27f38 1129static void handle_update(struct work_struct *work)
1da177e4 1130{
65f27f38
DH
1131 struct cpufreq_policy *policy =
1132 container_of(work, struct cpufreq_policy, update);
1133 unsigned int cpu = policy->cpu;
2d06d8c4 1134 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1135 cpufreq_update_policy(cpu);
1136}
1137
1138/**
1139 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1140 * @cpu: cpu number
1141 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1142 * @new_freq: CPU frequency the CPU actually runs at
1143 *
29464f28
DJ
1144 * We adjust to current frequency first, and need to clean up later.
1145 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1146 */
e08f5f5b
GS
1147static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1148 unsigned int new_freq)
1da177e4 1149{
b43a7ffb 1150 struct cpufreq_policy *policy;
1da177e4 1151 struct cpufreq_freqs freqs;
b43a7ffb
VK
1152 unsigned long flags;
1153
1da177e4 1154
2d06d8c4 1155 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1156 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1157
1da177e4
LT
1158 freqs.old = old_freq;
1159 freqs.new = new_freq;
b43a7ffb
VK
1160
1161 read_lock_irqsave(&cpufreq_driver_lock, flags);
1162 policy = per_cpu(cpufreq_cpu_data, cpu);
1163 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1164
1165 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1166 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1167}
1168
1169
32ee8c3e 1170/**
4ab70df4 1171 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1172 * @cpu: CPU number
1173 *
1174 * This is the last known freq, without actually getting it from the driver.
1175 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1176 */
1177unsigned int cpufreq_quick_get(unsigned int cpu)
1178{
9e21ba8b 1179 struct cpufreq_policy *policy;
e08f5f5b 1180 unsigned int ret_freq = 0;
95235ca2 1181
1c3d85dd
RW
1182 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1183 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1184
1185 policy = cpufreq_cpu_get(cpu);
95235ca2 1186 if (policy) {
e08f5f5b 1187 ret_freq = policy->cur;
95235ca2
VP
1188 cpufreq_cpu_put(policy);
1189 }
1190
4d34a67d 1191 return ret_freq;
95235ca2
VP
1192}
1193EXPORT_SYMBOL(cpufreq_quick_get);
1194
3d737108
JB
1195/**
1196 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1197 * @cpu: CPU number
1198 *
1199 * Just return the max possible frequency for a given CPU.
1200 */
1201unsigned int cpufreq_quick_get_max(unsigned int cpu)
1202{
1203 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1204 unsigned int ret_freq = 0;
1205
1206 if (policy) {
1207 ret_freq = policy->max;
1208 cpufreq_cpu_put(policy);
1209 }
1210
1211 return ret_freq;
1212}
1213EXPORT_SYMBOL(cpufreq_quick_get_max);
1214
95235ca2 1215
5a01f2e8 1216static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1217{
7a6aedfa 1218 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1219 unsigned int ret_freq = 0;
5800043b 1220
1c3d85dd 1221 if (!cpufreq_driver->get)
4d34a67d 1222 return ret_freq;
1da177e4 1223
1c3d85dd 1224 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1225
e08f5f5b 1226 if (ret_freq && policy->cur &&
1c3d85dd 1227 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1228 /* verify no discrepancy between actual and
1229 saved value exists */
1230 if (unlikely(ret_freq != policy->cur)) {
1231 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1232 schedule_work(&policy->update);
1233 }
1234 }
1235
4d34a67d 1236 return ret_freq;
5a01f2e8 1237}
1da177e4 1238
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *pol = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!pol)
		return 0;

	/* only query the driver if we can take the policy read lock */
	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(pol);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1266
8a25a2fd
KS
/* Hooks cpufreq into the cpu subsystem: add_dev/remove_dev are called for
 * each CPU device when the interface is (un)registered and on hotplug. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1273
1da177e4 1274
42d4dc3f 1275/**
e00e56df
RW
1276 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1277 *
1278 * This function is only executed for the boot processor. The other CPUs
1279 * have been put offline by means of CPU hotplug.
42d4dc3f 1280 */
e00e56df 1281static int cpufreq_bp_suspend(void)
42d4dc3f 1282{
e08f5f5b 1283 int ret = 0;
4bc5d341 1284
e00e56df 1285 int cpu = smp_processor_id();
42d4dc3f
BH
1286 struct cpufreq_policy *cpu_policy;
1287
2d06d8c4 1288 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1289
e00e56df 1290 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1291 cpu_policy = cpufreq_cpu_get(cpu);
1292 if (!cpu_policy)
e00e56df 1293 return 0;
42d4dc3f 1294
1c3d85dd
RW
1295 if (cpufreq_driver->suspend) {
1296 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1297 if (ret)
42d4dc3f
BH
1298 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1299 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1300 }
1301
42d4dc3f 1302 cpufreq_cpu_put(cpu_policy);
c9060494 1303 return ret;
42d4dc3f
BH
1304}
1305
1da177e4 1306/**
e00e56df 1307 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1308 *
1309 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1310 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1311 * restored. It will verify that the current freq is in sync with
1312 * what we believe it to be. This is a bit later than when it
1313 * should be, but nonethteless it's better than calling
1314 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1315 *
1316 * This function is only executed for the boot CPU. The other CPUs have not
1317 * been turned on yet.
1da177e4 1318 */
e00e56df 1319static void cpufreq_bp_resume(void)
1da177e4 1320{
e08f5f5b 1321 int ret = 0;
4bc5d341 1322
e00e56df 1323 int cpu = smp_processor_id();
1da177e4
LT
1324 struct cpufreq_policy *cpu_policy;
1325
2d06d8c4 1326 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1327
e00e56df 1328 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1329 cpu_policy = cpufreq_cpu_get(cpu);
1330 if (!cpu_policy)
e00e56df 1331 return;
1da177e4 1332
1c3d85dd
RW
1333 if (cpufreq_driver->resume) {
1334 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1335 if (ret) {
1336 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1337 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1338 goto fail;
1da177e4
LT
1339 }
1340 }
1341
1da177e4 1342 schedule_work(&cpu_policy->update);
ce6c3997 1343
c9060494 1344fail:
1da177e4 1345 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1346}
1347
e00e56df
RW
/* syscore ops run on the boot CPU with interrupts disabled, late in
 * suspend / early in resume — hence the bp_ (boot processor) handlers. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1352
9d95046e
BP
1353/**
1354 * cpufreq_get_current_driver - return current driver's name
1355 *
1356 * Return the name string of the currently loaded cpufreq driver
1357 * or NULL, if none.
1358 */
1359const char *cpufreq_get_current_driver(void)
1360{
1c3d85dd
RW
1361 if (cpufreq_driver)
1362 return cpufreq_driver->name;
1363
1364 return NULL;
9d95046e
BP
1365}
1366EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1367
1368/*********************************************************************
1369 * NOTIFIER LISTS INTERFACE *
1370 *********************************************************************/
1371
1372/**
1373 * cpufreq_register_notifier - register a driver with cpufreq
1374 * @nb: notifier function to register
1375 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1376 *
32ee8c3e 1377 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1378 * are notified about clock rate changes (once before and once after
1379 * the transition), or a list of drivers that are notified about
1380 * changes in cpufreq policy.
1381 *
1382 * This function may sleep, and has the same return conditions as
e041c683 1383 * blocking_notifier_chain_register.
1da177e4
LT
1384 */
1385int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1386{
1387 int ret;
1388
d5aaffa9
DB
1389 if (cpufreq_disabled())
1390 return -EINVAL;
1391
74212ca4
CEB
1392 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1393
1da177e4
LT
1394 switch (list) {
1395 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1396 ret = srcu_notifier_chain_register(
e041c683 1397 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1398 break;
1399 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1400 ret = blocking_notifier_chain_register(
1401 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1402 break;
1403 default:
1404 ret = -EINVAL;
1405 }
1da177e4
LT
1406
1407 return ret;
1408}
1409EXPORT_SYMBOL(cpufreq_register_notifier);
1410
1411
1412/**
1413 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1414 * @nb: notifier block to be unregistered
1415 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1416 *
1417 * Remove a driver from the CPU frequency notifier list.
1418 *
1419 * This function may sleep, and has the same return conditions as
e041c683 1420 * blocking_notifier_chain_unregister.
1da177e4
LT
1421 */
1422int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1423{
1424 int ret;
1425
d5aaffa9
DB
1426 if (cpufreq_disabled())
1427 return -EINVAL;
1428
1da177e4
LT
1429 switch (list) {
1430 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1431 ret = srcu_notifier_chain_unregister(
e041c683 1432 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1433 break;
1434 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1435 ret = blocking_notifier_chain_unregister(
1436 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1437 break;
1438 default:
1439 ret = -EINVAL;
1440 }
1da177e4
LT
1441
1442 return ret;
1443}
1444EXPORT_SYMBOL(cpufreq_unregister_notifier);
1445
1446
1447/*********************************************************************
1448 * GOVERNORS *
1449 *********************************************************************/
1450
1451
1452int __cpufreq_driver_target(struct cpufreq_policy *policy,
1453 unsigned int target_freq,
1454 unsigned int relation)
1455{
1456 int retval = -EINVAL;
7249924e 1457 unsigned int old_target_freq = target_freq;
c32b6b8e 1458
a7b422cd
KRW
1459 if (cpufreq_disabled())
1460 return -ENODEV;
1461
7249924e
VK
1462 /* Make sure that target_freq is within supported range */
1463 if (target_freq > policy->max)
1464 target_freq = policy->max;
1465 if (target_freq < policy->min)
1466 target_freq = policy->min;
1467
1468 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1469 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1470
1471 if (target_freq == policy->cur)
1472 return 0;
1473
1c3d85dd
RW
1474 if (cpufreq_driver->target)
1475 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1476
1da177e4
LT
1477 return retval;
1478}
1479EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1480
1da177e4
LT
1481int cpufreq_driver_target(struct cpufreq_policy *policy,
1482 unsigned int target_freq,
1483 unsigned int relation)
1484{
f1829e4a 1485 int ret = -EINVAL;
1da177e4
LT
1486
1487 policy = cpufreq_cpu_get(policy->cpu);
1488 if (!policy)
f1829e4a 1489 goto no_policy;
1da177e4 1490
5a01f2e8 1491 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1492 goto fail;
1da177e4
LT
1493
1494 ret = __cpufreq_driver_target(policy, target_freq, relation);
1495
5a01f2e8 1496 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1497
f1829e4a 1498fail:
1da177e4 1499 cpufreq_cpu_put(policy);
f1829e4a 1500no_policy:
1da177e4
LT
1501 return ret;
1502}
1503EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1504
bf0b90e3 1505int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1506{
1507 int ret = 0;
1508
d5aaffa9
DB
1509 if (cpufreq_disabled())
1510 return ret;
1511
1c3d85dd 1512 if (!cpufreq_driver->getavg)
0676f7f2
VK
1513 return 0;
1514
dfde5d62
VP
1515 policy = cpufreq_cpu_get(policy->cpu);
1516 if (!policy)
1517 return -EINVAL;
1518
1c3d85dd 1519 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1520
dfde5d62
VP
1521 cpufreq_cpu_put(policy);
1522 return ret;
1523}
5a01f2e8 1524EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1525
153d7f3f 1526/*
153d7f3f
AV
1527 * when "event" is CPUFREQ_GOV_LIMITS
1528 */
1da177e4 1529
e08f5f5b
GS
1530static int __cpufreq_governor(struct cpufreq_policy *policy,
1531 unsigned int event)
1da177e4 1532{
cc993cab 1533 int ret;
6afde10c
TR
1534
1535 /* Only must be defined when default governor is known to have latency
1536 restrictions, like e.g. conservative or ondemand.
1537 That this is the case is already ensured in Kconfig
1538 */
1539#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1540 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1541#else
1542 struct cpufreq_governor *gov = NULL;
1543#endif
1c256245
TR
1544
1545 if (policy->governor->max_transition_latency &&
1546 policy->cpuinfo.transition_latency >
1547 policy->governor->max_transition_latency) {
6afde10c
TR
1548 if (!gov)
1549 return -EINVAL;
1550 else {
1551 printk(KERN_WARNING "%s governor failed, too long"
1552 " transition latency of HW, fallback"
1553 " to %s governor\n",
1554 policy->governor->name,
1555 gov->name);
1556 policy->governor = gov;
1557 }
1c256245 1558 }
1da177e4
LT
1559
1560 if (!try_module_get(policy->governor->owner))
1561 return -EINVAL;
1562
2d06d8c4 1563 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1564 policy->cpu, event);
1da177e4
LT
1565 ret = policy->governor->governor(policy, event);
1566
4d5dcc42
VK
1567 if (!ret) {
1568 if (event == CPUFREQ_GOV_POLICY_INIT)
1569 policy->governor->initialized++;
1570 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1571 policy->governor->initialized--;
1572 }
b394058f 1573
e08f5f5b
GS
1574 /* we keep one module reference alive for
1575 each CPU governed by this CPU */
1da177e4
LT
1576 if ((event != CPUFREQ_GOV_START) || ret)
1577 module_put(policy->governor->owner);
1578 if ((event == CPUFREQ_GOV_STOP) && !ret)
1579 module_put(policy->governor->owner);
1580
1581 return ret;
1582}
1583
1584
1da177e4
LT
1585int cpufreq_register_governor(struct cpufreq_governor *governor)
1586{
3bcb09a3 1587 int err;
1da177e4
LT
1588
1589 if (!governor)
1590 return -EINVAL;
1591
a7b422cd
KRW
1592 if (cpufreq_disabled())
1593 return -ENODEV;
1594
3fc54d37 1595 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1596
b394058f 1597 governor->initialized = 0;
3bcb09a3
JF
1598 err = -EBUSY;
1599 if (__find_governor(governor->name) == NULL) {
1600 err = 0;
1601 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1602 }
1da177e4 1603
32ee8c3e 1604 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1605 return err;
1da177e4
LT
1606}
1607EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1608
1609
1610void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1611{
90e41bac
PB
1612#ifdef CONFIG_HOTPLUG_CPU
1613 int cpu;
1614#endif
1615
1da177e4
LT
1616 if (!governor)
1617 return;
1618
a7b422cd
KRW
1619 if (cpufreq_disabled())
1620 return;
1621
90e41bac
PB
1622#ifdef CONFIG_HOTPLUG_CPU
1623 for_each_present_cpu(cpu) {
1624 if (cpu_online(cpu))
1625 continue;
1626 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1627 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1628 }
1629#endif
1630
3fc54d37 1631 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1632 list_del(&governor->governor_list);
3fc54d37 1633 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1634 return;
1635}
1636EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1637
1638
1639
1640/*********************************************************************
1641 * POLICY INTERFACE *
1642 *********************************************************************/
1643
1644/**
1645 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1646 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1647 * is written
1da177e4
LT
1648 *
1649 * Reads the current cpufreq policy.
1650 */
1651int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1652{
1653 struct cpufreq_policy *cpu_policy;
1654 if (!policy)
1655 return -EINVAL;
1656
1657 cpu_policy = cpufreq_cpu_get(cpu);
1658 if (!cpu_policy)
1659 return -EINVAL;
1660
1da177e4 1661 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1662
1663 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1664 return 0;
1665}
1666EXPORT_SYMBOL(cpufreq_get_policy);
1667
1668
153d7f3f 1669/*
e08f5f5b
GS
1670 * data : current policy.
1671 * policy : policy to be set.
153d7f3f 1672 */
e08f5f5b
GS
1673static int __cpufreq_set_policy(struct cpufreq_policy *data,
1674 struct cpufreq_policy *policy)
1da177e4 1675{
7bd353a9 1676 int ret = 0, failed = 1;
1da177e4 1677
2d06d8c4 1678 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1679 policy->min, policy->max);
1680
e08f5f5b
GS
1681 memcpy(&policy->cpuinfo, &data->cpuinfo,
1682 sizeof(struct cpufreq_cpuinfo));
1da177e4 1683
53391fa2 1684 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1685 ret = -EINVAL;
1686 goto error_out;
1687 }
1688
1da177e4 1689 /* verify the cpu speed can be set within this limit */
1c3d85dd 1690 ret = cpufreq_driver->verify(policy);
1da177e4
LT
1691 if (ret)
1692 goto error_out;
1693
1da177e4 1694 /* adjust if necessary - all reasons */
e041c683
AS
1695 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1696 CPUFREQ_ADJUST, policy);
1da177e4
LT
1697
1698 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1699 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1700 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1701
1702 /* verify the cpu speed can be set within this limit,
1703 which might be different to the first one */
1c3d85dd 1704 ret = cpufreq_driver->verify(policy);
e041c683 1705 if (ret)
1da177e4 1706 goto error_out;
1da177e4
LT
1707
1708 /* notification of the new policy */
e041c683
AS
1709 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1710 CPUFREQ_NOTIFY, policy);
1da177e4 1711
7d5e350f
DJ
1712 data->min = policy->min;
1713 data->max = policy->max;
1da177e4 1714
2d06d8c4 1715 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1716 data->min, data->max);
1da177e4 1717
1c3d85dd 1718 if (cpufreq_driver->setpolicy) {
1da177e4 1719 data->policy = policy->policy;
2d06d8c4 1720 pr_debug("setting range\n");
1c3d85dd 1721 ret = cpufreq_driver->setpolicy(policy);
1da177e4
LT
1722 } else {
1723 if (policy->governor != data->governor) {
1724 /* save old, working values */
1725 struct cpufreq_governor *old_gov = data->governor;
1726
2d06d8c4 1727 pr_debug("governor switch\n");
1da177e4
LT
1728
1729 /* end old governor */
7bd353a9 1730 if (data->governor) {
1da177e4 1731 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
955ef483 1732 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1733 __cpufreq_governor(data,
1734 CPUFREQ_GOV_POLICY_EXIT);
955ef483 1735 lock_policy_rwsem_write(policy->cpu);
7bd353a9 1736 }
1da177e4
LT
1737
1738 /* start new governor */
1739 data->governor = policy->governor;
7bd353a9 1740 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
955ef483 1741 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
7bd353a9 1742 failed = 0;
955ef483
VK
1743 } else {
1744 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1745 __cpufreq_governor(data,
1746 CPUFREQ_GOV_POLICY_EXIT);
955ef483
VK
1747 lock_policy_rwsem_write(policy->cpu);
1748 }
7bd353a9
VK
1749 }
1750
1751 if (failed) {
1da177e4 1752 /* new governor failed, so re-start old one */
2d06d8c4 1753 pr_debug("starting governor %s failed\n",
e08f5f5b 1754 data->governor->name);
1da177e4
LT
1755 if (old_gov) {
1756 data->governor = old_gov;
7bd353a9
VK
1757 __cpufreq_governor(data,
1758 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1759 __cpufreq_governor(data,
1760 CPUFREQ_GOV_START);
1da177e4
LT
1761 }
1762 ret = -EINVAL;
1763 goto error_out;
1764 }
1765 /* might be a policy change, too, so fall through */
1766 }
2d06d8c4 1767 pr_debug("governor: change or update limits\n");
1da177e4
LT
1768 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1769 }
1770
7d5e350f 1771error_out:
1da177e4
LT
1772 return ret;
1773}
1774
1da177e4
LT
1775/**
1776 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1777 * @cpu: CPU which shall be re-evaluated
1778 *
25985edc 1779 * Useful for policy notifiers which have different necessities
1da177e4
LT
1780 * at different times.
1781 */
1782int cpufreq_update_policy(unsigned int cpu)
1783{
1784 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1785 struct cpufreq_policy policy;
f1829e4a 1786 int ret;
1da177e4 1787
f1829e4a
JL
1788 if (!data) {
1789 ret = -ENODEV;
1790 goto no_policy;
1791 }
1da177e4 1792
f1829e4a
JL
1793 if (unlikely(lock_policy_rwsem_write(cpu))) {
1794 ret = -EINVAL;
1795 goto fail;
1796 }
1da177e4 1797
2d06d8c4 1798 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1799 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1800 policy.min = data->user_policy.min;
1801 policy.max = data->user_policy.max;
1802 policy.policy = data->user_policy.policy;
1803 policy.governor = data->user_policy.governor;
1804
0961dd0d
TR
1805 /* BIOS might change freq behind our back
1806 -> ask driver for current freq and notify governors about a change */
1c3d85dd
RW
1807 if (cpufreq_driver->get) {
1808 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1809 if (!data->cur) {
2d06d8c4 1810 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1811 data->cur = policy.cur;
1812 } else {
1c3d85dd 1813 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
1814 cpufreq_out_of_sync(cpu, data->cur,
1815 policy.cur);
a85f7bd3 1816 }
0961dd0d
TR
1817 }
1818
1da177e4
LT
1819 ret = __cpufreq_set_policy(data, &policy);
1820
5a01f2e8
VP
1821 unlock_policy_rwsem_write(cpu);
1822
f1829e4a 1823fail:
1da177e4 1824 cpufreq_cpu_put(data);
f1829e4a 1825no_policy:
1da177e4
LT
1826 return ret;
1827}
1828EXPORT_SYMBOL(cpufreq_update_policy);
1829
dd184a01 1830static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1831 unsigned long action, void *hcpu)
1832{
1833 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1834 struct device *dev;
c32b6b8e 1835
8a25a2fd
KS
1836 dev = get_cpu_device(cpu);
1837 if (dev) {
c32b6b8e
AR
1838 switch (action) {
1839 case CPU_ONLINE:
8a25a2fd 1840 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1841 break;
1842 case CPU_DOWN_PREPARE:
a66b2e50 1843 case CPU_UP_CANCELED_FROZEN:
8a25a2fd 1844 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1845 break;
5a01f2e8 1846 case CPU_DOWN_FAILED:
8a25a2fd 1847 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1848 break;
1849 }
1850 }
1851 return NOTIFY_OK;
1852}
1853
9c36f746 1854static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1855 .notifier_call = cpufreq_cpu_callback,
1856};
1da177e4
LT
1857
1858/*********************************************************************
1859 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1860 *********************************************************************/
1861
1862/**
1863 * cpufreq_register_driver - register a CPU Frequency driver
1864 * @driver_data: A struct cpufreq_driver containing the values#
1865 * submitted by the CPU Frequency driver.
1866 *
32ee8c3e 1867 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1868 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1869 * (and isn't unregistered in the meantime).
1da177e4
LT
1870 *
1871 */
221dee28 1872int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1873{
1874 unsigned long flags;
1875 int ret;
1876
a7b422cd
KRW
1877 if (cpufreq_disabled())
1878 return -ENODEV;
1879
1da177e4
LT
1880 if (!driver_data || !driver_data->verify || !driver_data->init ||
1881 ((!driver_data->setpolicy) && (!driver_data->target)))
1882 return -EINVAL;
1883
2d06d8c4 1884 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1885
1886 if (driver_data->setpolicy)
1887 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1888
0d1857a1 1889 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1890 if (cpufreq_driver) {
0d1857a1 1891 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1892 return -EBUSY;
1893 }
1c3d85dd 1894 cpufreq_driver = driver_data;
0d1857a1 1895 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1896
8a25a2fd 1897 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1898 if (ret)
1899 goto err_null_driver;
1da177e4 1900
1c3d85dd 1901 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1902 int i;
1903 ret = -ENODEV;
1904
1905 /* check for at least one working CPU */
7a6aedfa
MT
1906 for (i = 0; i < nr_cpu_ids; i++)
1907 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1908 ret = 0;
7a6aedfa
MT
1909 break;
1910 }
1da177e4
LT
1911
1912 /* if all ->init() calls failed, unregister */
1913 if (ret) {
2d06d8c4 1914 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1915 driver_data->name);
8a25a2fd 1916 goto err_if_unreg;
1da177e4
LT
1917 }
1918 }
1919
8f5bc2ab 1920 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1921 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1922
8f5bc2ab 1923 return 0;
8a25a2fd
KS
1924err_if_unreg:
1925 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 1926err_null_driver:
0d1857a1 1927 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1928 cpufreq_driver = NULL;
0d1857a1 1929 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 1930 return ret;
1da177e4
LT
1931}
1932EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1933
1934
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered; must be the one currently
 * registered, otherwise -EINVAL is returned.
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Reject a mismatched caller or an unregister with nothing registered. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tear down in the reverse order of registration: first stop the
	 * per-CPU interface, then stop reacting to hotplug events. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Clear the global driver pointer last, under the write lock, so
	 * readers never observe a half-torn-down driver. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1962
/*
 * cpufreq_core_init - one-time core initialisation, run at core_initcall
 * time (i.e. before any cpufreq driver can register).
 *
 * Sets up the per-CPU policy bookkeeping, the global /sys/devices/system/cpu/
 * cpufreq kobject, and the syscore (suspend/resume) hooks.  Returns 0 on
 * success or -ENODEV when cpufreq is disabled on the command line.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 marks "this CPU is not managed by any policy yet". */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	/* Global sysfs anchor for driver-wide attributes; creation failure
	 * here means sysfs itself is broken, hence BUG_ON. */
	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);