Merge tag 'v3.10.107' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
6fa3eb70 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
6fa3eb70 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
6fa3eb70 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy managing that CPU (NULL if unmanaged). */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
/* Protects cpufreq_driver and cpufreq_cpu_data; readers may nest in IRQ. */
static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Serializes governor start/stop transitions across policies. */
static DEFINE_MUTEX(cpufreq_governor_lock);
1da177e4 53
5a01f2e8
VP
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
/* Maps each CPU to the CPU that owns its policy (and thus its rwsem). */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

/*
 * Generates lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolve
 * the policy-owning CPU for @cpu and take that CPU's rwsem in the given
 * mode.  Always returns 0; BUG()s if @cpu has no recorded policy owner.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 86
fa1d8af4
VK
/*
 * Generates unlock_policy_rwsem_read()/unlock_policy_rwsem_write():
 * release the policy rwsem taken by the matching lock helper above.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 97
/* internal prototypes (definitions appear later in this file) */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
1da177e4
LT
103
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
/* SRCU head: transition notifications fire often and must stay cheap. */
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Guards against registering on the SRCU head before it is initialized. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: run before any notifier can possibly register. */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 122
/* Non-zero once cpufreq has been administratively disabled. */
static int off __read_mostly;

/* Returns non-zero when the cpufreq core is disabled. */
static int cpufreq_disabled(void)
{
	return off;
}

/* Permanently disable the cpufreq core; there is no re-enable path. */
void disable_cpufreq(void)
{
	off = 1;
}

/* List of registered governors and the mutex serializing list updates. */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 134
4d5dcc42
VK
/*
 * have_governor_per_policy - does the driver keep governor state per policy?
 *
 * NOTE(review): dereferences cpufreq_driver without a NULL check; callers
 * must only invoke this after a driver has been registered.
 */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
140
141struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
142{
143 if (have_governor_per_policy())
144 return &policy->kobj;
145 else
146 return cpufreq_global_kobject;
147}
148EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
149
/*
 * get_cpu_idle_time_jiffy - derive idle time for @cpu from kcpustat.
 * @wall: if non-NULL, receives the current wall time in microseconds.
 *
 * Idle time is computed as wall time minus all accounted busy states
 * (user, system, irq, softirq, steal, nice).  Returns idle time in
 * microseconds.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
171
/*
 * get_cpu_idle_time - idle time of @cpu in microseconds.
 * @wall: if non-NULL, receives wall time in microseconds.
 * @io_busy: when non-zero, iowait counts as busy instead of idle.
 *
 * Prefers the precise NO_HZ accounting; falls back to jiffy-based
 * accounting when that is unavailable (indicated by -1ULL).
 */
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
4d5dcc42 184
/*
 * __cpufreq_cpu_get - look up the policy for @cpu and pin it.
 * @sysfs: true for sysfs callers, which already hold a reference on the
 *         policy kobject, so only the driver module reference is taken.
 *
 * On success returns the policy with the driver module pinned (and, for
 * !@sysfs callers, the policy kobject reference raised).  Returns NULL if
 * @cpu is out of range, no driver is registered, the driver module is
 * going away, or the CPU has no policy.  Undo with __cpufreq_cpu_put()
 * using the same @sysfs value.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

	/* Error unwinding: drop whatever was acquired, in reverse order. */
err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
222
223struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
224{
d5aaffa9
DB
225 if (cpufreq_disabled())
226 return NULL;
227
a9144436
SB
228 return __cpufreq_cpu_get(cpu, false);
229}
1da177e4
LT
230EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231
a9144436
SB
/* As cpufreq_cpu_get(), but for sysfs callers: skips the kobject ref. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
236
/*
 * __cpufreq_cpu_put - release references taken by __cpufreq_cpu_get().
 * @sysfs: must match the value used when the policy was obtained.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 243
1da177e4
LT
244void cpufreq_cpu_put(struct cpufreq_policy *data)
245{
d5aaffa9
DB
246 if (cpufreq_disabled())
247 return;
248
a9144436 249 __cpufreq_cpu_put(data, false);
1da177e4
LT
250}
251EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252
a9144436
SB
/* Counterpart of cpufreq_cpu_get_sysfs(): drops only the module ref. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 257
1da177e4
LT
258/*********************************************************************
259 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
260 *********************************************************************/
261
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was captured at. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Nothing to do when delay loops are frequency-invariant. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* Capture the baseline on the first transition we observe. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after a real change, or on suspend/resume transitions. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture code. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
299
300
b43a7ffb
VK
/*
 * __cpufreq_notify_transition - notify one CPU's transition stage.
 * @state: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * Runs the SRCU transition notifier chain and adjust_jiffies() for the
 * CPU identified by freqs->cpu.  Must be called in process context
 * (BUG()s with IRQs disabled).  On POSTCHANGE, records the new frequency
 * in policy->cur when the policy matches the notified CPU.
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view of the old freq */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU covered by the policy; freqs->cpu is
	 * updated in place so each notification names the right CPU. */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
361
362
363
364/*********************************************************************
365 * SYSFS INTERFACE *
366 *********************************************************************/
367
3bcb09a3
JF
/*
 * __find_governor - look up a registered governor by name.
 *
 * Case-insensitive match against cpufreq_governor_list; returns NULL if
 * not registered.  NOTE(review): callers in the parse path hold
 * cpufreq_governor_mutex around this — confirm other call sites do too.
 */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
378
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, only "performance"/"powersave" are valid and
 * *policy is filled in.  For target drivers, the named governor is looked
 * up (loading the cpufreq_<name> module on demand) and returned through
 * *governor.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_policy **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex: request_module() may sleep and
			 * the loaded module registers under this mutex. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
427
428
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler printing one unsigned policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
450
e08f5f5b
GS
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store handler that parses one unsigned value, applies it
 * through __cpufreq_set_policy(), and mirrors the result into the
 * user_policy so later driver updates don't clobber the user's choice.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
480
481/**
482 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
483 */
905d77cd
DJ
484static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
485 char *buf)
1da177e4 486{
5a01f2e8 487 unsigned int cur_freq = __cpufreq_get(policy->cpu);
741c8ac5
RW
488
489 if (cur_freq)
490 return sprintf(buf, "%u\n", cur_freq);
491
492 return sprintf(buf, "<unknown>\n");
1da177e4
LT
493}
494
495
496/**
497 * show_scaling_governor - show the current policy for the specified CPU
498 */
905d77cd 499static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 500{
29464f28 501 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
502 return sprintf(buf, "powersave\n");
503 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
504 return sprintf(buf, "performance\n");
505 else if (policy->governor)
4b972f0b 506 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 507 policy->governor->name);
1da177e4
LT
508 return -EINVAL;
509}
510
511
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor name from sysfs and applies it via
 * __cpufreq_set_policy().  Returns @count on success, a negative errno
 * otherwise.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read, leaves room for the terminating NUL */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* Record the user's choice even if applying it failed partway. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
546
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
554
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy drivers always offer exactly "performance powersave";
 * target drivers list every registered governor that still fits in the
 * PAGE_SIZE sysfs buffer.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop before a name + separator could overflow the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 579
/*
 * show_cpus - format a cpumask as a space-separated CPU list.
 *
 * Stops early once the page buffer is nearly full; always appends a
 * trailing newline.  Returns the number of bytes written.
 */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
595
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
612
9e76988e 613static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 614 const char *buf, size_t count)
9e76988e
VP
615{
616 unsigned int freq = 0;
617 unsigned int ret;
618
879000f9 619 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
620 return -EINVAL;
621
622 ret = sscanf(buf, "%u", &freq);
623 if (ret != 1)
624 return -EINVAL;
625
626 policy->governor->store_setspeed(policy, freq);
627
628 return count;
629}
630
/* Show the governor-set speed, or "<unsupported>" if the governor has
 * no show_setspeed hook. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 638
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	/* No bios_limit hook, or the query failed: report hardware max. */
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
653
6dad2a29
BP
/* sysfs attribute objects wired to the show_/store_ handlers above. */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject; extras (cpuinfo_cur_freq,
 * scaling_cur_freq, bios_limit) are added conditionally at init. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
683
8aa84ad8
TR
/* Parent kobject for global (non-per-policy) cpufreq sysfs attributes. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
686
29464f28
DJ
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/*
 * show - dispatch a sysfs read to the attribute's ->show() handler.
 *
 * Pins the policy (sysfs variant: kobject ref already held by sysfs) and
 * takes the policy rwsem for reading around the handler call.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
713
905d77cd
DJ
/*
 * store - dispatch a sysfs write to the attribute's ->store() handler.
 *
 * Same pin/lock pattern as show(), but takes the policy rwsem in write
 * mode since store handlers modify the policy.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
738
905d77cd 739static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 740{
905d77cd 741 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 742 pr_debug("last reference is dropped\n");
1da177e4
LT
743 complete(&policy->kobj_unregister);
744}
745
/* sysfs glue: route all attribute I/O through show()/store() above. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type for per-policy "cpufreq" directories. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
756
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* the owning CPU gets the real directory, not a link */
		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* Policy ref taken per symlinked sibling; only dropped here
		 * on error.  NOTE(review): the success-path ref appears to
		 * be released in the removal path — verify there. */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
783
cf3289d0
AC
/*
 * cpufreq_add_dev_interface - create sysfs interface and start the policy.
 *
 * Registers the policy kobject under the cpu device, creates driver and
 * conditional attribute files, publishes the policy in the per-cpu
 * tables, symlinks sibling CPUs, and applies the default policy.
 * On sysfs failure the kobject is put and its release is awaited.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every CPU it covers */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	/* block until release() fires so the policy can be freed safely */
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
856
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach a hotplugged @cpu to @sibling's policy.
 *
 * Stops the governor, adds @cpu to the policy's cpumask and per-cpu
 * tables under the policy rwsem + driver lock, restarts the governor,
 * and symlinks the cpu device to the shared policy kobject.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	/* governor must be quiescent while the cpumask changes */
	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		/* drop the ref from cpufreq_cpu_get() on failure */
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
896
897/**
898 * cpufreq_add_dev - add a CPU device
899 *
32ee8c3e 900 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
901 *
902 * The Oracle says: try running cpufreq registration/unregistration concurrently
903 * with with cpu hotplugging and all hell will break loose. Tried to clean this
904 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 905 */
/*
 * cpufreq_add_dev - add a CPU device to the cpufreq subsystem
 * @dev: the CPU device being added
 * @sif: subsys interface (unused here, part of the subsys callback signature)
 *
 * Adds the cpufreq interface for a CPU device: allocates a new policy,
 * asks the driver to initialize it, restores a previously saved governor
 * (if the CPU was hot-unplugged earlier) and exposes the sysfs interface.
 * Returns 0 on success or if nothing needs doing, negative errno on failure.
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	/* Offline CPUs are handled when they come online (see the notifier). */
	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			/* Reuse the sibling's policy instead of creating one. */
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	/* Pin the driver module while we are creating the policy. */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	/* Driver ->init() set min/max; remember them as the user's limits. */
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Restore the governor this CPU used before it was hot-unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	/* Unpublish the policy from every CPU it was installed on. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Drop the kobject and wait until all references are gone. */
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1034
/*
 * update_policy_cpu - transfer policy ownership to a new managing CPU
 * @policy: shared policy whose owner CPU is being changed
 * @cpu: the CPU that becomes the new policy->cpu
 *
 * Records the previous owner in policy->last_cpu, repoints the per-cpu
 * policy_cpu entries of every CPU in the policy, and notifies listeners
 * (and the freq-table code, when built in) about the change.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* Every CPU sharing this policy now resolves to the new owner. */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1051
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 *
 * If the removed CPU was the policy owner and siblings remain, ownership
 * of the sysfs directory is migrated to the first remaining sibling;
 * if it was the last CPU in the policy, the policy is torn down and freed.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Atomically fetch and clear this CPU's policy pointer. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Not the owner: just drop our symlink to the shared dir. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add the CPU and republish the policy. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target)
			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else {
		/* Siblings remain: drop our ref and restart the governor. */
		pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
		cpufreq_cpu_put(data);
		if (cpufreq_driver->target) {
			__cpufreq_governor(data, CPUFREQ_GOV_START);
			__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
		}
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1166
1167
8a25a2fd 1168static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1169{
8a25a2fd 1170 unsigned int cpu = dev->id;
5a01f2e8 1171 int retval;
ec28297a
VP
1172
1173 if (cpu_is_offline(cpu))
1174 return 0;
1175
8a25a2fd 1176 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1177 return retval;
1178}
1179
1180
65f27f38 1181static void handle_update(struct work_struct *work)
1da177e4 1182{
65f27f38
DH
1183 struct cpufreq_policy *policy =
1184 container_of(work, struct cpufreq_policy, update);
1185 unsigned int cpu = policy->cpu;
2d06d8c4 1186 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1187 cpufreq_update_policy(cpu);
1188}
1189
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 *
 * Emits a PRECHANGE/POSTCHANGE transition pair so that governors and
 * listeners adopt the frequency the hardware actually runs at.
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;


	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	/* Look up the policy under the driver lock; only the pointer is
	 * needed for the notification calls below. */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1220
1221
32ee8c3e 1222/**
4ab70df4 1223 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1224 * @cpu: CPU number
1225 *
1226 * This is the last known freq, without actually getting it from the driver.
1227 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1228 */
1229unsigned int cpufreq_quick_get(unsigned int cpu)
1230{
9e21ba8b 1231 struct cpufreq_policy *policy;
e08f5f5b 1232 unsigned int ret_freq = 0;
95235ca2 1233
1c3d85dd
RW
1234 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1235 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1236
1237 policy = cpufreq_cpu_get(cpu);
95235ca2 1238 if (policy) {
e08f5f5b 1239 ret_freq = policy->cur;
95235ca2
VP
1240 cpufreq_cpu_put(policy);
1241 }
1242
4d34a67d 1243 return ret_freq;
95235ca2
VP
1244}
1245EXPORT_SYMBOL(cpufreq_quick_get);
1246
3d737108
JB
1247/**
1248 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1249 * @cpu: CPU number
1250 *
1251 * Just return the max possible frequency for a given CPU.
1252 */
1253unsigned int cpufreq_quick_get_max(unsigned int cpu)
1254{
1255 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1256 unsigned int ret_freq = 0;
1257
1258 if (policy) {
1259 ret_freq = policy->max;
1260 cpufreq_cpu_put(policy);
1261 }
1262
1263 return ret_freq;
1264}
1265EXPORT_SYMBOL(cpufreq_quick_get_max);
1266
95235ca2 1267
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu
 *
 * Caller must hold the policy rwsem (see cpufreq_get()).  If the value
 * reported by the driver disagrees with the cached policy->cur on a
 * driver without CPUFREQ_CONST_LOOPS, an out-of-sync notification is
 * sent and a policy update is scheduled.  Returns 0 if the driver has
 * no ->get() callback.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			/* Clean up asynchronously; see handle_update(). */
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1290
5a01f2e8
VP
1291/**
1292 * cpufreq_get - get the current CPU frequency (in kHz)
1293 * @cpu: CPU number
1294 *
1295 * Get the CPU current (static) CPU frequency
1296 */
1297unsigned int cpufreq_get(unsigned int cpu)
1298{
1299 unsigned int ret_freq = 0;
1300 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1301
1302 if (!policy)
1303 goto out;
1304
1305 if (unlikely(lock_policy_rwsem_read(cpu)))
1306 goto out_policy;
1307
1308 ret_freq = __cpufreq_get(cpu);
1309
1310 unlock_policy_rwsem_read(cpu);
1da177e4 1311
5a01f2e8
VP
1312out_policy:
1313 cpufreq_cpu_put(policy);
1314out:
4d34a67d 1315 return ret_freq;
1da177e4
LT
1316}
1317EXPORT_SYMBOL(cpufreq_get);
1318
/*
 * Hooks cpufreq into the CPU subsystem so that cpufreq_add_dev() /
 * cpufreq_remove_dev() run for each CPU device.
 */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1325
1da177e4 1326
42d4dc3f 1327/**
e00e56df
RW
1328 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1329 *
1330 * This function is only executed for the boot processor. The other CPUs
1331 * have been put offline by means of CPU hotplug.
42d4dc3f 1332 */
e00e56df 1333static int cpufreq_bp_suspend(void)
42d4dc3f 1334{
e08f5f5b 1335 int ret = 0;
4bc5d341 1336
e00e56df 1337 int cpu = smp_processor_id();
42d4dc3f
BH
1338 struct cpufreq_policy *cpu_policy;
1339
2d06d8c4 1340 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1341
e00e56df 1342 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1343 cpu_policy = cpufreq_cpu_get(cpu);
1344 if (!cpu_policy)
e00e56df 1345 return 0;
42d4dc3f 1346
1c3d85dd
RW
1347 if (cpufreq_driver->suspend) {
1348 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1349 if (ret)
42d4dc3f
BH
1350 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1351 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1352 }
1353
42d4dc3f 1354 cpufreq_cpu_put(cpu_policy);
c9060494 1355 return ret;
42d4dc3f
BH
1356}
1357
1da177e4 1358/**
e00e56df 1359 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1360 *
1361 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1362 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1363 * restored. It will verify that the current freq is in sync with
1364 * what we believe it to be. This is a bit later than when it
1365 * should be, but nonethteless it's better than calling
1366 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1367 *
1368 * This function is only executed for the boot CPU. The other CPUs have not
1369 * been turned on yet.
1da177e4 1370 */
e00e56df 1371static void cpufreq_bp_resume(void)
1da177e4 1372{
e08f5f5b 1373 int ret = 0;
4bc5d341 1374
e00e56df 1375 int cpu = smp_processor_id();
1da177e4
LT
1376 struct cpufreq_policy *cpu_policy;
1377
2d06d8c4 1378 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1379
e00e56df 1380 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1381 cpu_policy = cpufreq_cpu_get(cpu);
1382 if (!cpu_policy)
e00e56df 1383 return;
1da177e4 1384
1c3d85dd
RW
1385 if (cpufreq_driver->resume) {
1386 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1387 if (ret) {
1388 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1389 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1390 goto fail;
1da177e4
LT
1391 }
1392 }
1393
1da177e4 1394 schedule_work(&cpu_policy->update);
ce6c3997 1395
c9060494 1396fail:
1da177e4 1397 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1398}
1399
/* Syscore hooks: run the boot-CPU suspend/resume handlers above during
 * system-wide suspend/resume (interrupts disabled, one CPU online). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1404
9d95046e
BP
1405/**
1406 * cpufreq_get_current_driver - return current driver's name
1407 *
1408 * Return the name string of the currently loaded cpufreq driver
1409 * or NULL, if none.
1410 */
1411const char *cpufreq_get_current_driver(void)
1412{
1c3d85dd
RW
1413 if (cpufreq_driver)
1414 return cpufreq_driver->name;
1415
1416 return NULL;
9d95046e
BP
1417}
1418EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1419
1420/*********************************************************************
1421 * NOTIFIER LISTS INTERFACE *
1422 *********************************************************************/
1423
1424/**
1425 * cpufreq_register_notifier - register a driver with cpufreq
1426 * @nb: notifier function to register
1427 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1428 *
32ee8c3e 1429 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1430 * are notified about clock rate changes (once before and once after
1431 * the transition), or a list of drivers that are notified about
1432 * changes in cpufreq policy.
1433 *
1434 * This function may sleep, and has the same return conditions as
e041c683 1435 * blocking_notifier_chain_register.
1da177e4
LT
1436 */
1437int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1438{
1439 int ret;
1440
d5aaffa9
DB
1441 if (cpufreq_disabled())
1442 return -EINVAL;
1443
74212ca4
CEB
1444 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1445
1da177e4
LT
1446 switch (list) {
1447 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1448 ret = srcu_notifier_chain_register(
e041c683 1449 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1450 break;
1451 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1452 ret = blocking_notifier_chain_register(
1453 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1454 break;
1455 default:
1456 ret = -EINVAL;
1457 }
1da177e4
LT
1458
1459 return ret;
1460}
1461EXPORT_SYMBOL(cpufreq_register_notifier);
1462
1463
1464/**
1465 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1466 * @nb: notifier block to be unregistered
1467 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1468 *
1469 * Remove a driver from the CPU frequency notifier list.
1470 *
1471 * This function may sleep, and has the same return conditions as
e041c683 1472 * blocking_notifier_chain_unregister.
1da177e4
LT
1473 */
1474int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1475{
1476 int ret;
1477
d5aaffa9
DB
1478 if (cpufreq_disabled())
1479 return -EINVAL;
1480
1da177e4
LT
1481 switch (list) {
1482 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1483 ret = srcu_notifier_chain_unregister(
e041c683 1484 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1485 break;
1486 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1487 ret = blocking_notifier_chain_unregister(
1488 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1489 break;
1490 default:
1491 ret = -EINVAL;
1492 }
1da177e4
LT
1493
1494 return ret;
1495}
1496EXPORT_SYMBOL(cpufreq_unregister_notifier);
1497
1498
1499/*********************************************************************
1500 * GOVERNORS *
1501 *********************************************************************/
1502
1503
1504int __cpufreq_driver_target(struct cpufreq_policy *policy,
1505 unsigned int target_freq,
1506 unsigned int relation)
1507{
1508 int retval = -EINVAL;
7249924e 1509 unsigned int old_target_freq = target_freq;
c32b6b8e 1510
a7b422cd
KRW
1511 if (cpufreq_disabled())
1512 return -ENODEV;
1513
7249924e
VK
1514 /* Make sure that target_freq is within supported range */
1515 if (target_freq > policy->max)
1516 target_freq = policy->max;
1517 if (target_freq < policy->min)
1518 target_freq = policy->min;
1519
1520 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1521 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1522
1523 if (target_freq == policy->cur)
1524 return 0;
1525
1c3d85dd
RW
1526 if (cpufreq_driver->target)
1527 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1528
1da177e4
LT
1529 return retval;
1530}
1531EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1532
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target()
 *
 * Takes a reference on the policy and the policy write lock before
 * delegating; returns -EINVAL when the policy is gone or the lock
 * cannot be taken.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1556
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency estimate
 * @policy: policy the query applies to (re-referenced internally)
 * @cpu: CPU to query
 *
 * Returns 0 when cpufreq is disabled or the driver has no ->getavg(),
 * -EINVAL when the policy cannot be referenced, otherwise the driver's
 * return value.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	/* Hold a reference across the driver call. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1577
153d7f3f 1578/*
153d7f3f
AV
1579 * when "event" is CPUFREQ_GOV_LIMITS
1580 */
1da177e4 1581
e08f5f5b
GS
1582static int __cpufreq_governor(struct cpufreq_policy *policy,
1583 unsigned int event)
1da177e4 1584{
cc993cab 1585 int ret;
6afde10c
TR
1586
1587 /* Only must be defined when default governor is known to have latency
1588 restrictions, like e.g. conservative or ondemand.
1589 That this is the case is already ensured in Kconfig
1590 */
1591#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1592 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1593#else
1594 struct cpufreq_governor *gov = NULL;
1595#endif
1c256245
TR
1596
1597 if (policy->governor->max_transition_latency &&
1598 policy->cpuinfo.transition_latency >
1599 policy->governor->max_transition_latency) {
6afde10c
TR
1600 if (!gov)
1601 return -EINVAL;
1602 else {
1603 printk(KERN_WARNING "%s governor failed, too long"
1604 " transition latency of HW, fallback"
1605 " to %s governor\n",
1606 policy->governor->name,
1607 gov->name);
1608 policy->governor = gov;
1609 }
1c256245 1610 }
1da177e4
LT
1611
1612 if (!try_module_get(policy->governor->owner))
1613 return -EINVAL;
1614
2d06d8c4 1615 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1616 policy->cpu, event);
ba17ca46
XC
1617
1618 mutex_lock(&cpufreq_governor_lock);
1619 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1620 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1621 mutex_unlock(&cpufreq_governor_lock);
1622 return -EBUSY;
1623 }
1624
1625 if (event == CPUFREQ_GOV_STOP)
1626 policy->governor_enabled = false;
1627 else if (event == CPUFREQ_GOV_START)
1628 policy->governor_enabled = true;
1629
1630 mutex_unlock(&cpufreq_governor_lock);
1631
1da177e4
LT
1632 ret = policy->governor->governor(policy, event);
1633
4d5dcc42
VK
1634 if (!ret) {
1635 if (event == CPUFREQ_GOV_POLICY_INIT)
1636 policy->governor->initialized++;
1637 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1638 policy->governor->initialized--;
ba17ca46
XC
1639 } else {
1640 /* Restore original values */
1641 mutex_lock(&cpufreq_governor_lock);
1642 if (event == CPUFREQ_GOV_STOP)
1643 policy->governor_enabled = true;
1644 else if (event == CPUFREQ_GOV_START)
1645 policy->governor_enabled = false;
1646 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1647 }
b394058f 1648
e08f5f5b
GS
1649 /* we keep one module reference alive for
1650 each CPU governed by this CPU */
1da177e4
LT
1651 if ((event != CPUFREQ_GOV_START) || ret)
1652 module_put(policy->governor->owner);
1653 if ((event == CPUFREQ_GOV_STOP) && !ret)
1654 module_put(policy->governor->owner);
1655
1656 return ret;
1657}
1658
1659
1da177e4
LT
1660int cpufreq_register_governor(struct cpufreq_governor *governor)
1661{
3bcb09a3 1662 int err;
1da177e4
LT
1663
1664 if (!governor)
1665 return -EINVAL;
1666
a7b422cd
KRW
1667 if (cpufreq_disabled())
1668 return -ENODEV;
1669
3fc54d37 1670 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1671
b394058f 1672 governor->initialized = 0;
3bcb09a3
JF
1673 err = -EBUSY;
1674 if (__find_governor(governor->name) == NULL) {
1675 err = 0;
1676 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1677 }
1da177e4 1678
32ee8c3e 1679 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1680 return err;
1da177e4
LT
1681}
1682EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1683
1684
/*
 * cpufreq_unregister_governor - remove a governor from the global list
 *
 * Also clears the saved per-cpu governor name on offline CPUs so that a
 * later hot-plug does not try to restore a governor that no longer
 * exists (see the restore in cpufreq_add_dev()).
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* Wipe the saved name if it matches this governor. */
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1712
1713
1714
1715/*********************************************************************
1716 * POLICY INTERFACE *
1717 *********************************************************************/
1718
1719/**
1720 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1721 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1722 * is written
1da177e4
LT
1723 *
1724 * Reads the current cpufreq policy.
1725 */
1726int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1727{
1728 struct cpufreq_policy *cpu_policy;
1729 if (!policy)
1730 return -EINVAL;
1731
1732 cpu_policy = cpufreq_cpu_get(cpu);
1733 if (!cpu_policy)
1734 return -EINVAL;
1735
1da177e4 1736 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1737
1738 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1739 return 0;
1740}
1741EXPORT_SYMBOL(cpufreq_get_policy);
1742
1743
/*
 * __cpufreq_set_policy - apply a new policy to a CPU
 * data   : current policy.
 * policy : policy to be set.
 *
 * Verifies the requested limits with the driver, lets policy notifiers
 * adjust them, then either hands the policy to a setpolicy-style driver
 * or performs a governor switch (stopping the old governor, starting the
 * new one, and rolling back to the old one if the new one fails to start).
 * Caller holds the policy write rwsem; it is temporarily dropped around
 * the GOV_POLICY_EXIT calls below.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Reject ranges that do not overlap the currently valid ones. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* setpolicy-style driver: no governor involved. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				/* Drop the rwsem around POLICY_EXIT; the
				 * governor may take locks of its own. */
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1849
1da177e4
LT
1850/**
1851 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1852 * @cpu: CPU which shall be re-evaluated
1853 *
25985edc 1854 * Useful for policy notifiers which have different necessities
1da177e4
LT
1855 * at different times.
1856 */
1857int cpufreq_update_policy(unsigned int cpu)
1858{
1859 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1860 struct cpufreq_policy policy;
f1829e4a 1861 int ret;
1da177e4 1862
f1829e4a
JL
1863 if (!data) {
1864 ret = -ENODEV;
1865 goto no_policy;
1866 }
1da177e4 1867
f1829e4a
JL
1868 if (unlikely(lock_policy_rwsem_write(cpu))) {
1869 ret = -EINVAL;
1870 goto fail;
1871 }
1da177e4 1872
2d06d8c4 1873 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1874 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1875 policy.min = data->user_policy.min;
1876 policy.max = data->user_policy.max;
1877 policy.policy = data->user_policy.policy;
1878 policy.governor = data->user_policy.governor;
1879
0961dd0d
TR
1880 /* BIOS might change freq behind our back
1881 -> ask driver for current freq and notify governors about a change */
1c3d85dd
RW
1882 if (cpufreq_driver->get) {
1883 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1884 if (!data->cur) {
2d06d8c4 1885 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1886 data->cur = policy.cur;
1887 } else {
1c3d85dd 1888 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
1889 cpufreq_out_of_sync(cpu, data->cur,
1890 policy.cur);
a85f7bd3 1891 }
0961dd0d
TR
1892 }
1893
1da177e4
LT
1894 ret = __cpufreq_set_policy(data, &policy);
1895
5a01f2e8
VP
1896 unlock_policy_rwsem_write(cpu);
1897
f1829e4a 1898fail:
1da177e4 1899 cpufreq_cpu_put(data);
f1829e4a 1900no_policy:
1da177e4
LT
1901 return ret;
1902}
1903EXPORT_SYMBOL(cpufreq_update_policy);
1904
dd184a01 1905static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1906 unsigned long action, void *hcpu)
1907{
1908 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1909 struct device *dev;
c32b6b8e 1910
8a25a2fd
KS
1911 dev = get_cpu_device(cpu);
1912 if (dev) {
c32b6b8e
AR
1913 switch (action) {
1914 case CPU_ONLINE:
9d3ce4af 1915 case CPU_ONLINE_FROZEN:
8a25a2fd 1916 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1917 break;
1918 case CPU_DOWN_PREPARE:
9d3ce4af 1919 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1920 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1921 break;
5a01f2e8 1922 case CPU_DOWN_FAILED:
9d3ce4af 1923 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1924 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1925 break;
1926 }
1927 }
1928 return NOTIFY_OK;
1929}
1930
9c36f746 1931static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1932 .notifier_call = cpufreq_cpu_callback,
1933};
1da177e4
LT
1934
1935/*********************************************************************
1936 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1937 *********************************************************************/
1938
1939/**
1940 * cpufreq_register_driver - register a CPU Frequency driver
1941 * @driver_data: A struct cpufreq_driver containing the values#
1942 * submitted by the CPU Frequency driver.
1943 *
32ee8c3e 1944 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1945 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1946 * (and isn't unregistered in the meantime).
1da177e4
LT
1947 *
1948 */
221dee28 1949int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1950{
1951 unsigned long flags;
1952 int ret;
1953
a7b422cd
KRW
1954 if (cpufreq_disabled())
1955 return -ENODEV;
1956
1da177e4
LT
1957 if (!driver_data || !driver_data->verify || !driver_data->init ||
1958 ((!driver_data->setpolicy) && (!driver_data->target)))
1959 return -EINVAL;
1960
2d06d8c4 1961 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1962
1963 if (driver_data->setpolicy)
1964 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1965
0d1857a1 1966 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1967 if (cpufreq_driver) {
0d1857a1 1968 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1969 return -EBUSY;
1970 }
1c3d85dd 1971 cpufreq_driver = driver_data;
0d1857a1 1972 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1973
8a25a2fd 1974 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1975 if (ret)
1976 goto err_null_driver;
1da177e4 1977
1c3d85dd 1978 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1979 int i;
1980 ret = -ENODEV;
1981
1982 /* check for at least one working CPU */
7a6aedfa
MT
1983 for (i = 0; i < nr_cpu_ids; i++)
1984 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1985 ret = 0;
7a6aedfa
MT
1986 break;
1987 }
1da177e4
LT
1988
1989 /* if all ->init() calls failed, unregister */
1990 if (ret) {
2d06d8c4 1991 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1992 driver_data->name);
8a25a2fd 1993 goto err_if_unreg;
1da177e4
LT
1994 }
1995 }
1996
8f5bc2ab 1997 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1998 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1999
8f5bc2ab 2000 return 0;
8a25a2fd
KS
2001err_if_unreg:
2002 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2003err_null_driver:
0d1857a1 2004 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2005 cpufreq_driver = NULL;
0d1857a1 2006 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2007 return ret;
1da177e4
LT
2008}
2009EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2010
2011
2012/**
2013 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2014 *
32ee8c3e 2015 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2016 * the right to do so, i.e. if you have succeeded in initialising before!
2017 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2018 * currently not initialised.
2019 */
221dee28 2020int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2021{
2022 unsigned long flags;
2023
1c3d85dd 2024 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2025 return -EINVAL;
1da177e4 2026
2d06d8c4 2027 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2028
8a25a2fd 2029 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2030 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2031
0d1857a1 2032 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2033 cpufreq_driver = NULL;
0d1857a1 2034 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2035
2036 return 0;
2037}
2038EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2039
2040static int __init cpufreq_core_init(void)
2041{
2042 int cpu;
2043
a7b422cd
KRW
2044 if (cpufreq_disabled())
2045 return -ENODEV;
2046
5a01f2e8 2047 for_each_possible_cpu(cpu) {
f1625066 2048 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2049 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2050 }
8aa84ad8 2051
8a25a2fd 2052 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2053 BUG_ON(!cpufreq_global_kobject);
e00e56df 2054 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2055
5a01f2e8
VP
2056 return 0;
2057}
5a01f2e8 2058core_initcall(cpufreq_core_init);