Merge tag 'v3.10.67' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
6fa3eb70 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
6fa3eb70 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
6fa3eb70 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
1da177e4 40/**
cd878479 41 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
44 */
1c3d85dd 45static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 46static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
47#ifdef CONFIG_HOTPLUG_CPU
48/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 49static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 50#endif
0d1857a1 51static DEFINE_RWLOCK(cpufreq_driver_lock);
ba17ca46 52static DEFINE_MUTEX(cpufreq_governor_lock);
1da177e4 53
5a01f2e8
VP
54/*
55 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
56 * all cpufreq/hotplug/workqueue/etc related lock issues.
57 *
58 * The rules for this semaphore:
59 * - Any routine that wants to read from the policy structure will
60 * do a down_read on this semaphore.
61 * - Any routine that will write to the policy structure and/or may take away
62 * the policy altogether (eg. CPU hotplug), will hold this lock in write
63 * mode before doing so.
64 *
65 * Additional rules:
5a01f2e8
VP
66 * - Governor routines that can be called in cpufreq hotplug path should not
67 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
68 * - Lock should not be held across
69 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 70 */
f1625066 71static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
72static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
73
/*
 * Generate lock_policy_rwsem_read()/lock_policy_rwsem_write(): take the
 * per-policy rwsem guarding @cpu's policy, in read or write mode.  The
 * semaphore lives on the policy's "owner" CPU, found via the
 * cpufreq_policy_cpu per-cpu map; -1 there means no policy is attached
 * (a core invariant, hence the BUG_ON).  Always returns 0.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 86
fa1d8af4
VK
/*
 * Generate unlock_policy_rwsem_read()/unlock_policy_rwsem_write():
 * counterparts to lock_policy_rwsem_*() above, releasing the rwsem on
 * the policy's owner CPU.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 97
1da177e4 98/* internal prototypes */
29464f28
DJ
99static int __cpufreq_governor(struct cpufreq_policy *policy,
100 unsigned int event);
5a01f2e8 101static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 102static void handle_update(struct work_struct *work);
1da177e4
LT
103
104/**
32ee8c3e
DJ
105 * Two notifier lists: the "policy" list is involved in the
106 * validation process for a new CPU frequency policy; the
1da177e4
LT
107 * "transition" list for kernel code that needs to handle
108 * changes to devices when the CPU clock speed changes.
109 * The mutex locks both lists.
110 */
e041c683 111static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 112static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 113
74212ca4 114static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
/*
 * Initialize the SRCU notifier head for frequency-transition notifiers.
 * Runs as a pure_initcall so it precedes any notifier registration; the
 * _called flag lets later registration paths assert proper ordering.
 */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 122
/* Global "cpufreq is disabled" flag; set once via disable_cpufreq(). */
static int off __read_mostly;
/* Return non-zero when cpufreq has been administratively disabled. */
static int cpufreq_disabled(void)
{
	return off;
}
/* Permanently disable cpufreq; there is no corresponding re-enable. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 132static LIST_HEAD(cpufreq_governor_list);
29464f28 133static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 134
4d5dcc42
VK
/*
 * Report whether the registered driver keeps a separate governor instance
 * per policy (rather than one system-wide governor).
 * NOTE(review): dereferences cpufreq_driver without a NULL check —
 * presumably only callable after a driver is registered; confirm callers.
 */
bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
140
141struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
142{
143 if (have_governor_per_policy())
144 return &policy->kobj;
145 else
146 return cpufreq_global_kobject;
147}
148EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
149
/*
 * Jiffy-based fallback for idle-time accounting: idle time is wall time
 * minus everything accounted as busy (user, system, irq, softirq, steal,
 * nice).  Used when the tick-based nohz statistics are unavailable.
 * If @wall is non-NULL it receives the current wall time.  Both values
 * are returned in microseconds.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
171
/*
 * Return @cpu's cumulative idle time in usecs, optionally storing wall
 * time in @wall.  Prefers the nohz accounting (get_cpu_idle_time_us);
 * -1ULL from it means nohz stats are unavailable, so fall back to the
 * jiffy-based estimate.  When @io_busy is 0, iowait time is counted as
 * idle as well.
 */
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
4d5dcc42 184
/*
 * Look up @cpu's policy and pin it: takes a reference on the driver
 * module and, unless @sysfs is true, a kobject reference on the policy.
 * (Sysfs callers already hold an implicit kobject ref via the open
 * attribute file, so they skip kobject_get.)  Performed under the
 * driver rwlock so the driver cannot vanish mid-lookup.  Returns the
 * policy, or NULL on any failure with all acquired refs dropped.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
222
/*
 * Public lookup: return @cpu's policy with module + kobject references
 * held (drop with cpufreq_cpu_put), or NULL if cpufreq is disabled or
 * no policy exists.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231
a9144436
SB
/* Sysfs-path lookup: like cpufreq_cpu_get() but skips the kobject ref. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
236
/*
 * Release the references taken by __cpufreq_cpu_get(): kobject ref only
 * when not on the sysfs path, plus the driver module ref.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 243
1da177e4
LT
/* Public counterpart of cpufreq_cpu_get(); no-op if cpufreq is disabled. */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
252
a9144436
SB
/* Sysfs-path counterpart of cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 257
1da177e4
LT
258/*********************************************************************
259 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
260 *********************************************************************/
261
262/**
263 * adjust_jiffies - adjust the system "loops_per_jiffy"
264 *
265 * This function alters the system "loops_per_jiffy" for the clock
266 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 267 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
268 * per-CPU loops_per_jiffy value wherever possible.
269 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was measured at. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

/*
 * Rescale the global loops_per_jiffy for a frequency change (UP only;
 * on SMP each CPU keeps its own value).  Drivers with CONST_LOOPS need
 * no adjustment.  The first call latches the reference pair; later
 * POSTCHANGE (with an actual change) / RESUMECHANGE / SUSPENDCHANGE
 * events rescale from that reference.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is used instead; nothing to adjust. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
299
300
b43a7ffb
VK
/*
 * Notify one CPU's transition (freqs->cpu): runs the SRCU transition
 * notifier chain and adjust_jiffies() for PRECHANGE/POSTCHANGE.  On
 * PRECHANGE the driver-reported old frequency is reconciled with the
 * core's view (policy->cur); on POSTCHANGE policy->cur is updated.
 * Must be called with interrupts enabled (notifiers may sleep).
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 *
 * Iterates over every CPU in the policy, reusing freqs->cpu as the loop
 * cursor so each per-CPU notification carries the right CPU id.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
361
362
363
364/*********************************************************************
365 * SYSFS INTERFACE *
366 *********************************************************************/
367
3bcb09a3
JF
/*
 * Find a registered governor by name (case-insensitive, bounded to
 * CPUFREQ_NAME_LEN).  Caller must hold cpufreq_governor_mutex.
 * Returns NULL if no governor of that name is registered.
 */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
378
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers only "performance"/"powersave" are valid and
 * set *policy; for target drivers the governor list is searched (with
 * a request_module("cpufreq_%s") attempt if not found — the governor
 * mutex is dropped around the module load to avoid deadlock) and the
 * result is stored in *governor.  Returns 0 on success, -EINVAL
 * otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
427
428
1da177e4 429/**
e08f5f5b
GS
430 * cpufreq_per_cpu_attr_read() / show_##file_name() -
431 * print out cpufreq information
1da177e4
LT
432 *
433 * Write out information from cpufreq_driver->policy[cpu]; object must be
434 * "unsigned int".
435 */
436
32ee8c3e
DJ
/*
 * Generate a sysfs show_<file_name>() that prints one unsigned int
 * member of the policy ("object" may be a nested member path).
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
450
e08f5f5b
GS
451static int __cpufreq_set_policy(struct cpufreq_policy *data,
452 struct cpufreq_policy *policy);
7970e08b 453
1da177e4
LT
454/**
455 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
456 */
457#define store_one(file_name, object) \
458static ssize_t store_##file_name \
905d77cd 459(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 460{ \
f55c9c26 461 unsigned int ret; \
1da177e4
LT
462 struct cpufreq_policy new_policy; \
463 \
464 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
465 if (ret) \
466 return -EINVAL; \
467 \
29464f28 468 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
469 if (ret != 1) \
470 return -EINVAL; \
471 \
7970e08b
TR
472 ret = __cpufreq_set_policy(policy, &new_policy); \
473 policy->user_policy.object = policy->object; \
1da177e4
LT
474 \
475 return ret ? ret : count; \
476}
477
29464f28
DJ
478store_one(scaling_min_freq, min);
479store_one(scaling_max_freq, max);
1da177e4
LT
480
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 *
 * Queries the driver via __cpufreq_get(); 0 means the frequency could
 * not be determined and "<unknown>" is reported instead.
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
492
493
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * For setpolicy drivers prints the fixed policy name; for governor-based
 * drivers prints the active governor's name.
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
508
509
510/**
511 * store_scaling_governor - store policy for the specified CPU
512 */
905d77cd
DJ
513static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
514 const char *buf, size_t count)
1da177e4 515{
f55c9c26 516 unsigned int ret;
1da177e4
LT
517 char str_governor[16];
518 struct cpufreq_policy new_policy;
519
520 ret = cpufreq_get_policy(&new_policy, policy->cpu);
521 if (ret)
522 return ret;
523
29464f28 524 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
525 if (ret != 1)
526 return -EINVAL;
527
e08f5f5b
GS
528 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
529 &new_policy.governor))
1da177e4
LT
530 return -EINVAL;
531
7970e08b
TR
532 /* Do not use cpufreq_set_policy here or the user_policy.max
533 will be wrongly overridden */
7970e08b
TR
534 ret = __cpufreq_set_policy(policy, &new_policy);
535
536 policy->user_policy.policy = policy->policy;
537 policy->user_policy.governor = policy->governor;
7970e08b 538
e08f5f5b
GS
539 if (ret)
540 return ret;
541 else
542 return count;
1da177e4
LT
543}
544
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
552
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy drivers (no ->target) only support the two fixed policies;
 * otherwise list every registered governor, stopping early if another
 * name might not fit in the page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 577
/*
 * Print the CPU numbers in @mask as a space-separated, newline-terminated
 * list, bounded to a page.  Helper for the affected/related_cpus files.
 */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
593
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
602
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
610
/*
 * Forward a user-requested frequency to the active governor's
 * store_setspeed hook (only the userspace governor provides one);
 * -EINVAL if the governor has no such hook or the input isn't a number.
 */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
628
/* Show the governor-set speed, or "<unsupported>" if no hook exists. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 636
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 *
 * Asks the driver for a platform-imposed frequency limit; falls back to
 * cpuinfo.max_freq if the driver has no hook or the query fails.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
651
6dad2a29
BP
/* Per-policy sysfs attribute definitions (cpuinfo_cur_freq is root-only). */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 666
/*
 * Attributes created for every policy kobject; driver-specific and
 * conditional ones (cpuinfo_cur_freq, scaling_cur_freq, bios_limit)
 * are added separately in cpufreq_add_dev_interface().
 */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
681
8aa84ad8
TR
682struct kobject *cpufreq_global_kobject;
683EXPORT_SYMBOL(cpufreq_global_kobject);
684
29464f28
DJ
/* Recover the policy / freq_attr from the embedded kobject/attribute. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 687
/*
 * Generic sysfs show: pin the policy (sysfs variant — no kobject ref),
 * take the policy rwsem for reading, then dispatch to the attribute's
 * show hook.  Refs/locks are unwound in reverse on every path.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
711
905d77cd
DJ
/*
 * Generic sysfs store: mirror of show() but takes the policy rwsem in
 * write mode before dispatching to the attribute's store hook.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
736
/*
 * kobject release callback: wakes whoever is blocked in
 * wait_for_completion(&policy->kobj_unregister) so teardown can finish.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
743
/* sysfs plumbing: route all attribute I/O through show()/store() above. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type for per-policy directories under .../cpufreq. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
754
/* symlink affected CPUs */
/*
 * For every CPU in the policy other than @cpu (which owns the real
 * directory), create a "cpufreq" symlink in that CPU's device dir
 * pointing at the shared policy kobject.  A policy reference is taken
 * per created link and only dropped on failure — the links created so
 * far are intentionally left in place (caller tears down the kobject).
 * NOTE(review): the ref is taken via cpufreq_cpu_get(cpu), i.e. on the
 * owning CPU's policy, not per_cpu(j) — presumably equivalent since all
 * these CPUs share the policy; confirm against the removal path.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
781
cf3289d0
AC
/*
 * Create the sysfs interface for a new policy and activate it:
 *  1. register the policy kobject under the cpu device,
 *  2. add driver-provided and conditional attribute files,
 *  3. publish the policy in the per-cpu maps (under the driver lock),
 *  4. symlink sibling CPUs, then apply the default policy.
 * On attribute failure the kobject is put and we wait for its release
 * (kobj_unregister) before returning; a failed set_policy calls the
 * driver's exit hook instead.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* make the policy visible to lookups on all covered CPUs */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
854
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged @cpu to @sibling's existing policy: stop the
 * governor, add the cpu to the policy's cpumask and per-cpu maps
 * (under the write rwsem + driver lock), restart the governor, then
 * create the "cpufreq" symlink.  On symlink failure the policy ref
 * taken by cpufreq_cpu_get() is dropped; on success it is kept,
 * balancing the eventual removal path.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
894
895/**
896 * cpufreq_add_dev - add a CPU device
897 *
32ee8c3e 898 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
899 *
900 * The Oracle says: try running cpufreq registration/unregistration concurrently
901 * with with cpu hotplugging and all hell will break loose. Tried to clean this
902 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 903 */
/**
 * cpufreq_add_dev - add a CPU device
 * @dev: the CPU device being added
 * @sif: subsystem interface (unused here, part of the callback signature)
 *
 * Creates and initializes a cpufreq policy for @dev's CPU: allocates the
 * policy and its cpumasks, calls the driver's ->init(), restores a
 * previously saved governor (hotplug case), and registers the sysfs
 * interface.  Returns 0 on success or if nothing needs doing (CPU offline,
 * or already managed), negative errno otherwise.
 *
 * Also called from the CPU-hotplug notifier with the RTNL-like hotplug
 * locks held by the caller.
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	/* Offline CPUs are not managed; report success so the caller moves on. */
	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			/* A sibling already owns a policy covering this CPU:
			 * just link this CPU into that existing policy. */
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	/* Pin the driver module for the duration of initialization. */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	/* Driver ->init() set min/max; snapshot them as the user's policy. */
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Restore the governor this CPU used before it was hot-unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	/* Undo the per-cpu registration done by cpufreq_add_dev_interface(). */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	/* Wait until the kobject release fires before freeing the policy. */
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1032
b8eed8af
VK
/*
 * update_policy_cpu - transfer policy ownership to another CPU
 * @policy: policy whose owner CPU is being changed
 * @cpu: the new owner CPU
 *
 * Records the old owner in policy->last_cpu, points every CPU in
 * policy->cpus at the new owner, updates the frequency-table bookkeeping
 * and notifies listeners with CPUFREQ_UPDATE_POLICY_CPU.  Used when the
 * owning CPU of a shared policy goes offline.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* Re-point every member CPU's policy_cpu reference at the new owner. */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1049
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 *
 * If other CPUs still share the policy, either just the sysfs link is
 * dropped (non-owner CPU) or ownership is moved to the first remaining
 * sibling (owner CPU).  Only when the last user goes away is the governor
 * exited, the driver's ->exit() called and the policy freed.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Detach the per-cpu policy pointer under the writer lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Non-owner CPU: only the sysfs symlink points here. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add this CPU to the policy and
			 * restore the per-cpu pointer and sibling link. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target)
			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else {
		/* Policy survives; drop this CPU's reference and restart
		 * the governor for the remaining members. */
		pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
		cpufreq_cpu_put(data);
		if (cpufreq_driver->target) {
			__cpufreq_governor(data, CPUFREQ_GOV_START);
			__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
		}
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1164
1165
8a25a2fd 1166static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1167{
8a25a2fd 1168 unsigned int cpu = dev->id;
5a01f2e8 1169 int retval;
ec28297a
VP
1170
1171 if (cpu_is_offline(cpu))
1172 return 0;
1173
8a25a2fd 1174 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1175 return retval;
1176}
1177
1178
65f27f38 1179static void handle_update(struct work_struct *work)
1da177e4 1180{
65f27f38
DH
1181 struct cpufreq_policy *policy =
1182 container_of(work, struct cpufreq_policy, update);
1183 unsigned int cpu = policy->cpu;
2d06d8c4 1184 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1185 cpufreq_update_policy(cpu);
1186}
1187
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 *
 * Fires a PRECHANGE/POSTCHANGE notifier pair so governors and listeners
 * adopt the real frequency immediately.
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;


	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	/* Look up the policy under the reader lock.  NOTE(review): policy is
	 * passed on even if the lookup returned NULL — presumably callers
	 * guarantee a policy exists for @cpu; confirm against
	 * cpufreq_notify_transition()'s handling of a NULL policy. */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1218
1219
32ee8c3e 1220/**
4ab70df4 1221 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1222 * @cpu: CPU number
1223 *
1224 * This is the last known freq, without actually getting it from the driver.
1225 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1226 */
1227unsigned int cpufreq_quick_get(unsigned int cpu)
1228{
9e21ba8b 1229 struct cpufreq_policy *policy;
e08f5f5b 1230 unsigned int ret_freq = 0;
95235ca2 1231
1c3d85dd
RW
1232 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1233 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1234
1235 policy = cpufreq_cpu_get(cpu);
95235ca2 1236 if (policy) {
e08f5f5b 1237 ret_freq = policy->cur;
95235ca2
VP
1238 cpufreq_cpu_put(policy);
1239 }
1240
4d34a67d 1241 return ret_freq;
95235ca2
VP
1242}
1243EXPORT_SYMBOL(cpufreq_quick_get);
1244
3d737108
JB
1245/**
1246 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1247 * @cpu: CPU number
1248 *
1249 * Just return the max possible frequency for a given CPU.
1250 */
1251unsigned int cpufreq_quick_get_max(unsigned int cpu)
1252{
1253 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1254 unsigned int ret_freq = 0;
1255
1256 if (policy) {
1257 ret_freq = policy->max;
1258 cpufreq_cpu_put(policy);
1259 }
1260
1261 return ret_freq;
1262}
1263EXPORT_SYMBOL(cpufreq_quick_get_max);
1264
95235ca2 1265
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu
 *
 * Returns 0 if the driver has no ->get() callback.  If the value read
 * from hardware disagrees with the cached policy->cur (and the driver
 * does not set CPUFREQ_CONST_LOOPS), resynchronizes via
 * cpufreq_out_of_sync() and schedules a policy update.
 *
 * Caller must hold the policy rwsem for @cpu (see cpufreq_get()).
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1288
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.  Returns 0 when @cpu has
 * no policy or the policy lock cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	/* lock_policy_rwsem_read() returns non-zero on failure. */
	if (!lock_policy_rwsem_read(cpu)) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);

	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1316
8a25a2fd
KS
/* Hooks cpufreq into the CPU subsystem: cpufreq_add_dev()/cpufreq_remove_dev()
 * run for every CPU device registered under &cpu_subsys. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1323
1da177e4 1324
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Returns 0, or the driver's ->suspend() error code on failure.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1355
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonethteless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			/* Skip the policy update; hardware resume failed. */
			goto fail;
		}
	}

	/* Defer the freq re-validation to process context (see header). */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
1397
e00e56df
RW
/* Syscore hooks: run on the sole remaining (boot) CPU with interrupts
 * disabled during system suspend/resume. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1402
9d95046e
BP
1403/**
1404 * cpufreq_get_current_driver - return current driver's name
1405 *
1406 * Return the name string of the currently loaded cpufreq driver
1407 * or NULL, if none.
1408 */
1409const char *cpufreq_get_current_driver(void)
1410{
1c3d85dd
RW
1411 if (cpufreq_driver)
1412 return cpufreq_driver->name;
1413
1414 return NULL;
9d95046e
BP
1415}
1416EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1417
1418/*********************************************************************
1419 * NOTIFIER LISTS INTERFACE *
1420 *********************************************************************/
1421
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.  Returns -EINVAL if cpufreq is
 * disabled or @list is not one of the two known chains.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* The transition chain must have been initialized (core_initcall). */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* SRCU chain: transition notifications can fire from
		 * contexts where blocking chains are unsuitable. */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1460
1461
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.  Returns -EINVAL if cpufreq is
 * disabled or @list is unknown.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1495
1496
1497/*********************************************************************
1498 * GOVERNORS *
1499 *********************************************************************/
1500
1501
/*
 * __cpufreq_driver_target - ask the driver to switch to @target_freq
 * @policy: policy to act on
 * @target_freq: requested frequency in kHz (clamped to policy min/max)
 * @relation: CPUFREQ_RELATION_* rounding hint for the driver
 *
 * Caller must hold the policy rwsem in write mode (see
 * cpufreq_driver_target() for the locked wrapper).  Returns 0 if the
 * target equals the current frequency, -ENODEV when cpufreq is disabled,
 * -EINVAL if the driver has no ->target, else the driver's result.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/* Already there (after clamping): nothing to do. */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1530
1da177e4
LT
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target()
 *
 * Takes a reference on the policy and the per-cpu policy rwsem (write)
 * before delegating.  Returns -EINVAL when the policy is gone or the
 * lock cannot be taken.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* Re-acquire the policy by cpu to take a proper reference. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1554
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency estimate
 * @policy: policy the query relates to (re-referenced internally)
 * @cpu: cpu to query
 *
 * Returns 0 when cpufreq is disabled or the driver provides no ->getavg,
 * -EINVAL when the policy has vanished, else the driver's value.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	/* Take our own reference for the duration of the driver call. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1575
153d7f3f 1576/*
153d7f3f
AV
1577 * when "event" is CPUFREQ_GOV_LIMITS
1578 */
1da177e4 1579
e08f5f5b
GS
1580static int __cpufreq_governor(struct cpufreq_policy *policy,
1581 unsigned int event)
1da177e4 1582{
cc993cab 1583 int ret;
6afde10c
TR
1584
1585 /* Only must be defined when default governor is known to have latency
1586 restrictions, like e.g. conservative or ondemand.
1587 That this is the case is already ensured in Kconfig
1588 */
1589#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1590 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1591#else
1592 struct cpufreq_governor *gov = NULL;
1593#endif
1c256245
TR
1594
1595 if (policy->governor->max_transition_latency &&
1596 policy->cpuinfo.transition_latency >
1597 policy->governor->max_transition_latency) {
6afde10c
TR
1598 if (!gov)
1599 return -EINVAL;
1600 else {
1601 printk(KERN_WARNING "%s governor failed, too long"
1602 " transition latency of HW, fallback"
1603 " to %s governor\n",
1604 policy->governor->name,
1605 gov->name);
1606 policy->governor = gov;
1607 }
1c256245 1608 }
1da177e4
LT
1609
1610 if (!try_module_get(policy->governor->owner))
1611 return -EINVAL;
1612
2d06d8c4 1613 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1614 policy->cpu, event);
ba17ca46
XC
1615
1616 mutex_lock(&cpufreq_governor_lock);
1617 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1618 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1619 mutex_unlock(&cpufreq_governor_lock);
1620 return -EBUSY;
1621 }
1622
1623 if (event == CPUFREQ_GOV_STOP)
1624 policy->governor_enabled = false;
1625 else if (event == CPUFREQ_GOV_START)
1626 policy->governor_enabled = true;
1627
1628 mutex_unlock(&cpufreq_governor_lock);
1629
1da177e4
LT
1630 ret = policy->governor->governor(policy, event);
1631
4d5dcc42
VK
1632 if (!ret) {
1633 if (event == CPUFREQ_GOV_POLICY_INIT)
1634 policy->governor->initialized++;
1635 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1636 policy->governor->initialized--;
ba17ca46
XC
1637 } else {
1638 /* Restore original values */
1639 mutex_lock(&cpufreq_governor_lock);
1640 if (event == CPUFREQ_GOV_STOP)
1641 policy->governor_enabled = true;
1642 else if (event == CPUFREQ_GOV_START)
1643 policy->governor_enabled = false;
1644 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1645 }
b394058f 1646
e08f5f5b
GS
1647 /* we keep one module reference alive for
1648 each CPU governed by this CPU */
1da177e4
LT
1649 if ((event != CPUFREQ_GOV_START) || ret)
1650 module_put(policy->governor->owner);
1651 if ((event == CPUFREQ_GOV_STOP) && !ret)
1652 module_put(policy->governor->owner);
1653
1654 return ret;
1655}
1656
1657
1da177e4
LT
1658int cpufreq_register_governor(struct cpufreq_governor *governor)
1659{
3bcb09a3 1660 int err;
1da177e4
LT
1661
1662 if (!governor)
1663 return -EINVAL;
1664
a7b422cd
KRW
1665 if (cpufreq_disabled())
1666 return -ENODEV;
1667
3fc54d37 1668 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1669
b394058f 1670 governor->initialized = 0;
3bcb09a3
JF
1671 err = -EBUSY;
1672 if (__find_governor(governor->name) == NULL) {
1673 err = 0;
1674 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1675 }
1da177e4 1676
32ee8c3e 1677 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1678 return err;
1da177e4
LT
1679}
1680EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1681
1682
/*
 * cpufreq_unregister_governor - remove a governor from the global list.
 *
 * Also clears the saved per-cpu governor name of any offline CPU that
 * had this governor, so it is not restored on re-plug after the module
 * is gone.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		/* Forget this governor as the offline CPU's saved choice. */
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1710
1711
1712
1713/*********************************************************************
1714 * POLICY INTERFACE *
1715 *********************************************************************/
1716
1717/**
1718 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1719 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1720 * is written
1da177e4
LT
1721 *
1722 * Reads the current cpufreq policy.
1723 */
1724int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1725{
1726 struct cpufreq_policy *cpu_policy;
1727 if (!policy)
1728 return -EINVAL;
1729
1730 cpu_policy = cpufreq_cpu_get(cpu);
1731 if (!cpu_policy)
1732 return -EINVAL;
1733
1da177e4 1734 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1735
1736 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1737 return 0;
1738}
1739EXPORT_SYMBOL(cpufreq_get_policy);
1740
1741
/*
 * __cpufreq_set_policy - apply a new policy to an active one
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the new limits with the driver, runs the ADJUST /
 * INCOMPATIBLE / NOTIFY notifier steps, then either hands the policy to
 * a setpolicy-style driver or performs a governor stop/switch/start with
 * rollback to the old governor on failure.
 *
 * Caller must hold the policy rwsem in write mode; the governor
 * POLICY_EXIT/START steps temporarily drop and retake it.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* New window must overlap the current one. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* Driver manages frequency itself; just pass the policy on. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				/* POLICY_EXIT may sleep/re-enter; drop the
				 * rwsem around it to avoid deadlock. */
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1847
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds a policy from the stored user_policy values, resynchronizes
 * the cached frequency with the hardware if the driver can report it,
 * and re-applies the policy via __cpufreq_set_policy().
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from a copy, then overlay the user-requested settings. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1902
dd184a01 1903static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1904 unsigned long action, void *hcpu)
1905{
1906 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1907 struct device *dev;
c32b6b8e 1908
8a25a2fd
KS
1909 dev = get_cpu_device(cpu);
1910 if (dev) {
c32b6b8e
AR
1911 switch (action) {
1912 case CPU_ONLINE:
9d3ce4af 1913 case CPU_ONLINE_FROZEN:
8a25a2fd 1914 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1915 break;
1916 case CPU_DOWN_PREPARE:
9d3ce4af 1917 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1918 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1919 break;
5a01f2e8 1920 case CPU_DOWN_FAILED:
9d3ce4af 1921 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1922 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1923 break;
1924 }
1925 }
1926 return NOTIFY_OK;
1927}
1928
/*
 * Hotplug notifier so per-CPU cpufreq state follows CPUs on/offline.
 * __refdata suppresses the section-mismatch warning: the callback is
 * __cpuinit and this non-init structure legitimately references it.
 */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1932
1933/*********************************************************************
1934 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1935 *********************************************************************/
1936
1937/**
1938 * cpufreq_register_driver - register a CPU Frequency driver
1939 * @driver_data: A struct cpufreq_driver containing the values#
1940 * submitted by the CPU Frequency driver.
1941 *
32ee8c3e 1942 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1943 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1944 * (and isn't unregistered in the meantime).
1da177e4
LT
1945 *
1946 */
221dee28 1947int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1948{
1949 unsigned long flags;
1950 int ret;
1951
a7b422cd
KRW
1952 if (cpufreq_disabled())
1953 return -ENODEV;
1954
1da177e4
LT
1955 if (!driver_data || !driver_data->verify || !driver_data->init ||
1956 ((!driver_data->setpolicy) && (!driver_data->target)))
1957 return -EINVAL;
1958
2d06d8c4 1959 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1960
1961 if (driver_data->setpolicy)
1962 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1963
0d1857a1 1964 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1965 if (cpufreq_driver) {
0d1857a1 1966 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1967 return -EBUSY;
1968 }
1c3d85dd 1969 cpufreq_driver = driver_data;
0d1857a1 1970 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1971
8a25a2fd 1972 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1973 if (ret)
1974 goto err_null_driver;
1da177e4 1975
1c3d85dd 1976 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1977 int i;
1978 ret = -ENODEV;
1979
1980 /* check for at least one working CPU */
7a6aedfa
MT
1981 for (i = 0; i < nr_cpu_ids; i++)
1982 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1983 ret = 0;
7a6aedfa
MT
1984 break;
1985 }
1da177e4
LT
1986
1987 /* if all ->init() calls failed, unregister */
1988 if (ret) {
2d06d8c4 1989 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1990 driver_data->name);
8a25a2fd 1991 goto err_if_unreg;
1da177e4
LT
1992 }
1993 }
1994
8f5bc2ab 1995 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1996 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1997
8f5bc2ab 1998 return 0;
8a25a2fd
KS
1999err_if_unreg:
2000 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2001err_null_driver:
0d1857a1 2002 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2003 cpufreq_driver = NULL;
0d1857a1 2004 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2005 return ret;
1da177e4
LT
2006}
2007EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2008
2009
2010/**
2011 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2012 *
32ee8c3e 2013 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2014 * the right to do so, i.e. if you have succeeded in initialising before!
2015 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2016 * currently not initialised.
2017 */
221dee28 2018int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2019{
2020 unsigned long flags;
2021
1c3d85dd 2022 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2023 return -EINVAL;
1da177e4 2024
2d06d8c4 2025 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2026
8a25a2fd 2027 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2028 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2029
0d1857a1 2030 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2031 cpufreq_driver = NULL;
0d1857a1 2032 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2033
2034 return 0;
2035}
2036EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2037
2038static int __init cpufreq_core_init(void)
2039{
2040 int cpu;
2041
a7b422cd
KRW
2042 if (cpufreq_disabled())
2043 return -ENODEV;
2044
5a01f2e8 2045 for_each_possible_cpu(cpu) {
f1625066 2046 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2047 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2048 }
8aa84ad8 2049
8a25a2fd 2050 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2051 BUG_ON(!cpufreq_global_kobject);
e00e56df 2052 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2053
5a01f2e8
VP
2054 return 0;
2055}
5a01f2e8 2056core_initcall(cpufreq_core_init);