/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
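/*
 * Hedged usage sketch (added, not part of the original file): both
 * iterators above walk cpufreq_policy_list, which is mutated under
 * cpufreq_driver_lock elsewhere in this file. The helper below is
 * illustrative only.
 */
static void __maybe_unused example_count_active_policies(unsigned int *count)
{
	struct cpufreq_policy *policy;

	*count = 0;
	for_each_active_policy(policy)
		(*count)++;
}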
/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
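/*
 * Hedged sketch (added, not part of the original file): sampling
 * consumers typically take two snapshots of get_cpu_idle_time() and
 * derive a busy fraction from the deltas. Illustrative only.
 */
static unsigned int __maybe_unused
example_busy_permille(unsigned int cpu, u64 prev_idle, u64 prev_wall)
{
	u64 wall, idle = get_cpu_idle_time(cpu, &wall, 0);
	u64 d_idle = idle - prev_idle, d_wall = wall - prev_wall;

	if (!d_wall)
		return 0;

	return div64_u64((d_wall - d_idle) * 1000, d_wall);
}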
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding call to cpufreq_cpu_put() isn't made, the policy won't
 * be freed, as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
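/*
 * Hedged usage sketch (added, not part of the original file): every
 * cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(); the kobject
 * refcount keeps the policy alive in between. Illustrative only.
 */
static unsigned int __maybe_unused example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (policy) {
		freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return freq;
}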
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		cpufreq_stats_record_transition(policy, freqs->new);
		cpufreq_times_record_transition(freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
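/*
 * Hedged sketch (added, not part of the original file): code performing
 * a synchronous frequency change brackets it with the begin/end helpers
 * above, mirroring what cpufreq_out_of_sync() does later in this file.
 * Both helper names below are hypothetical.
 */
static int example_write_hw(unsigned int khz)
{
	return 0;	/* stand-in for a real hardware write */
}

static int __maybe_unused example_switch(struct cpufreq_policy *policy,
					 unsigned int new_khz)
{
	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_khz };
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = example_write_hw(new_khz);
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}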
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
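/*
 * Worked example (added for clarity, not in the original file): with
 * cpuinfo.transition_latency = 500000 ns, latency works out to 500 us;
 * 500 * LATENCY_MULTIPLIER far exceeds the cap, so the function returns
 * the 10000 us (10 ms) ceiling instead.
 */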
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
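/*
 * For clarity (added, not in the original file):
 * show_one(scaling_min_freq, min) expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */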
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
			cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
	new_policy.min = policy->user_policy.min;			\
	new_policy.max = policy->user_policy.max;			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
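/*
 * Added note (not in the original file): cpufreq_show_cpus() emits the
 * mask as space-separated decimal CPU ids followed by a newline, e.g.
 * "0 1 2 3\n" for a four-CPU policy.
 */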
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov) {
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	} else {
		gov = cpufreq_default_governor();
		if (!gov)
			return -ENODATA;
	}

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy.policy,
					       NULL);
	}
	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}
	} else {
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_exit_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it is better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_exit_policy;

		cpufreq_stats_create_table(policy);
		cpufreq_times_create_policy(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_exit_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_exit_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret = 0;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * Updating inactive policies is invalid, so avoid doing that.  Also
	 * if fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);

		if (!policy_is_inactive(policy))
			ret_freq = __cpufreq_get(policy);

		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (like i2c, regulators, etc.) used for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
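/*
 * Hedged sketch (added, not part of the original file): a typical
 * transition-notifier client. The callback and block names are
 * illustrative only.
 */
static int __maybe_unused example_transition_cb(struct notifier_block *nb,
						unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u now runs at %u kHz\n", freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb __maybe_unused = {
	.notifier_call = example_transition_cb,
};

/*
 * Registered via:
 * cpufreq_register_notifier(&example_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */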
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
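/*
 * Hedged sketch (added, not part of the original file): a governor's
 * fast-path update hook would call the helper above roughly like this,
 * after checking policy->fast_switch_enabled. Illustrative only.
 */
static void __maybe_unused example_fast_update(struct cpufreq_policy *policy,
					       unsigned int next_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;

	freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (freq)
		policy->cur = freq;
}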
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
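/*
 * Added note (not in the original file): governors without fast
 * switching use this entry point, e.g.
 *
 *	cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 *
 * which takes policy->rwsem and funnels into __cpufreq_driver_target().
 */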
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
		cpufreq_update_current_freq(policy);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
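/*
 * Hedged sketch (added, not part of the original file): a minimal
 * governor registers itself at module init time. All names here are
 * illustrative, not a governor defined in this file.
 */
static struct cpufreq_governor example_governor __maybe_unused = {
	.name	= "example",
	.owner	= THIS_MODULE,
};

static int __init __maybe_unused example_governor_init(void)
{
	return cpufreq_register_governor(&example_governor);
}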
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	arch_set_max_freq_scale(policy->cpus, policy->max);

	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different needs at different
 * times.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended)
			goto unlock;

		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur))
			goto unlock;
	}

	cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *            FREQUENCY INVARIANT ACCOUNTING SUPPORT                 *
 *********************************************************************/
__weak void arch_set_freq_scale(struct cpumask *cpus,
				unsigned long cur_freq,
				unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

__weak void arch_set_max_freq_scale(struct cpumask *cpus,
				    unsigned long policy_max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_scale);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);