/*
 * drivers/cpufreq/cpufreq_hotplug.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/sched/rt.h>
#include <linux/kthread.h>
extern unsigned int get_normal_max_freq(void);
extern unsigned int mt_dvfs_power_dispatch_safe(void);
extern int mt_gpufreq_target(int idx);

/*
 * dbs is used in this file as a shortform for demandbased switching
 * It helps to keep variable names smaller, simpler
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_OD_THRESHOLD		(98)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(15)
#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(5)
#define MAX_FREQUENCY_DOWN_DIFFERENTIAL		(20)
#define MICRO_FREQUENCY_UP_THRESHOLD		(85)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(30000)
#define MIN_FREQUENCY_UP_THRESHOLD		(21)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_CPU_DOWN_DIFFERENTIAL		(10)
#define MICRO_CPU_DOWN_DIFFERENTIAL		(10)
#define MIN_CPU_DOWN_DIFFERENTIAL		(0)
#define MAX_CPU_DOWN_DIFFERENTIAL		(30)

#define DEF_CPU_UP_THRESHOLD			(90)
#define MICRO_CPU_UP_THRESHOLD			(90)
#define MIN_CPU_UP_THRESHOLD			(80)
#define MAX_CPU_UP_THRESHOLD			(100)

#define CPU_UP_AVG_TIMES			(10)
#define CPU_DOWN_AVG_TIMES			(50)
#define THERMAL_DISPATCH_AVG_TIMES		(30)

#define DEF_CPU_PERSIST_COUNT			(10)

#define INPUT_BOOST				(1)
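/*
 * Note on units: the load checks in dbs_check_cpu() below compare a
 * load-weighted frequency product against threshold * policy->cur.
 * For example, with up_threshold = 80 and the CPU running at 1 GHz,
 * a max_load_freq above 80% * 1 GHz triggers a ramp toward the
 * normal maximum frequency.
 */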
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using appropriate sampling
 * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
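/*
 * Example: a driver reporting a 100 uS transition latency gets a
 * default sampling rate of 100 uS * LATENCY_MULTIPLIER = 100 mS,
 * unless clamped from below by min_sampling_rate (see the GOV_START
 * path in cpufreq_governor_dbs()).
 */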
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_BALANCE
static
#endif
struct cpufreq_governor cpufreq_gov_balance = {
	.name			= "balance",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
static int g_next_hp_action = 0;

static long g_cpu_up_sum_load = 0;
static int g_cpu_up_count = 0;

static long g_cpu_down_sum_load = 0;
static int g_cpu_down_count = 0;
static int g_max_cpu_persist_count = 0;
static int g_thermal_count = 0;

static void hp_work_handler(struct work_struct *work);
static struct delayed_work hp_work;

static struct task_struct *freq_up_task;

static int cpu_loading = 0;
static int cpus_sum_load = 0;

enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */
static unsigned int dbs_ignore = 1;
static unsigned int dbs_thermal_limited;
static unsigned int dbs_thermal_limited_freq;
/* dvfs thermal limit */
void dbs_freq_thermal_limited(unsigned int limited, unsigned int freq)
{
	dbs_thermal_limited = limited;
	dbs_thermal_limited_freq = freq;
}
EXPORT_SYMBOL(dbs_freq_thermal_limited);

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

/*
 * dbs_hotplug protects all hotplug related global variables
 */
static DEFINE_MUTEX(hp_mutex);

DEFINE_MUTEX(bl_onoff_mutex);
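/*
 * Locking summary: dbs_mutex guards governor start/stop (dbs_enable),
 * hp_mutex guards the hotplug decision counters above, and
 * bl_onoff_mutex serializes the actual cpu_up()/cpu_down() calls in
 * hp_work_handler(). dbs_freq_increase() must be called with hp_mutex
 * released to avoid self-deadlock (see dbs_check_cpu()).
 */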
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int od_threshold;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
	unsigned int cpu_up_threshold;
	unsigned int cpu_down_differential;
	unsigned int cpu_up_avg_times;
	unsigned int cpu_down_avg_times;
	unsigned int thermal_dispatch_avg_times;
	unsigned int cpu_num_limit;
	unsigned int cpu_num_base;
	unsigned int is_cpu_hotplug_disable;
	unsigned int cpu_input_boost_enable;
} dbs_tuners_ins = {
	.od_threshold = DEF_FREQUENCY_OD_THRESHOLD,
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
	.cpu_up_threshold = DEF_CPU_UP_THRESHOLD,
	.cpu_down_differential = DEF_CPU_DOWN_DIFFERENTIAL,
	.cpu_up_avg_times = CPU_UP_AVG_TIMES,
	.cpu_down_avg_times = CPU_DOWN_AVG_TIMES,
	.thermal_dispatch_avg_times = THERMAL_DISPATCH_AVG_TIMES,
	.is_cpu_hotplug_disable = 1,
	.cpu_input_boost_enable = 1,
};
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
/* static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) */
/* { */
/*	u64 idle_time = get_cpu_idle_time_us(cpu, NULL); */

/*	if (idle_time == -1ULL) */
/*		return get_cpu_idle_time_jiffy(cpu, wall); */

/*	idle_time += get_cpu_iowait_time_us(cpu, wall); */

/*	return idle_time; */
/* } */
static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}
void force_two_core(void)
{
	bool raise_freq = false;

	mutex_lock(&hp_mutex);
	g_cpu_down_count = 0;
	g_cpu_down_sum_load = 0;
	if (num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
		raise_freq = true;
		g_next_hp_action = 1;
		schedule_delayed_work_on(0, &hp_work, 0);
	}
	mutex_unlock(&hp_mutex);

	if (raise_freq == true) {
		wake_up_process(freq_up_task);
	}

	mt_gpufreq_target(0);
}
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
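/*
 * Worked example: with powersave_bias = 100 (i.e. 10.0%), a 1000 MHz
 * request is reduced to freq_avg = 900 MHz. If the frequency table
 * brackets that as freq_lo = 800 MHz and freq_hi = 1000 MHz, then
 * jiffies_hi = (900 - 800) / (1000 - 800) = 1/2 of the sampling
 * window, so alternating between the two table entries averages out
 * to roughly 900 MHz.
 */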
static void hotplug_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void hotplug_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		hotplug_powersave_bias_init_cpu(i);
	}
}
/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);
/* cpufreq_hotplug Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(od_threshold, od_threshold);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
show_one(cpu_up_threshold, cpu_up_threshold);
show_one(cpu_down_differential, cpu_down_differential);
show_one(cpu_up_avg_times, cpu_up_avg_times);
show_one(cpu_down_avg_times, cpu_down_avg_times);
show_one(thermal_dispatch_avg_times, thermal_dispatch_avg_times);
show_one(cpu_num_limit, cpu_num_limit);
show_one(cpu_num_base, cpu_num_base);
show_one(is_cpu_hotplug_disable, is_cpu_hotplug_disable);

show_one(cpu_input_boost_enable, cpu_input_boost_enable);
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
 * if the original sampling_rate was 1 second and the requested new sampling
 * rate is 10 ms because the user needs immediate reaction from hotplug
 * governor, but not sure if higher frequency will be required or not,
 * then, the governor may change the sampling rate too late; up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the
 * new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(hp_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
						 usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}
void bl_enable_timer(int enable)
{
	static unsigned int sampling_rate_backup = 0;

	if (enable && !sampling_rate_backup)
		return;

	if (enable) {
		update_sampling_rate(sampling_rate_backup);
	} else {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned int new_rate = 30000 * 100; /* change to 3 s */

		/* back up the original sampling rate */
		sampling_rate_backup = dbs_tuners_ins.sampling_rate;
		update_sampling_rate(new_rate);

		policy = cpufreq_cpu_get(0);
		if (!policy)
			return;
		dbs_info = &per_cpu(hp_cpu_dbs_info, 0);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			return;
		}

		mutex_unlock(&dbs_info->timer_mutex);

		cancel_delayed_work_sync(&dbs_info->work);

		mutex_lock(&dbs_info->timer_mutex);

		schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
					 usecs_to_jiffies(new_rate));

		mutex_unlock(&dbs_info->timer_mutex);
	}
}
EXPORT_SYMBOL(bl_enable_timer);
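/*
 * bl_enable_timer(0) parks the governor: the current sampling rate is
 * backed up, stretched to ~3 s, and the pending CPU0 work is requeued
 * at the slow rate. bl_enable_timer(1) restores the backed-up rate via
 * update_sampling_rate(), which also pulls an already-queued sample
 * forward if the new deadline is earlier.
 */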
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
	    input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}
static ssize_t store_od_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
	    input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.od_threshold = input;
	return count;
}
static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_DOWN_DIFFERENTIAL ||
	    input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}
	dbs_tuners_ins.down_differential = input;
	return count;
}
static ssize_t store_sampling_down_factor(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(hp_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(hp_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->prev_cpu_wall,
					dbs_tuners_ins.io_is_busy);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}
static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	dbs_tuners_ins.powersave_bias = input;
	hotplug_powersave_bias_init();
	return count;
}
static ssize_t store_cpu_up_threshold(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_CPU_UP_THRESHOLD ||
	    input < MIN_CPU_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.cpu_up_threshold = input;
	return count;
}
static ssize_t store_cpu_down_differential(struct kobject *a, struct attribute *b,
					   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_CPU_DOWN_DIFFERENTIAL ||
	    input < MIN_CPU_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}
	dbs_tuners_ins.cpu_down_differential = input;
	return count;
}
static ssize_t store_cpu_up_avg_times(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.cpu_up_avg_times = input;
	return count;
}
static ssize_t store_cpu_down_avg_times(struct kobject *a, struct attribute *b,
					const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.cpu_down_avg_times = input;
	return count;
}
static ssize_t store_thermal_dispatch_avg_times(struct kobject *a, struct attribute *b,
						const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.thermal_dispatch_avg_times = input;
	return count;
}
static ssize_t store_cpu_num_limit(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.cpu_num_limit = input;
	return count;
}
static ssize_t store_cpu_num_base(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	bool raise_freq = false;
	int ret;
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get(0);
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.cpu_num_base = input;
	mutex_lock(&hp_mutex);
	if (num_online_cpus() < dbs_tuners_ins.cpu_num_base &&
	    num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
		raise_freq = true;
		g_next_hp_action = 1;
		schedule_delayed_work_on(0, &hp_work, 0);
	}
	mutex_unlock(&hp_mutex);

	if (raise_freq == true)
		dbs_freq_increase(policy, policy->max);

	cpufreq_cpu_put(policy);
	return count;
}
static ssize_t store_is_cpu_hotplug_disable(struct kobject *a, struct attribute *b,
					    const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > 1)
		return -EINVAL;
	dbs_tuners_ins.is_cpu_hotplug_disable = input;
	return count;
}
static ssize_t store_cpu_input_boost_enable(struct kobject *a, struct attribute *b,
					    const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 1) {
		return -EINVAL;
	}

	mutex_lock(&hp_mutex);
	dbs_tuners_ins.cpu_input_boost_enable = input;
	mutex_unlock(&hp_mutex);

	return count;
}
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(od_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_rw(cpu_up_threshold);
define_one_global_rw(cpu_down_differential);
define_one_global_rw(cpu_up_avg_times);
define_one_global_rw(cpu_down_avg_times);
define_one_global_rw(thermal_dispatch_avg_times);
define_one_global_rw(cpu_num_limit);
define_one_global_rw(cpu_num_base);
define_one_global_rw(is_cpu_hotplug_disable);

define_one_global_rw(cpu_input_boost_enable);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&od_threshold.attr,
	&down_differential.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	&cpu_up_threshold.attr,
	&cpu_down_differential.attr,
	&cpu_up_avg_times.attr,
	&cpu_down_avg_times.attr,
	&thermal_dispatch_avg_times.attr,
	&cpu_num_limit.attr,
	&cpu_num_base.attr,
	&is_cpu_hotplug_disable.attr,
	&cpu_input_boost_enable.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "balance",
};

/************************** sysfs end ************************/
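/*
 * The tunables above hang off the global cpufreq kobject. Assuming the
 * attribute group is named "balance" and sysfs is mounted at /sys,
 * they can be tuned at runtime, e.g.:
 *
 *   echo 85 > /sys/devices/system/cpu/cpufreq/balance/up_threshold
 *   echo 1  > /sys/devices/system/cpu/cpufreq/balance/is_cpu_hotplug_disable
 */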
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
int mt_cpufreq_cur_load(void)
{
	return cpu_loading;
}
EXPORT_SYMBOL(mt_cpufreq_cur_load);
void hp_set_dynamic_cpu_hotplug_enable(int enable)
{
	mutex_lock(&hp_mutex);
	dbs_tuners_ins.is_cpu_hotplug_disable = !enable;
	mutex_unlock(&hp_mutex);
}
EXPORT_SYMBOL(hp_set_dynamic_cpu_hotplug_enable);
void hp_limited_cpu_num(int num)
{
	mutex_lock(&hp_mutex);
	dbs_tuners_ins.cpu_num_limit = num;

	if (num < num_online_cpus()) {
		printk("%s: CPU off due to thermal protection! limit_num = %d < online = %d\n",
		       __func__, num, num_online_cpus());
		g_next_hp_action = 0;
		schedule_delayed_work_on(0, &hp_work, 0);
		g_cpu_down_count = 0;
		g_cpu_down_sum_load = 0;
	}

	mutex_unlock(&hp_mutex);
}
EXPORT_SYMBOL(hp_limited_cpu_num);
void hp_based_cpu_num(int num)
{
	mutex_lock(&hp_mutex);
	dbs_tuners_ins.cpu_num_base = num;
	mutex_unlock(&hp_mutex);
}
EXPORT_SYMBOL(hp_based_cpu_num);
static void __cpuinit hp_work_handler(struct work_struct *work)
{
	if (mutex_trylock(&bl_onoff_mutex)) {
		if (!dbs_tuners_ins.is_cpu_hotplug_disable) {
			int onlines_cpu_n = num_online_cpus();

			if (g_next_hp_action) { /* turn on CPU */
				if (onlines_cpu_n < num_possible_cpus()) {
					printk("hp_work_handler: cpu_up(%d) kick off\n", onlines_cpu_n);
					cpu_up(onlines_cpu_n);
					printk("hp_work_handler: cpu_up(%d) completion\n", onlines_cpu_n);

					dbs_ignore = 0; /* force trigger frequency scaling */
				}
			} else { /* turn off CPU */
				if (onlines_cpu_n > 1) {
					printk("hp_work_handler: cpu_down(%d) kick off\n", (onlines_cpu_n - 1));
					cpu_down((onlines_cpu_n - 1));
					printk("hp_work_handler: cpu_down(%d) completion\n", (onlines_cpu_n - 1));

					dbs_ignore = 0; /* force trigger frequency scaling */
				}
			}
		}
		mutex_unlock(&bl_onoff_mutex);
	}
}
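/*
 * Hotplug is performed one CPU at a time: g_next_hp_action == 1 brings
 * up CPU number num_online_cpus(), otherwise the highest-numbered
 * online CPU is taken down. dbs_ignore is cleared afterwards to force
 * a frequency re-evaluation on the next sample for the new core count.
 */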
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;
	bool raise_freq = false;
	unsigned int j;
	struct cpufreq_policy *policy;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check, if current idle time is less
	 * than 20% (default), then we try to increase frequency.
	 * Every sampling_rate, we look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;
	cpus_sum_load = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time,
						  dbs_tuners_ins.io_is_busy);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of hotplug, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		cpus_sum_load += load;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;

#ifdef DEBUG_LOG
		printk("dbs_check_cpu: cpu = %d\n", j);
		printk("dbs_check_cpu: wall_time = %d, idle_time = %d, load = %d\n", wall_time, idle_time, load);
		printk("dbs_check_cpu: freq_avg = %d, max_load_freq = %d, cpus_sum_load = %d\n", freq_avg, max_load_freq, cpus_sum_load);
#endif
	}

	/* record loading information */
	cpu_loading = max_load_freq / policy->cur;

	/* dispatch power budget */
	if (g_thermal_count >= dbs_tuners_ins.thermal_dispatch_avg_times) {
		g_thermal_count = 0;
		mt_dvfs_power_dispatch_safe();
		if ((dbs_thermal_limited == 1) && (policy->cur > dbs_thermal_limited_freq))
			__cpufreq_driver_target(policy, dbs_thermal_limited_freq, CPUFREQ_RELATION_L);
	} else
		g_thermal_count++;

	/* Check for OD frequency increase */
	if (policy->cur >= get_normal_max_freq()) {
		if ((max_load_freq > dbs_tuners_ins.od_threshold * policy->cur) &&
		    (num_online_cpus() == num_possible_cpus())) {
			g_max_cpu_persist_count++;
#ifdef DEBUG_LOG
			printk("dvfs_od: g_max_cpu_persist_count: %d\n", g_max_cpu_persist_count);
#endif
			if (g_max_cpu_persist_count == DEF_CPU_PERSIST_COUNT) {
				/* only ramp up to OD OPP here */
#ifdef DEBUG_LOG
				printk("dvfs_od: cpu loading = %d\n", max_load_freq / policy->cur);
#endif
				/* If switching to max speed, apply sampling_down_factor */
				if (policy->cur < policy->max)
					this_dbs_info->rate_mult =
						dbs_tuners_ins.sampling_down_factor;
				dbs_freq_increase(policy, policy->max);
#ifdef DEBUG_LOG
				printk("reset g_max_cpu_persist_count, count = 10\n");
#endif
				g_max_cpu_persist_count = 0;
			}
		} else {
			g_max_cpu_persist_count = 0;
		}
		goto hp_check;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < get_normal_max_freq())
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, get_normal_max_freq());
		if (g_max_cpu_persist_count != 0) {
			g_max_cpu_persist_count = 0;
#ifdef DEBUG_LOG
			printk("reset g_max_cpu_persist_count, and fallback to normal max\n");
#endif
		}
		goto hp_check;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		goto hp_check;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	    policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (g_max_cpu_persist_count != 0) {
			g_max_cpu_persist_count = 0;
#ifdef DEBUG_LOG
			printk("reset g_max_cpu_persist_count, decrease freq according to loading\n");
#endif
		}

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
						CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
							 CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
						CPUFREQ_RELATION_L);
		}
	}

hp_check:
	/* If Hot Plug policy disabled, return directly */
	if (dbs_tuners_ins.is_cpu_hotplug_disable)
		return;

	mutex_lock(&hp_mutex);

	/* Check CPU loading to power up slave CPU */
	if (num_online_cpus() < dbs_tuners_ins.cpu_num_base &&
	    num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
		raise_freq = true;
		printk("dbs_check_cpu: turn on CPU by perf service\n");
		g_next_hp_action = 1;
		schedule_delayed_work_on(0, &hp_work, 0);
	} else if (num_online_cpus() < num_possible_cpus() &&
		   num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
		g_cpu_up_count++;
		g_cpu_up_sum_load += cpus_sum_load;
		if (g_cpu_up_count == dbs_tuners_ins.cpu_up_avg_times) {
			g_cpu_up_sum_load /= dbs_tuners_ins.cpu_up_avg_times;
			if (g_cpu_up_sum_load >
			    (dbs_tuners_ins.cpu_up_threshold * num_online_cpus())) {
#ifdef DEBUG_LOG
				printk("dbs_check_cpu: g_cpu_up_sum_load = %ld\n", g_cpu_up_sum_load);
#endif
				raise_freq = true;
				printk("dbs_check_cpu: turn on CPU\n");
				g_next_hp_action = 1;
				schedule_delayed_work_on(0, &hp_work, 0);
			}
			g_cpu_up_count = 0;
			g_cpu_up_sum_load = 0;
		}
#ifdef DEBUG_LOG
		printk("dbs_check_cpu: g_cpu_up_count = %d, g_cpu_up_sum_load = %ld\n", g_cpu_up_count, g_cpu_up_sum_load);
		printk("dbs_check_cpu: cpu_up_threshold = %d\n", (dbs_tuners_ins.cpu_up_threshold * num_online_cpus()));
#endif
	}

	/* Check CPU loading to power down slave CPU */
	if (num_online_cpus() > 1) {
		g_cpu_down_count++;
		g_cpu_down_sum_load += cpus_sum_load;
		if (g_cpu_down_count == dbs_tuners_ins.cpu_down_avg_times) {
			g_cpu_down_sum_load /= dbs_tuners_ins.cpu_down_avg_times;
			if (g_cpu_down_sum_load <
			    ((dbs_tuners_ins.cpu_up_threshold - dbs_tuners_ins.cpu_down_differential) *
			     (num_online_cpus() - 1))) {
				if (num_online_cpus() > dbs_tuners_ins.cpu_num_base) {
#ifdef DEBUG_LOG
					printk("dbs_check_cpu: g_cpu_down_sum_load = %ld\n", g_cpu_down_sum_load);
#endif
					raise_freq = true;
					printk("dbs_check_cpu: turn off CPU\n");
					g_next_hp_action = 0;
					schedule_delayed_work_on(0, &hp_work, 0);
				}
			}
			g_cpu_down_count = 0;
			g_cpu_down_sum_load = 0;
		}
#ifdef DEBUG_LOG
		printk("dbs_check_cpu: g_cpu_down_count = %d, g_cpu_down_sum_load = %ld\n", g_cpu_down_count, g_cpu_down_sum_load);
		printk("dbs_check_cpu: cpu_down_threshold = %d\n", ((dbs_tuners_ins.cpu_up_threshold - dbs_tuners_ins.cpu_down_differential) * (num_online_cpus() - 1)));
#endif
	}

	mutex_unlock(&hp_mutex);

	/*
	 * dbs_freq_increase must be invoked outside of hp_mutex
	 * to avoid self-deadlock
	 */
	if (raise_freq == true)
		dbs_freq_increase(policy, policy->max);
}
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (androidlcom) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 1;	/* io wait time should be subtracted from idle time */
}
#ifdef CONFIG_HOTPLUG_CPU
static void dbs_input_event(struct input_handle *handle, unsigned int type,
			    unsigned int code, int value)
{
	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1) &&
	    (dbs_tuners_ins.cpu_input_boost_enable)) {
		/* wake the input-boost thread to ramp CPU0 to max */
		wake_up_process(freq_up_task);
	}
}
static int dbs_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq_balance";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
static const struct input_device_id dbs_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			    BIT_MASK(ABS_MT_POSITION_X) |
			    BIT_MASK(ABS_MT_POSITION_Y) },
	}, /* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			    BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	}, /* touchpad */
	{ },
};

static struct input_handler dbs_input_handler = {
	.event		= dbs_input_event,
	.connect	= dbs_input_connect,
	.disconnect	= dbs_input_disconnect,
	.name		= "cpufreq_balance",
	.id_table	= dbs_ids,
};
#endif /* CONFIG_HOTPLUG_CPU */
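/*
 * Input-boost path: a BTN_TOUCH down event wakes freq_up_task, which
 * ramps CPU0's policy to its maximum via dbs_freq_increase();
 * force_two_core() additionally schedules a CPU online. Both run
 * outside the sampling timer, so the boost is not delayed by up to one
 * sampling_rate period.
 */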
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall,
						dbs_tuners_ins.io_is_busy);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		hotplug_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timerschedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
						MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();

#ifdef DEBUG_LOG
			printk("cpufreq_governor_dbs: min_sampling_rate = %d\n", min_sampling_rate);
			printk("cpufreq_governor_dbs: dbs_tuners_ins.sampling_rate = %d\n", dbs_tuners_ins.sampling_rate);
			printk("cpufreq_governor_dbs: dbs_tuners_ins.io_is_busy = %d\n", dbs_tuners_ins.io_is_busy);
#endif
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu)
			rc = input_register_handler(&dbs_input_handler);
#endif
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu)
			input_unregister_handler(&dbs_input_handler);
#endif
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (get_normal_max_freq() < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				get_normal_max_freq(), CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}
/*
static int cpufreq_gov_dbs_get_sum_load(void)
{
	return cpus_sum_load;
}
*/
static int touch_freq_up_task(void *data)
{
	struct cpufreq_policy *policy;

	while (1) {
		policy = cpufreq_cpu_get(0);
		if (policy) {
			dbs_freq_increase(policy, policy->max);
			cpufreq_cpu_put(policy);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		if (kthread_should_stop())
			break;
	}

	return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu = get_cpu();
#ifdef INPUT_BOOST
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
#endif

	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		dbs_tuners_ins.cpu_up_threshold =
					MICRO_CPU_UP_THRESHOLD;
		dbs_tuners_ins.cpu_down_differential =
					MICRO_CPU_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	dbs_tuners_ins.cpu_num_limit = num_possible_cpus();
	dbs_tuners_ins.cpu_num_base = 1;

	if (dbs_tuners_ins.cpu_num_limit > 1)
		dbs_tuners_ins.is_cpu_hotplug_disable = 0;

	INIT_DELAYED_WORK(&hp_work, hp_work_handler);

#ifdef INPUT_BOOST
	freq_up_task = kthread_create(touch_freq_up_task, NULL,
				      "touch_freq_up_task");
	if (IS_ERR(freq_up_task))
		return PTR_ERR(freq_up_task);

	sched_setscheduler_nocheck(freq_up_task, SCHED_FIFO, &param);
	get_task_struct(freq_up_task);
#endif

#ifdef DEBUG_LOG
	printk("cpufreq_gov_dbs_init: min_sampling_rate = %d\n", min_sampling_rate);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.up_threshold = %d\n", dbs_tuners_ins.up_threshold);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.od_threshold = %d\n", dbs_tuners_ins.od_threshold);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.down_differential = %d\n", dbs_tuners_ins.down_differential);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_threshold = %d\n", dbs_tuners_ins.cpu_up_threshold);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_differential = %d\n", dbs_tuners_ins.cpu_down_differential);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_avg_times = %d\n", dbs_tuners_ins.cpu_up_avg_times);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_avg_times = %d\n", dbs_tuners_ins.cpu_down_avg_times);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.thermal_di_avg_times = %d\n", dbs_tuners_ins.thermal_dispatch_avg_times);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_limit = %d\n", dbs_tuners_ins.cpu_num_limit);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_base = %d\n", dbs_tuners_ins.cpu_num_base);
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.is_cpu_hotplug_disable = %d\n", dbs_tuners_ins.is_cpu_hotplug_disable);
#ifdef INPUT_BOOST
	printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_input_boost_enable = %d\n", dbs_tuners_ins.cpu_input_boost_enable);
#endif /* INPUT_BOOST */
#endif /* DEBUG_LOG */

	return cpufreq_register_governor(&cpufreq_gov_balance);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	cancel_delayed_work_sync(&hp_work);

	cpufreq_unregister_governor(&cpufreq_gov_balance);

#ifdef INPUT_BOOST
	kthread_stop(freq_up_task);
	put_task_struct(freq_up_task);
#endif
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_balance' - A dynamic cpufreq governor for "
		   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BALANCE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);