2 * drivers/cpufreq/cpufreq_hotplug.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/cpufreq.h>
16 #include <linux/init.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/kobject.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/percpu-defs.h>
23 #include <linux/slab.h>
24 #include <linux/sysfs.h>
25 #include <linux/tick.h>
26 #include <linux/types.h>
27 #include <linux/cpu.h>
28 #include <linux/sched.h>
29 #include <linux/sched/rt.h>
30 #include <linux/kthread.h>
31 #include <linux/input.h> /* <-XXX */
32 #include <linux/slab.h> /* <-XXX */
33 #include "mach/mt_cpufreq.h" /* <-XXX */
35 #include "cpufreq_governor.h"
37 /* Hot-plug governor macros */
38 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
39 #define DEF_FREQUENCY_UP_THRESHOLD (80)
40 #define DEF_SAMPLING_DOWN_FACTOR (1)
41 #define MAX_SAMPLING_DOWN_FACTOR (100000)
42 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (15)
43 #define MIN_FREQUENCY_DOWN_DIFFERENTIAL (5) /* <-XXX */
44 #define MAX_FREQUENCY_DOWN_DIFFERENTIAL (20) /* <-XXX */
45 #define MICRO_FREQUENCY_UP_THRESHOLD (85)
46 #ifdef CONFIG_MTK_SDIOAUTOK_SUPPORT
47 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (27000)
49 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (30000)
51 #define MIN_FREQUENCY_UP_THRESHOLD (21)
52 #define MAX_FREQUENCY_UP_THRESHOLD (100)
54 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
58 #define DEF_CPU_DOWN_DIFFERENTIAL (10)
59 #define MICRO_CPU_DOWN_DIFFERENTIAL (10)
60 #define MIN_CPU_DOWN_DIFFERENTIAL (0)
61 #define MAX_CPU_DOWN_DIFFERENTIAL (30)
63 #define DEF_CPU_UP_THRESHOLD (90)
64 #define MICRO_CPU_UP_THRESHOLD (90)
65 #define MIN_CPU_UP_THRESHOLD (50)
66 #define MAX_CPU_UP_THRESHOLD (100)
68 #define DEF_CPU_UP_AVG_TIMES (10)
69 #define MIN_CPU_UP_AVG_TIMES (1)
70 #define MAX_CPU_UP_AVG_TIMES (20)
72 #define DEF_CPU_DOWN_AVG_TIMES (100)
73 #define MIN_CPU_DOWN_AVG_TIMES (20)
74 #define MAX_CPU_DOWN_AVG_TIMES (200)
76 #define DEF_CPU_INPUT_BOOST_ENABLE (1)
77 #define DEF_CPU_INPUT_BOOST_NUM (2)
79 #define DEF_CPU_RUSH_BOOST_ENABLE (1)
81 #define DEF_CPU_RUSH_THRESHOLD (98)
82 #define MICRO_CPU_RUSH_THRESHOLD (98)
83 #define MIN_CPU_RUSH_THRESHOLD (80)
84 #define MAX_CPU_RUSH_THRESHOLD (100)
86 #define DEF_CPU_RUSH_AVG_TIMES (5)
87 #define MIN_CPU_RUSH_AVG_TIMES (1)
88 #define MAX_CPU_RUSH_AVG_TIMES (10)
90 #define DEF_CPU_RUSH_TLP_TIMES (5)
91 #define MIN_CPU_RUSH_TLP_TIMES (1)
92 #define MAX_CPU_RUSH_TLP_TIMES (10)
94 /* #define DEBUG_LOG */
100 CPU_HOTPLUG_WORK_TYPE_NONE
= 0,
101 CPU_HOTPLUG_WORK_TYPE_BASE
,
102 CPU_HOTPLUG_WORK_TYPE_LIMIT
,
103 CPU_HOTPLUG_WORK_TYPE_UP
,
104 CPU_HOTPLUG_WORK_TYPE_DOWN
,
105 CPU_HOTPLUG_WORK_TYPE_RUSH
,
106 } cpu_hotplug_work_type_t
;
109 * cpu hotplug - global variable, function declaration
111 static DEFINE_MUTEX(hp_mutex
);
112 DEFINE_MUTEX(hp_onoff_mutex
);
114 int g_cpus_sum_load_current
= 0; /* set global for information purpose */
115 #ifdef CONFIG_HOTPLUG_CPU
117 static long g_cpu_up_sum_load
;
118 static int g_cpu_up_count
;
119 static int g_cpu_up_load_index
;
120 static long g_cpu_up_load_history
[MAX_CPU_UP_AVG_TIMES
] = { 0 };
122 static long g_cpu_down_sum_load
;
123 static int g_cpu_down_count
;
124 static int g_cpu_down_load_index
;
125 static long g_cpu_down_load_history
[MAX_CPU_DOWN_AVG_TIMES
] = { 0 };
127 static cpu_hotplug_work_type_t g_trigger_hp_work
;
128 static unsigned int g_next_hp_action
;
129 static struct delayed_work hp_work
;
130 struct workqueue_struct
*hp_wq
= NULL
;
132 static int g_tlp_avg_current
; /* set global for information purpose */
133 static int g_tlp_avg_sum
;
134 static int g_tlp_avg_count
;
135 static int g_tlp_avg_index
;
136 static int g_tlp_avg_average
; /* set global for information purpose */
137 static int g_tlp_avg_history
[MAX_CPU_RUSH_TLP_TIMES
] = { 0 };
139 static int g_tlp_iowait_av
;
141 static int g_cpu_rush_count
;
143 static void hp_reset_strategy_nolock(void);
144 static void hp_reset_strategy(void);
146 #else /* #ifdef CONFIG_HOTPLUG_CPU */
148 static void hp_reset_strategy_nolock(void)
152 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
154 /* dvfs - function declaration */
155 static void dbs_freq_increase(struct cpufreq_policy
*p
, unsigned int freq
);
157 #if defined(CONFIG_THERMAL_LIMIT_TEST)
158 extern unsigned int mt_cpufreq_thermal_test_limited_load(void);
161 static unsigned int dbs_ignore
= 1;
162 static unsigned int dbs_thermal_limited
;
163 static unsigned int dbs_thermal_limited_freq
;
165 /* dvfs thermal limit */
/*
 * dbs_freq_thermal_limited - thermal-driver hook recording the current
 * thermal frequency cap.
 * @limited: 1 while thermal limiting is active, 0 otherwise
 * @freq:    the capped frequency to enforce while limited
 *
 * The stored globals are read by dbs_freq_increase() to clamp any
 * requested frequency above the cap while @limited == 1.
 * NOTE(review): this text is a lossy extraction — the original braces
 * and blank lines are missing; do not treat it as compilable as-is.
 */
166 void dbs_freq_thermal_limited(unsigned int limited
, unsigned int freq
)
/* latch whether the thermal limiter is engaged */
168 dbs_thermal_limited
= limited
;
/* latch the ceiling frequency honoured while limited */
169 dbs_thermal_limited_freq
= freq
;
171 EXPORT_SYMBOL(dbs_freq_thermal_limited
);
174 void (*cpufreq_freq_check
) (enum mt_cpu_dvfs_id id
) = NULL
;
175 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
177 static DEFINE_PER_CPU(struct hp_cpu_dbs_info_s
, hp_cpu_dbs_info
);
179 static struct hp_ops hp_ops
;
181 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
182 static struct cpufreq_governor cpufreq_gov_hotplug
;
185 static unsigned int default_powersave_bias
;
/*
 * hotplug_powersave_bias_init_cpu - (re)initialise per-CPU powersave-bias
 * state for @cpu: cache the CPU's frequency table and clear the pending
 * low-frequency target so averaging starts fresh.
 */
187 static void hotplug_powersave_bias_init_cpu(int cpu
)
189 struct hp_cpu_dbs_info_s
*dbs_info
= &per_cpu(hp_cpu_dbs_info
, cpu
);
/* cache this CPU's cpufreq table for later bias target lookups */
191 dbs_info
->freq_table
= cpufreq_frequency_get_table(cpu
);
/* no low-frequency leg pending yet */
192 dbs_info
->freq_lo
= 0;
196 * Not all CPUs want IO time to be accounted as busy; this depends on how
197 * efficient idling at a higher frequency/voltage is.
198 * Pavel Machek says this is not so for various generations of AMD and old
200 * Mike Chan (android.com) claims this is also not true for ARM.
201 * Because of this, whitelist specific known (series) of CPUs by default, and
202 * leave all others up to the user.
204 static int should_io_be_busy(void)
206 #if defined(CONFIG_X86)
208 * For Intel, Core 2 (model 15) and later have an efficient idle.
210 if (boot_cpu_data
.x86_vendor
== X86_VENDOR_INTEL
&&
211 boot_cpu_data
.x86
== 6 && boot_cpu_data
.x86_model
>= 15)
214 return 1; /* io wait time should be subtracted from idle time // <-XXX */
218 * Find right freq to be set now with powersave_bias on.
219 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
220 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
222 static unsigned int generic_powersave_bias_target(struct cpufreq_policy
*policy
,
223 unsigned int freq_next
, unsigned int relation
)
225 unsigned int freq_req
, freq_reduc
, freq_avg
;
226 unsigned int freq_hi
, freq_lo
;
227 unsigned int index
= 0;
228 unsigned int jiffies_total
, jiffies_hi
, jiffies_lo
;
229 struct hp_cpu_dbs_info_s
*dbs_info
= &per_cpu(hp_cpu_dbs_info
,
231 struct dbs_data
*dbs_data
= policy
->governor_data
;
232 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
234 if (!dbs_info
->freq_table
) {
235 dbs_info
->freq_lo
= 0;
236 dbs_info
->freq_lo_jiffies
= 0;
240 cpufreq_frequency_table_target(policy
, dbs_info
->freq_table
, freq_next
, relation
, &index
);
241 freq_req
= dbs_info
->freq_table
[index
].frequency
;
242 freq_reduc
= freq_req
* hp_tuners
->powersave_bias
/ 1000;
243 freq_avg
= freq_req
- freq_reduc
;
245 /* Find freq bounds for freq_avg in freq_table */
247 cpufreq_frequency_table_target(policy
, dbs_info
->freq_table
, freq_avg
,
248 CPUFREQ_RELATION_H
, &index
);
249 freq_lo
= dbs_info
->freq_table
[index
].frequency
;
251 cpufreq_frequency_table_target(policy
, dbs_info
->freq_table
, freq_avg
,
252 CPUFREQ_RELATION_L
, &index
);
253 freq_hi
= dbs_info
->freq_table
[index
].frequency
;
255 /* Find out how long we have to be in hi and lo freqs */
256 if (freq_hi
== freq_lo
) {
257 dbs_info
->freq_lo
= 0;
258 dbs_info
->freq_lo_jiffies
= 0;
261 jiffies_total
= usecs_to_jiffies(hp_tuners
->sampling_rate
);
262 jiffies_hi
= (freq_avg
- freq_lo
) * jiffies_total
;
263 jiffies_hi
+= ((freq_hi
- freq_lo
) / 2);
264 jiffies_hi
/= (freq_hi
- freq_lo
);
265 jiffies_lo
= jiffies_total
- jiffies_hi
;
266 dbs_info
->freq_lo
= freq_lo
;
267 dbs_info
->freq_lo_jiffies
= jiffies_lo
;
268 dbs_info
->freq_hi_jiffies
= jiffies_hi
;
/*
 * hotplug_powersave_bias_init - reset powersave-bias state on every
 * online CPU by delegating to hotplug_powersave_bias_init_cpu().
 * Called after the powersave_bias tunable changes (see
 * store_powersave_bias).
 */
272 static void hotplug_powersave_bias_init(void)
275 for_each_online_cpu(i
) {
276 hotplug_powersave_bias_init_cpu(i
);
280 static void dbs_freq_increase(struct cpufreq_policy
*p
, unsigned int freq
)
282 struct dbs_data
*dbs_data
= p
->governor_data
;
283 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
285 if (hp_tuners
->powersave_bias
)
286 freq
= hp_ops
.powersave_bias_target(p
, freq
, CPUFREQ_RELATION_H
);
287 else if (p
->cur
== p
->max
) {
288 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
289 if (dbs_ignore
== 0) {
290 if ((dbs_thermal_limited
== 1) && (freq
> dbs_thermal_limited_freq
)) {
291 freq
= dbs_thermal_limited_freq
;
292 pr_debug("[dbs_freq_increase] thermal limit freq = %d\n", freq
);
297 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
301 __cpufreq_driver_target(p
, freq
, hp_tuners
->powersave_bias
?
302 CPUFREQ_RELATION_L
: CPUFREQ_RELATION_H
);
305 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
307 * cpu hotplug - function definition
/*
 * hp_get_dynamic_cpu_hotplug_enable - report whether this governor's
 * dynamic CPU hotplug policy is enabled.
 *
 * Returns the logical inverse of the tuner flag is_cpu_hotplug_disable.
 * NOTE(review): reads governor data through cpu 0's policy only (the
 * in-source TODO below acknowledges this); assumes the governor is
 * active on cpu 0 — confirm against callers.
 */
309 int hp_get_dynamic_cpu_hotplug_enable(void)
311 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
312 struct hp_dbs_tuners
*hp_tuners
;
316 hp_tuners
= dbs_data
->tuners
;
/* enabled == NOT disabled */
320 return !(hp_tuners
->is_cpu_hotplug_disable
);
322 EXPORT_SYMBOL(hp_get_dynamic_cpu_hotplug_enable
);
/*
 * hp_set_dynamic_cpu_hotplug_enable - switch the governor's dynamic CPU
 * hotplug policy on (@enable == 1) or off (@enable == 0).
 *
 * Values outside 0/1 are rejected (the early-return path is missing
 * from this extraction).  When re-enabling after a disable, the hotplug
 * averaging state is reset via hp_reset_strategy_nolock() so stale load
 * history does not trigger an immediate plug/unplug.  The flag update
 * is serialised on hp_mutex.
 */
324 void hp_set_dynamic_cpu_hotplug_enable(int enable
)
326 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
327 struct hp_dbs_tuners
*hp_tuners
;
331 hp_tuners
= dbs_data
->tuners
;
/* only 0 or 1 is a valid enable value */
335 if (enable
> 1 || enable
< 0)
338 mutex_lock(&hp_mutex
);
/* transitioning disabled -> enabled: start strategy state fresh */
340 if (hp_tuners
->is_cpu_hotplug_disable
&& enable
)
341 hp_reset_strategy_nolock();
/* flag is stored inverted: "disable" = !enable */
343 hp_tuners
->is_cpu_hotplug_disable
= !enable
;
344 mutex_unlock(&hp_mutex
);
346 EXPORT_SYMBOL(hp_set_dynamic_cpu_hotplug_enable
);
/*
 * hp_limited_cpu_num - set the upper bound on how many CPUs the hotplug
 * strategy may keep online.
 * @num: new limit; must be in [1, num_possible_cpus()] (out-of-range
 *       values are rejected — the early-return path is missing from
 *       this extraction).
 *
 * The limit is stored in the tuners under hp_mutex and consulted by the
 * plug-up paths (e.g. hp_work_handler, hp_check_cpu).
 */
348 void hp_limited_cpu_num(int num
)
350 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
351 struct hp_dbs_tuners
*hp_tuners
;
355 hp_tuners
= dbs_data
->tuners
;
/* reject limits outside the possible-CPU range */
359 if (num
> num_possible_cpus() || num
< 1)
362 mutex_lock(&hp_mutex
);
363 hp_tuners
->cpu_num_limit
= num
;
364 mutex_unlock(&hp_mutex
);
366 EXPORT_SYMBOL(hp_limited_cpu_num
);
368 void hp_based_cpu_num(int num
)
370 unsigned int online_cpus_count
;
371 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
372 struct hp_dbs_tuners
*hp_tuners
;
376 hp_tuners
= dbs_data
->tuners
;
380 if (num
> num_possible_cpus() || num
< 1)
383 mutex_lock(&hp_mutex
);
385 hp_tuners
->cpu_num_base
= num
;
386 online_cpus_count
= num_online_cpus();
387 #ifdef CONFIG_HOTPLUG_CPU
389 if (online_cpus_count
< num
&& online_cpus_count
< hp_tuners
->cpu_num_limit
) {
390 struct hp_cpu_dbs_info_s
*dbs_info
;
391 struct cpufreq_policy
*policy
;
393 dbs_info
= &per_cpu(hp_cpu_dbs_info
, 0); /* TODO: FIXME, cpu = 0 */
394 policy
= dbs_info
->cdbs
.cur_policy
;
396 dbs_freq_increase(policy
, policy
->max
);
397 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_BASE
;
398 /* schedule_delayed_work_on(0, &hp_work, 0); */
400 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
402 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
406 mutex_unlock(&hp_mutex
);
408 EXPORT_SYMBOL(hp_based_cpu_num
);
/*
 * hp_get_cpu_rush_boost_enable - report whether "rush boost" (plugging
 * in extra CPUs on a sudden load burst) is enabled in the tuners.
 * NOTE(review): like its siblings, this reads governor data via cpu 0's
 * policy only (see the TODO below).
 */
410 int hp_get_cpu_rush_boost_enable(void)
412 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
413 struct hp_dbs_tuners
*hp_tuners
;
417 hp_tuners
= dbs_data
->tuners
;
421 return hp_tuners
->cpu_rush_boost_enable
;
423 EXPORT_SYMBOL(hp_get_cpu_rush_boost_enable
);
/*
 * hp_set_cpu_rush_boost_enable - enable (@enable == 1) or disable
 * (@enable == 0) rush boost.  Values outside 0/1 are rejected (the
 * early-return path is missing from this extraction).  The store is
 * serialised on hp_mutex.  Unlike hp_set_dynamic_cpu_hotplug_enable(),
 * no strategy-state reset happens here.
 */
425 void hp_set_cpu_rush_boost_enable(int enable
)
427 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
428 struct hp_dbs_tuners
*hp_tuners
;
432 hp_tuners
= dbs_data
->tuners
;
/* only 0 or 1 is a valid enable value */
436 if (enable
> 1 || enable
< 0)
439 mutex_lock(&hp_mutex
);
440 hp_tuners
->cpu_rush_boost_enable
= enable
;
441 mutex_unlock(&hp_mutex
);
443 EXPORT_SYMBOL(hp_set_cpu_rush_boost_enable
);
445 #ifdef CONFIG_HOTPLUG_CPU
447 #ifdef CONFIG_MTK_SCHED_RQAVG_KS
448 extern void sched_get_nr_running_avg(int *avg
, int *iowait_avg
);
449 #else /* #ifdef CONFIG_MTK_SCHED_RQAVG_KS */
450 static void sched_get_nr_running_avg(int *avg
, int *iowait_avg
)
452 *avg
= num_possible_cpus() * 100;
454 #endif /* #ifdef CONFIG_MTK_SCHED_RQAVG_KS */
/*
 * hp_reset_strategy_nolock - clear the hotplug decision state: the
 * up/down load accumulators and indices, the rush/TLP history, and any
 * pending hotplug work type.  Caller must hold hp_mutex.
 *
 * Note: only the LAST slot of each history ring is zeroed here (index
 * avg_times - 1); the full-array memset alternatives are deliberately
 * commented out in the original.  NOTE(review): some resets visible in
 * the original (e.g. g_cpu_up_count, per the numbering gap at line 467)
 * were dropped by this extraction.
 */
456 static void hp_reset_strategy_nolock(void)
458 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
459 struct hp_dbs_tuners
*hp_tuners
;
463 hp_tuners
= dbs_data
->tuners
;
/* reset the "plug CPU up" load averaging window */
468 g_cpu_up_sum_load
= 0;
469 g_cpu_up_load_index
= 0;
470 g_cpu_up_load_history
[hp_tuners
->cpu_up_avg_times
- 1] = 0;
471 /* memset(g_cpu_up_load_history, 0, sizeof(long) * MAX_CPU_UP_AVG_TIMES); */
/* reset the "plug CPU down" load averaging window */
473 g_cpu_down_count
= 0;
474 g_cpu_down_sum_load
= 0;
475 g_cpu_down_load_index
= 0;
476 g_cpu_down_load_history
[hp_tuners
->cpu_down_avg_times
- 1] = 0;
477 /* memset(g_cpu_down_load_history, 0, sizeof(long) * MAX_CPU_DOWN_AVG_TIMES); */
/* reset rush-boost TLP history and burst counter */
482 g_tlp_avg_history
[hp_tuners
->cpu_rush_tlp_times
- 1] = 0;
483 g_cpu_rush_count
= 0;
/* no hotplug work is pending any more */
485 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_NONE
;
/*
 * hp_reset_strategy - locked wrapper around hp_reset_strategy_nolock();
 * acquires hp_mutex for callers that do not already hold it.
 */
488 static void hp_reset_strategy(void)
490 mutex_lock(&hp_mutex
);
492 hp_reset_strategy_nolock();
494 mutex_unlock(&hp_mutex
);
497 static void hp_work_handler(struct work_struct
*work
)
499 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
500 struct hp_dbs_tuners
*hp_tuners
;
504 hp_tuners
= dbs_data
->tuners
;
508 if (mutex_trylock(&hp_onoff_mutex
)) {
509 if (!hp_tuners
->is_cpu_hotplug_disable
) {
510 unsigned int online_cpus_count
= num_online_cpus();
514 ("[power/hotplug] hp_work_handler(%d)(%d)(%d)(%d)(%ld)(%ld)(%d)(%d) begin\n",
515 g_trigger_hp_work
, g_tlp_avg_average
, g_tlp_avg_current
,
516 g_cpus_sum_load_current
, g_cpu_up_sum_load
, g_cpu_down_sum_load
,
517 hp_tuners
->cpu_num_base
, hp_tuners
->cpu_num_limit
);
519 switch (g_trigger_hp_work
) {
520 case CPU_HOTPLUG_WORK_TYPE_RUSH
:
521 for (i
= online_cpus_count
;
522 i
< min(g_next_hp_action
, hp_tuners
->cpu_num_limit
); ++i
)
527 case CPU_HOTPLUG_WORK_TYPE_BASE
:
528 for (i
= online_cpus_count
;
529 i
< min(hp_tuners
->cpu_num_base
, hp_tuners
->cpu_num_limit
);
535 case CPU_HOTPLUG_WORK_TYPE_LIMIT
:
536 for (i
= online_cpus_count
- 1; i
>= hp_tuners
->cpu_num_limit
; --i
)
541 case CPU_HOTPLUG_WORK_TYPE_UP
:
542 for (i
= online_cpus_count
; i
< g_next_hp_action
; ++i
)
547 case CPU_HOTPLUG_WORK_TYPE_DOWN
:
548 for (i
= online_cpus_count
- 1; i
>= g_next_hp_action
; --i
)
554 for (i
= online_cpus_count
;
555 i
< min(hp_tuners
->cpu_input_boost_num
,
556 hp_tuners
->cpu_num_limit
); ++i
)
559 /* pr_debug("[power/hotplug] cpu input boost\n"); */
564 dbs_ignore
= 0; /* force trigger frequency scaling */
566 pr_debug("[power/hotplug] hp_work_handler end\n");
569 if (g_next_hp_action) // turn on CPU
571 if (online_cpus_count < num_possible_cpus())
573 pr_debug("hp_work_handler: cpu_up(%d) kick off\n", online_cpus_count);
574 cpu_up(online_cpus_count);
576 pr_debug("hp_work_handler: cpu_up(%d) completion\n", online_cpus_count);
578 dbs_ignore = 0; // force trigger frequency scaling
583 if (online_cpus_count > 1)
585 pr_debug("hp_work_handler: cpu_down(%d) kick off\n", (online_cpus_count - 1));
586 cpu_down((online_cpus_count - 1));
588 pr_debug("hp_work_handler: cpu_down(%d) completion\n", (online_cpus_count - 1));
590 dbs_ignore = 0; // force trigger frequency scaling
596 mutex_unlock(&hp_onoff_mutex
);
599 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
600 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
603 * Every sampling_rate, we check, if current idle time is less than 20%
604 * (default), then we try to increase frequency. Every sampling_rate, we look
605 * for the lowest frequency which can sustain the load while keeping idle time
606 * over 30%. If such a frequency exist, we try to decrease to this frequency.
608 * Any frequency increase takes it to the maximum frequency. Frequency reduction
609 * happens at minimum steps of 5% (default) of current frequency
611 static void hp_check_cpu(int cpu
, unsigned int load_freq
)
613 struct hp_cpu_dbs_info_s
*dbs_info
= &per_cpu(hp_cpu_dbs_info
, cpu
);
614 struct cpufreq_policy
*policy
= dbs_info
->cdbs
.cur_policy
;
615 struct dbs_data
*dbs_data
= policy
->governor_data
;
616 struct hp_dbs_tuners
*hp_tuners
;
620 hp_tuners
= dbs_data
->tuners
;
624 dbs_info
->freq_lo
= 0;
626 /* pr_emerg("***** cpu: %d, load_freq: %u, smp_processor_id: %d *****\n", cpu, load_freq, smp_processor_id()); */
628 /* Check for frequency increase */
629 if (load_freq
> hp_tuners
->up_threshold
* policy
->cur
) {
630 /* If switching to max speed, apply sampling_down_factor */
631 if (policy
->cur
< policy
->max
)
632 dbs_info
->rate_mult
= hp_tuners
->sampling_down_factor
;
633 dbs_freq_increase(policy
, policy
->max
);
634 goto hp_check
; /* <-XXX */
637 /* Check for frequency decrease */
638 /* if we cannot reduce the frequency anymore, break out early */
639 if (policy
->cur
== policy
->min
)
640 goto hp_check
; /* <-XXX */
643 * The optimal frequency is the frequency that is the lowest that can
644 * support the current CPU usage without triggering the up policy. To be
645 * safe, we focus 10 points under the threshold.
647 if (load_freq
< hp_tuners
->adj_up_threshold
* policy
->cur
) {
648 unsigned int freq_next
;
649 freq_next
= load_freq
/ hp_tuners
->adj_up_threshold
;
651 /* No longer fully busy, reset rate_mult */
652 dbs_info
->rate_mult
= 1;
654 if (freq_next
< policy
->min
)
655 freq_next
= policy
->min
;
657 if (!hp_tuners
->powersave_bias
) {
658 __cpufreq_driver_target(policy
, freq_next
, CPUFREQ_RELATION_L
);
660 freq_next
= hp_ops
.powersave_bias_target(policy
, freq_next
,
662 __cpufreq_driver_target(policy
, freq_next
, CPUFREQ_RELATION_L
);
665 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
667 #ifdef CONFIG_HOTPLUG_CPU
668 long cpus_sum_load_last_up
= 0;
669 long cpus_sum_load_last_down
= 0;
670 unsigned int online_cpus_count
;
672 int v_tlp_avg_last
= 0;
675 /* If Hot Plug policy disable, return directly */
676 if (hp_tuners
->is_cpu_hotplug_disable
)
679 #ifdef CONFIG_HOTPLUG_CPU
681 if (g_trigger_hp_work
!= CPU_HOTPLUG_WORK_TYPE_NONE
)
684 mutex_lock(&hp_mutex
);
686 online_cpus_count
= num_online_cpus();
688 sched_get_nr_running_avg(&g_tlp_avg_current
, &g_tlp_iowait_av
);
690 v_tlp_avg_last
= g_tlp_avg_history
[g_tlp_avg_index
];
691 g_tlp_avg_history
[g_tlp_avg_index
] = g_tlp_avg_current
;
692 g_tlp_avg_sum
+= g_tlp_avg_current
;
695 (g_tlp_avg_index
+ 1 ==
696 hp_tuners
->cpu_rush_tlp_times
) ? 0 : g_tlp_avg_index
+ 1;
699 if (g_tlp_avg_count
>= hp_tuners
->cpu_rush_tlp_times
) {
700 if (g_tlp_avg_sum
> v_tlp_avg_last
)
701 g_tlp_avg_sum
-= v_tlp_avg_last
;
706 g_tlp_avg_average
= g_tlp_avg_sum
/ hp_tuners
->cpu_rush_tlp_times
;
708 if (hp_tuners
->cpu_rush_boost_enable
) {
709 /* pr_debug("@@@@@@@@@@@@@@@@@@@@@@@@@@@ tlp: %d @@@@@@@@@@@@@@@@@@@@@@@@@@@\n", g_tlp_avg_average); */
711 if (g_cpus_sum_load_current
>
712 hp_tuners
->cpu_rush_threshold
* online_cpus_count
)
715 g_cpu_rush_count
= 0;
717 if ((g_cpu_rush_count
>= hp_tuners
->cpu_rush_avg_times
) &&
718 (online_cpus_count
* 100 < g_tlp_avg_average
) &&
719 (online_cpus_count
< hp_tuners
->cpu_num_limit
) &&
720 (online_cpus_count
< num_possible_cpus())) {
721 dbs_freq_increase(policy
, policy
->max
);
722 pr_debug("dbs_check_cpu: turn on CPU\n");
724 g_tlp_avg_average
/ 100 + (g_tlp_avg_average
% 100 ? 1 : 0);
726 if (g_next_hp_action
> num_possible_cpus())
727 g_next_hp_action
= num_possible_cpus();
729 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_RUSH
;
730 /* schedule_delayed_work_on(0, &hp_work, 0); */
732 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
734 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
740 if (online_cpus_count
< hp_tuners
->cpu_num_base
741 && online_cpus_count
< hp_tuners
->cpu_num_limit
) {
742 dbs_freq_increase(policy
, policy
->max
);
743 pr_debug("dbs_check_cpu: turn on CPU\n");
744 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_BASE
;
745 /* schedule_delayed_work_on(0, &hp_work, 0); */
747 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
749 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
754 if (online_cpus_count
> hp_tuners
->cpu_num_limit
) {
755 dbs_freq_increase(policy
, policy
->max
);
756 pr_debug("dbs_check_cpu: turn off CPU\n");
757 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_LIMIT
;
758 /* schedule_delayed_work_on(0, &hp_work, 0); */
760 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
762 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
767 /* Check CPU loading to power up slave CPU */
768 if (online_cpus_count
< num_possible_cpus()) {
769 cpus_sum_load_last_up
= g_cpu_up_load_history
[g_cpu_up_load_index
];
770 g_cpu_up_load_history
[g_cpu_up_load_index
] = g_cpus_sum_load_current
;
771 g_cpu_up_sum_load
+= g_cpus_sum_load_current
;
774 g_cpu_up_load_index
=
775 (g_cpu_up_load_index
+ 1 ==
776 hp_tuners
->cpu_up_avg_times
) ? 0 : g_cpu_up_load_index
+ 1;
778 if (g_cpu_up_count
>= hp_tuners
->cpu_up_avg_times
) {
779 if (g_cpu_up_sum_load
> cpus_sum_load_last_up
)
780 g_cpu_up_sum_load
-= cpus_sum_load_last_up
;
782 g_cpu_up_sum_load
= 0;
784 /* g_cpu_up_sum_load /= hp_tuners->cpu_up_avg_times; */
785 if (g_cpu_up_sum_load
>
786 (hp_tuners
->cpu_up_threshold
* online_cpus_count
*
787 hp_tuners
->cpu_up_avg_times
)) {
788 if (online_cpus_count
< hp_tuners
->cpu_num_limit
) {
790 pr_debug("dbs_check_cpu: g_cpu_up_sum_load = %d\n",
793 dbs_freq_increase(policy
, policy
->max
);
794 pr_debug("dbs_check_cpu: turn on CPU\n");
795 g_next_hp_action
= online_cpus_count
+ 1;
796 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_UP
;
797 /* schedule_delayed_work_on(0, &hp_work, 0); */
799 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
801 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
808 pr_debug("dbs_check_cpu: g_cpu_up_count = %d, g_cpu_up_sum_load = %d\n",
809 g_cpu_up_count
, g_cpu_up_sum_load
);
810 pr_debug("dbs_check_cpu: cpu_up_threshold = %d\n",
811 (hp_tuners
->cpu_up_threshold
* online_cpus_count
));
816 /* Check CPU loading to power down slave CPU */
817 if (online_cpus_count
> 1) {
818 cpus_sum_load_last_down
= g_cpu_down_load_history
[g_cpu_down_load_index
];
819 g_cpu_down_load_history
[g_cpu_down_load_index
] = g_cpus_sum_load_current
;
820 g_cpu_down_sum_load
+= g_cpus_sum_load_current
;
823 g_cpu_down_load_index
=
824 (g_cpu_down_load_index
+ 1 ==
825 hp_tuners
->cpu_down_avg_times
) ? 0 : g_cpu_down_load_index
+ 1;
827 if (g_cpu_down_count
>= hp_tuners
->cpu_down_avg_times
) {
828 long cpu_down_threshold
;
830 if (g_cpu_down_sum_load
> cpus_sum_load_last_down
)
831 g_cpu_down_sum_load
-= cpus_sum_load_last_down
;
833 g_cpu_down_sum_load
= 0;
835 g_next_hp_action
= online_cpus_count
;
837 ((hp_tuners
->cpu_up_threshold
-
838 hp_tuners
->cpu_down_differential
) *
839 hp_tuners
->cpu_down_avg_times
);
841 while ((g_cpu_down_sum_load
<
842 cpu_down_threshold
* (g_next_hp_action
- 1)) &&
843 /* (g_next_hp_action > tlp_cpu_num) && */
844 (g_next_hp_action
> hp_tuners
->cpu_num_base
))
847 /* pr_debug("### g_next_hp_action: %d, tlp_cpu_num: %d, g_cpu_down_sum_load / hp_tuners->cpu_down_avg_times: %d ###\n", g_next_hp_action, tlp_cpu_num, g_cpu_down_sum_load / hp_tuners->cpu_down_avg_times); */
848 if (g_next_hp_action
< online_cpus_count
) {
850 pr_debug("dbs_check_cpu: g_cpu_down_sum_load = %d\n",
851 g_cpu_down_sum_load
);
853 dbs_freq_increase(policy
, policy
->max
);
854 pr_debug("dbs_check_cpu: turn off CPU\n");
855 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_DOWN
;
856 /* schedule_delayed_work_on(0, &hp_work, 0); */
858 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
860 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
864 pr_debug("dbs_check_cpu: g_cpu_down_count = %d, g_cpu_down_sum_load = %d\n",
865 g_cpu_down_count
, g_cpu_down_sum_load
);
866 pr_debug("dbs_check_cpu: cpu_down_threshold = %d\n",
867 ((hp_tuners
->cpu_up_threshold
-
868 hp_tuners
->cpu_down_differential
) * (online_cpus_count
- 1)));
873 mutex_unlock(&hp_mutex
);
875 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
877 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
880 static void hp_dbs_timer(struct work_struct
*work
)
882 struct hp_cpu_dbs_info_s
*dbs_info
=
883 container_of(work
, struct hp_cpu_dbs_info_s
, cdbs
.work
.work
);
884 unsigned int cpu
= dbs_info
->cdbs
.cur_policy
->cpu
;
885 struct hp_cpu_dbs_info_s
*core_dbs_info
= &per_cpu(hp_cpu_dbs_info
,
887 struct dbs_data
*dbs_data
= dbs_info
->cdbs
.cur_policy
->governor_data
;
888 struct hp_dbs_tuners
*hp_tuners
;
890 int delay
= 0, sample_type
= core_dbs_info
->sample_type
;
891 bool modify_all
= true;
895 hp_tuners
= dbs_data
->tuners
;
899 mutex_lock(&core_dbs_info
->cdbs
.timer_mutex
);
900 if (!need_load_eval(&core_dbs_info
->cdbs
, hp_tuners
->sampling_rate
)) {
905 /* Common NORMAL_SAMPLE setup */
906 core_dbs_info
->sample_type
= HP_NORMAL_SAMPLE
;
907 if (sample_type
== HP_SUB_SAMPLE
) {
908 delay
= core_dbs_info
->freq_lo_jiffies
;
909 __cpufreq_driver_target(core_dbs_info
->cdbs
.cur_policy
,
910 core_dbs_info
->freq_lo
, CPUFREQ_RELATION_H
);
912 dbs_check_cpu(dbs_data
, cpu
);
913 if (core_dbs_info
->freq_lo
) {
914 /* Setup timer for SUB_SAMPLE */
915 core_dbs_info
->sample_type
= HP_SUB_SAMPLE
;
916 delay
= core_dbs_info
->freq_hi_jiffies
;
922 delay
= delay_for_sampling_rate(hp_tuners
->sampling_rate
923 * core_dbs_info
->rate_mult
);
925 gov_queue_work(dbs_data
, dbs_info
->cdbs
.cur_policy
, delay
, modify_all
);
926 mutex_unlock(&core_dbs_info
->cdbs
.timer_mutex
);
928 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
929 /* for downgrade */ /* TODO: FIXME */
930 if (cpufreq_freq_check
)
931 cpufreq_freq_check(0); /* TODO: FIXME, fix cpuid = 0 */
932 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
935 /************************** sysfs interface ************************/
936 static struct common_dbs_data hp_dbs_cdata
;
939 * update_sampling_rate - update sampling rate effective immediately if needed.
940 * @new_rate: new sampling rate
942 * If new rate is smaller than the old, simply updating
943 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
944 * original sampling_rate was 1 second and the requested new sampling rate is 10
945 * ms because the user needs immediate reaction from hotplug governor, but not
946 * sure if higher frequency will be required or not, then, the governor may
947 * change the sampling rate too late; up to 1 second later. Thus, if we are
948 * reducing the sampling rate, we need to make the new value effective
951 static void update_sampling_rate(struct dbs_data
*dbs_data
, unsigned int new_rate
)
953 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
955 hp_tuners
->sampling_rate
= new_rate
= max(new_rate
, dbs_data
->min_sampling_rate
);
958 struct cpufreq_policy
*policy
;
959 struct hp_cpu_dbs_info_s
*dbs_info
;
960 unsigned long next_sampling
, appointed_at
;
962 policy
= cpufreq_cpu_get(0);
965 if (policy
->governor
!= &cpufreq_gov_hotplug
) {
966 cpufreq_cpu_put(policy
);
969 dbs_info
= &per_cpu(hp_cpu_dbs_info
, 0);
970 cpufreq_cpu_put(policy
);
972 mutex_lock(&dbs_info
->cdbs
.timer_mutex
);
974 if (!delayed_work_pending(&dbs_info
->cdbs
.work
)) {
975 mutex_unlock(&dbs_info
->cdbs
.timer_mutex
);
979 next_sampling
= jiffies
+ usecs_to_jiffies(new_rate
);
980 appointed_at
= dbs_info
->cdbs
.work
.timer
.expires
;
982 if (time_before(next_sampling
, appointed_at
)) {
984 mutex_unlock(&dbs_info
->cdbs
.timer_mutex
);
985 cancel_delayed_work_sync(&dbs_info
->cdbs
.work
);
986 mutex_lock(&dbs_info
->cdbs
.timer_mutex
);
988 gov_queue_work(dbs_data
, dbs_info
->cdbs
.cur_policy
,
989 usecs_to_jiffies(new_rate
), true);
992 mutex_unlock(&dbs_info
->cdbs
.timer_mutex
);
996 void hp_enable_timer(int enable
)
999 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
1000 static unsigned int sampling_rate_backup
= 0;
1002 if (!dbs_data
|| dbs_data
->cdata
->governor
!= GOV_HOTPLUG
|| (enable
&& !sampling_rate_backup
))
1006 update_sampling_rate(dbs_data
, sampling_rate_backup
);
1008 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1010 sampling_rate_backup
= hp_tuners
->sampling_rate
;
1011 update_sampling_rate(dbs_data
, 30000 * 100);
1014 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
1016 struct cpufreq_policy
*policy
;
1017 struct hp_dbs_tuners
*hp_tuners
;
1018 struct hp_cpu_dbs_info_s
*dbs_info
;
1020 policy
= cpufreq_cpu_get(cpu
);
1023 if (policy
->governor
!= &cpufreq_gov_hotplug
) {
1024 cpufreq_cpu_put(policy
);
1027 dbs_info
= &per_cpu(hp_cpu_dbs_info
, cpu
);
1028 cpufreq_cpu_put(policy
);
1031 hp_tuners
= dbs_data
->tuners
;
1032 mutex_lock(&dbs_info
->cdbs
.timer_mutex
);
1033 gov_queue_work(dbs_data
, dbs_info
->cdbs
.cur_policy
, usecs_to_jiffies(hp_tuners
->sampling_rate
), true);
1034 mutex_unlock(&dbs_info
->cdbs
.timer_mutex
);
1036 cancel_delayed_work_sync(&dbs_info
->cdbs
.work
);
1040 EXPORT_SYMBOL(hp_enable_timer
);
1042 static ssize_t
store_sampling_rate(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1046 ret
= sscanf(buf
, "%u", &input
);
1050 update_sampling_rate(dbs_data
, input
);
1054 static ssize_t
store_io_is_busy(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1056 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1061 ret
= sscanf(buf
, "%u", &input
);
1064 hp_tuners
->io_is_busy
= !!input
;
1066 /* we need to re-evaluate prev_cpu_idle */
1067 for_each_online_cpu(j
) {
1068 struct hp_cpu_dbs_info_s
*dbs_info
= &per_cpu(hp_cpu_dbs_info
,
1070 dbs_info
->cdbs
.prev_cpu_idle
= get_cpu_idle_time(j
,
1071 &dbs_info
->cdbs
.prev_cpu_wall
,
1072 hp_tuners
->io_is_busy
);
1077 static ssize_t
store_up_threshold(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1079 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1082 ret
= sscanf(buf
, "%u", &input
);
1084 if (ret
!= 1 || input
> MAX_FREQUENCY_UP_THRESHOLD
|| input
< MIN_FREQUENCY_UP_THRESHOLD
)
1087 /* Calculate the new adj_up_threshold */
1088 hp_tuners
->adj_up_threshold
+= input
;
1089 hp_tuners
->adj_up_threshold
-= hp_tuners
->up_threshold
;
1091 hp_tuners
->up_threshold
= input
;
1095 static ssize_t
store_sampling_down_factor(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1097 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1098 unsigned int input
, j
;
1100 ret
= sscanf(buf
, "%u", &input
);
1102 if (ret
!= 1 || input
> MAX_SAMPLING_DOWN_FACTOR
|| input
< 1)
1104 hp_tuners
->sampling_down_factor
= input
;
1106 /* Reset down sampling multiplier in case it was active */
1107 for_each_online_cpu(j
) {
1108 struct hp_cpu_dbs_info_s
*dbs_info
= &per_cpu(hp_cpu_dbs_info
,
1110 dbs_info
->rate_mult
= 1;
1115 static ssize_t
store_ignore_nice_load(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1117 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1123 ret
= sscanf(buf
, "%u", &input
);
1130 if (input
== hp_tuners
->ignore_nice_load
) /* nothing to do */
1133 hp_tuners
->ignore_nice_load
= input
;
1135 /* we need to re-evaluate prev_cpu_idle */
1136 for_each_online_cpu(j
) {
1137 struct hp_cpu_dbs_info_s
*dbs_info
;
1138 dbs_info
= &per_cpu(hp_cpu_dbs_info
, j
);
1139 dbs_info
->cdbs
.prev_cpu_idle
= get_cpu_idle_time(j
,
1140 &dbs_info
->cdbs
.prev_cpu_wall
,
1141 hp_tuners
->io_is_busy
);
1142 if (hp_tuners
->ignore_nice_load
)
1143 dbs_info
->cdbs
.prev_cpu_nice
= kcpustat_cpu(j
).cpustat
[CPUTIME_NICE
];
1149 static ssize_t
store_powersave_bias(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1151 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1154 ret
= sscanf(buf
, "%u", &input
);
1162 hp_tuners
->powersave_bias
= input
;
1163 hotplug_powersave_bias_init();
/* show/store wrappers and sysfs attributes for the common dbs tuners */
show_store_one(hp, sampling_rate);
show_store_one(hp, io_is_busy);
show_store_one(hp, up_threshold);
show_store_one(hp, sampling_down_factor);
show_store_one(hp, ignore_nice_load);
show_store_one(hp, powersave_bias);
declare_show_sampling_rate_min(hp);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
1183 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1184 static ssize_t
store_down_differential(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1186 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1189 ret
= sscanf(buf
, "%u", &input
);
1192 || input
> MAX_FREQUENCY_DOWN_DIFFERENTIAL
|| input
< MIN_FREQUENCY_DOWN_DIFFERENTIAL
)
1195 hp_tuners
->down_differential
= input
;
1201 * cpu hotplug - function definition of sysfs
1203 static ssize_t
store_cpu_up_threshold(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1205 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1208 ret
= sscanf(buf
, "%u", &input
);
1210 if (ret
!= 1 || input
> MAX_CPU_UP_THRESHOLD
|| input
< MIN_CPU_UP_THRESHOLD
)
1213 mutex_lock(&hp_mutex
);
1214 hp_tuners
->cpu_up_threshold
= input
;
1215 hp_reset_strategy_nolock();
1216 mutex_unlock(&hp_mutex
);
1221 static ssize_t
store_cpu_down_differential(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1223 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1226 ret
= sscanf(buf
, "%u", &input
);
1228 if (ret
!= 1 || input
> MAX_CPU_DOWN_DIFFERENTIAL
|| input
< MIN_CPU_DOWN_DIFFERENTIAL
)
1231 mutex_lock(&hp_mutex
);
1232 hp_tuners
->cpu_down_differential
= input
;
1233 hp_reset_strategy_nolock();
1234 mutex_unlock(&hp_mutex
);
1239 static ssize_t
store_cpu_up_avg_times(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1241 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1244 ret
= sscanf(buf
, "%u", &input
);
1246 if (ret
!= 1 || input
> MAX_CPU_UP_AVG_TIMES
|| input
< MIN_CPU_UP_AVG_TIMES
)
1249 mutex_lock(&hp_mutex
);
1250 hp_tuners
->cpu_up_avg_times
= input
;
1251 hp_reset_strategy_nolock();
1252 mutex_unlock(&hp_mutex
);
1257 static ssize_t
store_cpu_down_avg_times(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1259 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1262 ret
= sscanf(buf
, "%u", &input
);
1264 if (ret
!= 1 || input
> MAX_CPU_DOWN_AVG_TIMES
|| input
< MIN_CPU_DOWN_AVG_TIMES
)
1267 mutex_lock(&hp_mutex
);
1268 hp_tuners
->cpu_down_avg_times
= input
;
1269 hp_reset_strategy_nolock();
1270 mutex_unlock(&hp_mutex
);
1275 static ssize_t
store_cpu_num_limit(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1277 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1280 ret
= sscanf(buf
, "%u", &input
);
1282 if (ret
!= 1 || input
> num_possible_cpus()
1286 mutex_lock(&hp_mutex
);
1287 hp_tuners
->cpu_num_limit
= input
;
1288 mutex_unlock(&hp_mutex
);
1293 static ssize_t
store_cpu_num_base(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1295 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1297 unsigned int online_cpus_count
;
1299 ret
= sscanf(buf
, "%u", &input
);
1301 if (ret
!= 1 || input
> num_possible_cpus()
1305 mutex_lock(&hp_mutex
);
1307 hp_tuners
->cpu_num_base
= input
;
1308 online_cpus_count
= num_online_cpus();
1309 #ifdef CONFIG_HOTPLUG_CPU
1311 if (online_cpus_count
< input
&& online_cpus_count
< hp_tuners
->cpu_num_limit
) {
1312 struct cpufreq_policy
*policy
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
; /* TODO: FIXME, cpu = 0 */
1314 dbs_freq_increase(policy
, policy
->max
);
1315 g_trigger_hp_work
= CPU_HOTPLUG_WORK_TYPE_BASE
;
1316 /* schedule_delayed_work_on(0, &hp_work, 0); */
1318 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
1320 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
1324 mutex_unlock(&hp_mutex
);
1329 static ssize_t
store_is_cpu_hotplug_disable(struct dbs_data
*dbs_data
, const char *buf
,
1332 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1335 ret
= sscanf(buf
, "%u", &input
);
1337 if (ret
!= 1 || input
> 1 || input
< 0)
1340 mutex_lock(&hp_mutex
);
1342 if (hp_tuners
->is_cpu_hotplug_disable
&& !input
)
1343 hp_reset_strategy_nolock();
1345 hp_tuners
->is_cpu_hotplug_disable
= input
;
1346 mutex_unlock(&hp_mutex
);
1351 static ssize_t
store_cpu_input_boost_enable(struct dbs_data
*dbs_data
, const char *buf
,
1354 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1357 ret
= sscanf(buf
, "%u", &input
);
1359 if (ret
!= 1 || input
> 1 || input
< 0)
1362 mutex_lock(&hp_mutex
);
1363 hp_tuners
->cpu_input_boost_enable
= input
;
1364 mutex_unlock(&hp_mutex
);
1369 static ssize_t
store_cpu_input_boost_num(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1371 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1374 ret
= sscanf(buf
, "%u", &input
);
1376 if (ret
!= 1 || input
> num_possible_cpus()
1380 mutex_lock(&hp_mutex
);
1381 hp_tuners
->cpu_input_boost_num
= input
;
1382 mutex_unlock(&hp_mutex
);
1387 static ssize_t
store_cpu_rush_boost_enable(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1389 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1392 ret
= sscanf(buf
, "%u", &input
);
1394 if (ret
!= 1 || input
> 1 || input
< 0)
1397 mutex_lock(&hp_mutex
);
1398 hp_tuners
->cpu_rush_boost_enable
= input
;
1399 mutex_unlock(&hp_mutex
);
1404 static ssize_t
store_cpu_rush_boost_num(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1406 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1409 ret
= sscanf(buf
, "%u", &input
);
1411 if (ret
!= 1 || input
> num_possible_cpus()
1415 mutex_lock(&hp_mutex
);
1416 hp_tuners
->cpu_rush_boost_num
= input
;
1417 mutex_unlock(&hp_mutex
);
1422 static ssize_t
store_cpu_rush_threshold(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1424 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1427 ret
= sscanf(buf
, "%u", &input
);
1429 if (ret
!= 1 || input
> MAX_CPU_RUSH_THRESHOLD
|| input
< MIN_CPU_RUSH_THRESHOLD
)
1432 mutex_lock(&hp_mutex
);
1433 hp_tuners
->cpu_rush_threshold
= input
;
1434 /* hp_reset_strategy_nolock(); //no need */
1435 mutex_unlock(&hp_mutex
);
1440 static ssize_t
store_cpu_rush_tlp_times(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1442 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1445 ret
= sscanf(buf
, "%u", &input
);
1447 if (ret
!= 1 || input
> MAX_CPU_RUSH_TLP_TIMES
|| input
< MIN_CPU_RUSH_TLP_TIMES
)
1450 mutex_lock(&hp_mutex
);
1451 hp_tuners
->cpu_rush_tlp_times
= input
;
1452 hp_reset_strategy_nolock();
1453 mutex_unlock(&hp_mutex
);
1458 static ssize_t
store_cpu_rush_avg_times(struct dbs_data
*dbs_data
, const char *buf
, size_t count
)
1460 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1463 ret
= sscanf(buf
, "%u", &input
);
1465 if (ret
!= 1 || input
> MAX_CPU_RUSH_AVG_TIMES
|| input
< MIN_CPU_RUSH_AVG_TIMES
)
1468 mutex_lock(&hp_mutex
);
1469 hp_tuners
->cpu_rush_avg_times
= input
;
1470 hp_reset_strategy_nolock();
1471 mutex_unlock(&hp_mutex
);
/* show/store wrappers and sysfs attributes for the hotplug-specific tuners */
show_store_one(hp, down_differential);
show_store_one(hp, cpu_up_threshold);
show_store_one(hp, cpu_down_differential);
show_store_one(hp, cpu_up_avg_times);
show_store_one(hp, cpu_down_avg_times);
show_store_one(hp, cpu_num_limit);
show_store_one(hp, cpu_num_base);
show_store_one(hp, is_cpu_hotplug_disable);
show_store_one(hp, cpu_input_boost_enable);
show_store_one(hp, cpu_input_boost_num);
show_store_one(hp, cpu_rush_boost_enable);
show_store_one(hp, cpu_rush_boost_num);
show_store_one(hp, cpu_rush_threshold);
show_store_one(hp, cpu_rush_tlp_times);
show_store_one(hp, cpu_rush_avg_times);

gov_sys_pol_attr_rw(down_differential);
gov_sys_pol_attr_rw(cpu_up_threshold);
gov_sys_pol_attr_rw(cpu_down_differential);
gov_sys_pol_attr_rw(cpu_up_avg_times);
gov_sys_pol_attr_rw(cpu_down_avg_times);
gov_sys_pol_attr_rw(cpu_num_limit);
gov_sys_pol_attr_rw(cpu_num_base);
gov_sys_pol_attr_rw(is_cpu_hotplug_disable);
gov_sys_pol_attr_rw(cpu_input_boost_enable);
gov_sys_pol_attr_rw(cpu_input_boost_num);
gov_sys_pol_attr_rw(cpu_rush_boost_enable);
gov_sys_pol_attr_rw(cpu_rush_boost_num);
gov_sys_pol_attr_rw(cpu_rush_threshold);
gov_sys_pol_attr_rw(cpu_rush_tlp_times);
gov_sys_pol_attr_rw(cpu_rush_avg_times);
/* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1509 static struct attribute
*dbs_attributes_gov_sys
[] = {
1510 &sampling_rate_min_gov_sys
.attr
,
1511 &sampling_rate_gov_sys
.attr
,
1512 &up_threshold_gov_sys
.attr
,
1513 &sampling_down_factor_gov_sys
.attr
,
1514 &ignore_nice_load_gov_sys
.attr
,
1515 &powersave_bias_gov_sys
.attr
,
1516 &io_is_busy_gov_sys
.attr
,
1517 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1518 &down_differential_gov_sys
.attr
,
1519 &cpu_up_threshold_gov_sys
.attr
,
1520 &cpu_down_differential_gov_sys
.attr
,
1521 &cpu_up_avg_times_gov_sys
.attr
,
1522 &cpu_down_avg_times_gov_sys
.attr
,
1523 &cpu_num_limit_gov_sys
.attr
,
1524 &cpu_num_base_gov_sys
.attr
,
1525 &is_cpu_hotplug_disable_gov_sys
.attr
,
1526 &cpu_input_boost_enable_gov_sys
.attr
,
1527 &cpu_input_boost_num_gov_sys
.attr
,
1528 &cpu_rush_boost_enable_gov_sys
.attr
,
1529 &cpu_rush_boost_num_gov_sys
.attr
,
1530 &cpu_rush_threshold_gov_sys
.attr
,
1531 &cpu_rush_tlp_times_gov_sys
.attr
,
1532 &cpu_rush_avg_times_gov_sys
.attr
,
1533 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1537 static struct attribute_group hp_attr_group_gov_sys
= {
1538 .attrs
= dbs_attributes_gov_sys
,
1542 static struct attribute
*dbs_attributes_gov_pol
[] = {
1543 &sampling_rate_min_gov_pol
.attr
,
1544 &sampling_rate_gov_pol
.attr
,
1545 &up_threshold_gov_pol
.attr
,
1546 &sampling_down_factor_gov_pol
.attr
,
1547 &ignore_nice_load_gov_pol
.attr
,
1548 &powersave_bias_gov_pol
.attr
,
1549 &io_is_busy_gov_pol
.attr
,
1550 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1551 &down_differential_gov_pol
.attr
,
1552 &cpu_up_threshold_gov_pol
.attr
,
1553 &cpu_down_differential_gov_pol
.attr
,
1554 &cpu_up_avg_times_gov_pol
.attr
,
1555 &cpu_down_avg_times_gov_pol
.attr
,
1556 &cpu_num_limit_gov_pol
.attr
,
1557 &cpu_num_base_gov_pol
.attr
,
1558 &is_cpu_hotplug_disable_gov_pol
.attr
,
1559 &cpu_input_boost_enable_gov_pol
.attr
,
1560 &cpu_input_boost_num_gov_pol
.attr
,
1561 &cpu_rush_boost_enable_gov_pol
.attr
,
1562 &cpu_rush_boost_num_gov_pol
.attr
,
1563 &cpu_rush_threshold_gov_pol
.attr
,
1564 &cpu_rush_tlp_times_gov_pol
.attr
,
1565 &cpu_rush_avg_times_gov_pol
.attr
,
1566 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1570 static struct attribute_group hp_attr_group_gov_pol
= {
1571 .attrs
= dbs_attributes_gov_pol
,
1575 /************************** sysfs end ************************/
1577 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1579 #ifdef CONFIG_HOTPLUG_CPU
1581 static struct task_struct
*freq_up_task
;
1583 static int touch_freq_up_task(void *data
)
1585 struct cpufreq_policy
*policy
;
1588 policy
= cpufreq_cpu_get(0);
1589 dbs_freq_increase(policy
, policy
->max
);
1590 cpufreq_cpu_put(policy
);
1591 /* mt_cpufreq_set_ramp_down_count_const(0, 100); */
1592 pr_debug("@%s():%d\n", __func__
, __LINE__
);
1594 set_current_state(TASK_INTERRUPTIBLE
);
1597 if (kthread_should_stop())
1604 static void dbs_input_event(struct input_handle
*handle
, unsigned int type
,
1605 unsigned int code
, int value
)
1609 /* if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || */
1610 /* (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) { */
1615 /* for_each_online_cpu(i) { */
1616 /* queue_work_on(i, input_wq, &per_cpu(dbs_refresh_work, i)); */
1618 /* pr_debug("$$$ in_interrupt(): %d, in_irq(): %d, type: %d, code: %d, value: %d $$$\n", in_interrupt(), in_irq(), type, code, value); */
1620 struct dbs_data
*dbs_data
= per_cpu(hp_cpu_dbs_info
, 0).cdbs
.cur_policy
->governor_data
; /* TODO: FIXME, cpu = 0 */
1621 struct hp_dbs_tuners
*hp_tuners
;
1625 hp_tuners
= dbs_data
->tuners
;
1629 if ((type
== EV_KEY
) && (code
== BTN_TOUCH
) && (value
== 1)
1630 && (dbs_data
->cdata
->governor
== GOV_HOTPLUG
&& hp_tuners
->cpu_input_boost_enable
)) {
1631 /* if (!in_interrupt()) */
1633 unsigned int online_cpus_count
= num_online_cpus();
1635 pr_debug("@%s():%d, online_cpus_count = %d, cpu_input_boost_num = %d\n", __func__
, __LINE__
, online_cpus_count
, hp_tuners
->cpu_input_boost_num
);
1637 if (online_cpus_count
< hp_tuners
->cpu_input_boost_num
&& online_cpus_count
< hp_tuners
->cpu_num_limit
) {
1638 /* schedule_delayed_work_on(0, &hp_work, 0); */
1640 pr_emerg("[power/hotplug] %s():%d, impossible\n", __func__
, __LINE__
);
1642 queue_delayed_work_on(0, hp_wq
, &hp_work
, 0);
1645 if (online_cpus_count
<= hp_tuners
->cpu_input_boost_num
&& online_cpus_count
<= hp_tuners
->cpu_num_limit
)
1646 wake_up_process(freq_up_task
);
1652 static int dbs_input_connect(struct input_handler
*handler
,
1653 struct input_dev
*dev
, const struct input_device_id
*id
)
1655 struct input_handle
*handle
;
1658 handle
= kzalloc(sizeof(struct input_handle
), GFP_KERNEL
);
1664 handle
->handler
= handler
;
1665 handle
->name
= "cpufreq";
1667 error
= input_register_handle(handle
);
1672 error
= input_open_device(handle
);
1679 input_unregister_handle(handle
);
/* dbs_input_disconnect - undo dbs_input_connect: close, unregister, free. */
static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
1692 static const struct input_device_id dbs_ids
[] = {
1697 static struct input_handler dbs_input_handler
= {
1698 .event
= dbs_input_event
,
1699 .connect
= dbs_input_connect
,
1700 .disconnect
= dbs_input_disconnect
,
1701 .name
= "cpufreq_ond",
1702 .id_table
= dbs_ids
,
1704 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1706 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1708 static int hp_init(struct dbs_data
*dbs_data
)
1710 struct hp_dbs_tuners
*tuners
;
1714 tuners
= kzalloc(sizeof(struct hp_dbs_tuners
), GFP_KERNEL
);
1716 pr_err("%s: kzalloc failed\n", __func__
);
1721 idle_time
= get_cpu_idle_time_us(cpu
, NULL
);
1723 if (idle_time
!= -1ULL) {
1724 /* Idle micro accounting is supported. Use finer thresholds */
1725 tuners
->up_threshold
= MICRO_FREQUENCY_UP_THRESHOLD
;
1726 tuners
->adj_up_threshold
= MICRO_FREQUENCY_UP_THRESHOLD
-
1727 MICRO_FREQUENCY_DOWN_DIFFERENTIAL
;
1728 tuners
->down_differential
= MICRO_FREQUENCY_DOWN_DIFFERENTIAL
; /* <-XXX */
1729 tuners
->cpu_up_threshold
= MICRO_CPU_UP_THRESHOLD
; /* <-XXX */
1730 tuners
->cpu_down_differential
= MICRO_CPU_DOWN_DIFFERENTIAL
; /* <-XXX */
1732 * In nohz/micro accounting case we set the minimum frequency
1733 * not depending on HZ, but fixed (very low). The deferred
1734 * timer might skip some samples if idle/sleeping as needed.
1736 dbs_data
->min_sampling_rate
= MICRO_FREQUENCY_MIN_SAMPLE_RATE
;
1738 /* cpu rush boost */
1739 tuners
->cpu_rush_threshold
= MICRO_CPU_RUSH_THRESHOLD
; /* <-XXX */
1741 tuners
->up_threshold
= DEF_FREQUENCY_UP_THRESHOLD
;
1742 tuners
->adj_up_threshold
= DEF_FREQUENCY_UP_THRESHOLD
-
1743 DEF_FREQUENCY_DOWN_DIFFERENTIAL
;
1744 tuners
->down_differential
= DEF_FREQUENCY_DOWN_DIFFERENTIAL
; /* <-XXX */
1745 tuners
->cpu_up_threshold
= DEF_CPU_UP_THRESHOLD
; /* <-XXX */
1746 tuners
->cpu_down_differential
= DEF_CPU_DOWN_DIFFERENTIAL
; /* <-XXX */
1748 /* For correct statistics, we need 10 ticks for each measure */
1749 dbs_data
->min_sampling_rate
= MIN_SAMPLING_RATE_RATIO
* jiffies_to_usecs(10);
1751 /* cpu rush boost */
1752 tuners
->cpu_rush_threshold
= DEF_CPU_RUSH_THRESHOLD
; /* <-XXX */
1755 tuners
->sampling_down_factor
= DEF_SAMPLING_DOWN_FACTOR
;
1756 tuners
->ignore_nice_load
= 0;
1757 tuners
->powersave_bias
= default_powersave_bias
;
1758 tuners
->io_is_busy
= should_io_be_busy();
1759 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1760 tuners
->cpu_up_avg_times
= DEF_CPU_UP_AVG_TIMES
;
1761 tuners
->cpu_down_avg_times
= DEF_CPU_DOWN_AVG_TIMES
;
1762 tuners
->cpu_num_limit
= num_possible_cpus();
1763 tuners
->cpu_num_base
= 1;
1764 tuners
->is_cpu_hotplug_disable
= (tuners
->cpu_num_limit
> 1) ? 0 : 1;
1765 tuners
->cpu_input_boost_enable
= DEF_CPU_INPUT_BOOST_ENABLE
;
1766 tuners
->cpu_input_boost_num
= DEF_CPU_INPUT_BOOST_NUM
;
1767 tuners
->cpu_rush_boost_enable
= DEF_CPU_RUSH_BOOST_ENABLE
;
1768 tuners
->cpu_rush_boost_num
= num_possible_cpus();
1769 tuners
->cpu_rush_tlp_times
= DEF_CPU_RUSH_TLP_TIMES
;
1770 tuners
->cpu_rush_avg_times
= DEF_CPU_RUSH_AVG_TIMES
;
1772 #ifdef CONFIG_HOTPLUG_CPU
1773 INIT_DEFERRABLE_WORK(&hp_work
, hp_work_handler
);
1774 hp_wq
= alloc_workqueue("hp_work_handler", WQ_HIGHPRI
, 0);
1775 g_next_hp_action
= num_online_cpus();
1779 pr_debug("cpufreq_gov_dbs_init: min_sampling_rate = %d\n", dbs_data
->min_sampling_rate
);
1780 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.up_threshold = %d\n", tuners
->up_threshold
);
1781 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.down_differential = %d\n",
1782 tuners
->down_differential
);
1783 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_threshold = %d\n",
1784 tuners
->cpu_up_threshold
);
1785 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_differential = %d\n",
1786 tuners
->cpu_down_differential
);
1787 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_avg_times = %d\n",
1788 tuners
->cpu_up_avg_times
);
1789 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_avg_times = %d\n",
1790 tuners
->cpu_down_avg_times
);
1791 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_limit = %d\n",
1792 tuners
->cpu_num_limit
);
1793 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_base = %d\n", tuners
->cpu_num_base
);
1794 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.is_cpu_hotplug_disable = %d\n",
1795 tuners
->is_cpu_hotplug_disable
);
1796 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_input_boost_enable = %d\n",
1797 tuners
->cpu_input_boost_enable
);
1798 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_input_boost_num = %d\n",
1799 tuners
->cpu_input_boost_num
);
1800 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_rush_boost_enable = %d\n",
1801 tuners
->cpu_rush_boost_enable
);
1802 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_rush_boost_num = %d\n",
1803 tuners
->cpu_rush_boost_num
);
1804 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_rush_threshold = %d\n",
1805 tuners
->cpu_rush_threshold
);
1806 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_rush_tlp_times = %d\n",
1807 tuners
->cpu_rush_tlp_times
);
1808 pr_debug("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_rush_avg_times = %d\n",
1809 tuners
->cpu_rush_avg_times
);
1811 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1813 dbs_data
->tuners
= tuners
;
1814 mutex_init(&dbs_data
->mutex
);
1818 static void hp_exit(struct dbs_data
*dbs_data
)
1820 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1821 #ifdef CONFIG_HOTPLUG_CPU
1822 cancel_delayed_work_sync(&hp_work
);
1824 destroy_workqueue(hp_wq
);
1826 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1827 kfree(dbs_data
->tuners
);
/* generates get_cpu_cdbs()/get_cpu_dbs_info_s() accessors over hp_cpu_dbs_info */
define_get_cpu_dbs_routines(hp_cpu_dbs_info);
1832 static struct hp_ops hp_ops
= {
1833 .powersave_bias_init_cpu
= hotplug_powersave_bias_init_cpu
,
1834 .powersave_bias_target
= generic_powersave_bias_target
,
1835 .freq_increase
= dbs_freq_increase
,
1836 .input_handler
= &dbs_input_handler
,
1839 static struct common_dbs_data hp_dbs_cdata
= {
1840 .governor
= GOV_HOTPLUG
,
1841 .attr_group_gov_sys
= &hp_attr_group_gov_sys
,
1842 .attr_group_gov_pol
= &hp_attr_group_gov_pol
,
1843 .get_cpu_cdbs
= get_cpu_cdbs
,
1844 .get_cpu_dbs_info_s
= get_cpu_dbs_info_s
,
1845 .gov_dbs_timer
= hp_dbs_timer
,
1846 .gov_check_cpu
= hp_check_cpu
,
1852 static void hp_set_powersave_bias(unsigned int powersave_bias
)
1854 struct cpufreq_policy
*policy
;
1855 struct dbs_data
*dbs_data
;
1856 struct hp_dbs_tuners
*hp_tuners
;
1860 default_powersave_bias
= powersave_bias
;
1861 cpumask_clear(&done
);
1864 for_each_online_cpu(cpu
) {
1865 if (cpumask_test_cpu(cpu
, &done
))
1868 policy
= per_cpu(hp_cpu_dbs_info
, cpu
).cdbs
.cur_policy
;
1872 cpumask_or(&done
, &done
, policy
->cpus
);
1874 if (policy
->governor
!= &cpufreq_gov_hotplug
)
1877 dbs_data
= policy
->governor_data
;
1878 hp_tuners
= dbs_data
->tuners
;
1879 hp_tuners
->powersave_bias
= default_powersave_bias
;
1884 void hp_register_powersave_bias_handler(unsigned int (*f
)
1885 (struct cpufreq_policy
*, unsigned int, unsigned int),
1886 unsigned int powersave_bias
)
1888 hp_ops
.powersave_bias_target
= f
;
1889 hp_set_powersave_bias(powersave_bias
);
1891 EXPORT_SYMBOL_GPL(hp_register_powersave_bias_handler
);
1893 void hp_unregister_powersave_bias_handler(void)
1895 hp_ops
.powersave_bias_target
= generic_powersave_bias_target
;
1896 hp_set_powersave_bias(0);
1898 EXPORT_SYMBOL_GPL(hp_unregister_powersave_bias_handler
);
1900 static int hp_cpufreq_governor_dbs(struct cpufreq_policy
*policy
, unsigned int event
)
1902 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1903 struct dbs_data
*dbs_data
;
1906 if (have_governor_per_policy())
1907 dbs_data
= policy
->governor_data
;
1909 dbs_data
= hp_dbs_cdata
.gdbs_data
;
1911 /* pr_emerg("***** policy->cpu: %d, event: %u, smp_processor_id: %d, have_governor_per_policy: %d *****\n", policy->cpu, event, smp_processor_id(), have_governor_per_policy()); */
1913 case CPUFREQ_GOV_START
:
1916 struct hp_dbs_tuners
*hp_tuners
= dbs_data
->tuners
;
1918 BUG_ON(NULL
== dbs_data
);
1919 BUG_ON(NULL
== dbs_data
->tuners
);
1921 pr_debug("cpufreq_governor_dbs: min_sampling_rate = %d\n",
1922 dbs_data
->min_sampling_rate
);
1923 pr_debug("cpufreq_governor_dbs: dbs_tuners_ins.sampling_rate = %d\n",
1924 hp_tuners
->sampling_rate
);
1925 pr_debug("cpufreq_governor_dbs: dbs_tuners_ins.io_is_busy = %d\n",
1926 hp_tuners
->io_is_busy
);
1929 #ifdef CONFIG_HOTPLUG_CPU
1930 if (0) /* (!policy->cpu) // <-XXX */
1931 rc
= input_register_handler(&dbs_input_handler
);
1935 case CPUFREQ_GOV_STOP
:
1936 #ifdef CONFIG_HOTPLUG_CPU
1937 if (0) /* (!policy->cpu) // <-XXX */
1938 input_unregister_handler(&dbs_input_handler
);
1944 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1945 return cpufreq_governor_dbs(policy
, &hp_dbs_cdata
, event
);
1948 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1950 int cpufreq_gov_dbs_get_sum_load(void)
1952 return g_cpus_sum_load_current
;
1955 /* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1957 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
1960 struct cpufreq_governor cpufreq_gov_hotplug
= {
1962 .governor
= hp_cpufreq_governor_dbs
,
1963 .max_transition_latency
= TRANSITION_LATENCY_LIMIT
,
1964 .owner
= THIS_MODULE
,
#ifdef CONFIG_MTK_SDIOAUTOK_SUPPORT
/*
 * cpufreq_min_sampling_rate_change - externally lower/raise the governor's
 * minimum sampling rate (used by the MTK SDIO autok flow) and re-apply it.
 */
void cpufreq_min_sampling_rate_change(unsigned int sample_rate)
{
	struct dbs_data *dbs_data = per_cpu(hp_cpu_dbs_info, 0).cdbs.cur_policy->governor_data;	/* TODO: FIXME, cpu = 0 */

	dbs_data->min_sampling_rate = sample_rate;
	update_sampling_rate(dbs_data, sample_rate);
}
EXPORT_SYMBOL(cpufreq_min_sampling_rate_change);
#endif
1981 static int __init
cpufreq_gov_dbs_init(void)
1983 struct sched_param param
= { .sched_priority
= MAX_RT_PRIO
-1 };
1985 freq_up_task
= kthread_create(touch_freq_up_task
, NULL
, "touch_freq_up_task");
1987 if (IS_ERR(freq_up_task
))
1988 return PTR_ERR(freq_up_task
);
1990 sched_setscheduler_nocheck(freq_up_task
, SCHED_FIFO
, ¶m
);
1991 get_task_struct(freq_up_task
);
1993 return cpufreq_register_governor(&cpufreq_gov_hotplug
);
1996 static void __exit
cpufreq_gov_dbs_exit(void)
1998 cpufreq_unregister_governor(&cpufreq_gov_hotplug
);
2000 kthread_stop(freq_up_task
);
2001 put_task_struct(freq_up_task
);
2004 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
2005 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
2006 MODULE_DESCRIPTION("'cpufreq_hotplug' - A dynamic cpufreq governor for "
2007 "Low Latency Frequency Transition capable processors");
2008 MODULE_LICENSE("GPL");
2010 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
2011 fs_initcall(cpufreq_gov_dbs_init
);
2013 module_init(cpufreq_gov_dbs_init
);
2015 module_exit(cpufreq_gov_dbs_exit
);