1/*
2 * drivers/cpufreq/cpufreq_balance.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/cpufreq.h>
17#include <linux/cpu.h>
18#include <linux/jiffies.h>
19#include <linux/kernel_stat.h>
20#include <linux/mutex.h>
21#include <linux/hrtimer.h>
22#include <linux/tick.h>
23#include <linux/ktime.h>
24#include <linux/sched.h>
25#include <linux/input.h>
26#include <linux/slab.h>
27#include <linux/sched/rt.h>
28#include <linux/kthread.h>
29
30extern unsigned int get_normal_max_freq(void);
31extern unsigned int mt_dvfs_power_dispatch_safe(void);
32extern int mt_gpufreq_target(int idx);
33/*
34 * dbs is used in this file as shorthand for demand-based switching.
35 * It helps to keep variable names smaller and simpler.
36 */
37
38#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
39#define DEF_FREQUENCY_OD_THRESHOLD (98)
40#define DEF_FREQUENCY_UP_THRESHOLD (80)
41#define DEF_SAMPLING_DOWN_FACTOR (1)
42#define MAX_SAMPLING_DOWN_FACTOR (100000)
43#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (15)
44#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (5)
45#define MAX_FREQUENCY_DOWN_DIFFERENTIAL (20)
46#define MICRO_FREQUENCY_UP_THRESHOLD (85)
47#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (30000)
48#define MIN_FREQUENCY_UP_THRESHOLD (21)
49#define MAX_FREQUENCY_UP_THRESHOLD (100)
50
51#define DEF_CPU_DOWN_DIFFERENTIAL (10)
52#define MICRO_CPU_DOWN_DIFFERENTIAL (10)
53#define MIN_CPU_DOWN_DIFFERENTIAL (0)
54#define MAX_CPU_DOWN_DIFFERENTIAL (30)
55
56#define DEF_CPU_UP_THRESHOLD (90)
57#define MICRO_CPU_UP_THRESHOLD (90)
58#define MIN_CPU_UP_THRESHOLD (80)
59#define MAX_CPU_UP_THRESHOLD (100)
60
61#define CPU_UP_AVG_TIMES (10)
62#define CPU_DOWN_AVG_TIMES (50)
63#define THERMAL_DISPATCH_AVG_TIMES (30)
64
65#define DEF_CPU_PERSIST_COUNT (10)
66
67//#define DEBUG_LOG
68#define INPUT_BOOST (1)
69
70/*
71 * The polling frequency of this governor depends on the capability of
72 * the processor. The default polling interval is 1000 times the transition
73 * latency of the processor. The governor will work on any processor with
74 * a transition latency <= 10 ms, using an appropriate sampling
75 * rate.
76 * For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
77 * this governor will not work.
78 * All times here are in us.
79 */
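
/*
 * Illustrative sketch (not part of the original driver): how the default
 * sampling rate is derived in cpufreq_governor_dbs() below, assuming a
 * hypothetical transition latency of 30,000 ns reported by the platform:
 *
 *   latency       = 30,000 ns / 1000             =  30 us
 *   sampling_rate = max(min_sampling_rate,
 *                       latency * LATENCY_MULTIPLIER)
 *                 = max(30,000 us, 30 * 1000 us) =  30,000 us (30 ms)
 *
 * (min_sampling_rate is 30,000 us when idle micro-accounting is available;
 * see MICRO_FREQUENCY_MIN_SAMPLE_RATE and cpufreq_gov_dbs_init().)
 */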
80#define MIN_SAMPLING_RATE_RATIO (2)
81
82static unsigned int min_sampling_rate;
83
84#define LATENCY_MULTIPLIER (1000)
85#define MIN_LATENCY_MULTIPLIER (100)
86#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
87
88static void do_dbs_timer(struct work_struct *work);
89static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
90 unsigned int event);
91
92#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_BALANCE
93static
94#endif
95struct cpufreq_governor cpufreq_gov_balance = {
96 .name = "hotplug",
97 .governor = cpufreq_governor_dbs,
98 .max_transition_latency = TRANSITION_LATENCY_LIMIT,
99 .owner = THIS_MODULE,
100};
101
102#ifdef CONFIG_SMP
103
104static int g_next_hp_action = 0;
105
106static long g_cpu_up_sum_load = 0;
107static int g_cpu_up_count = 0;
108
109static long g_cpu_down_sum_load = 0;
110static int g_cpu_down_count = 0;
111static int g_max_cpu_persist_count = 0;
112static int g_thermal_count = 0;
113
114static void hp_work_handler(struct work_struct *work);
115static struct delayed_work hp_work;
116
117#if INPUT_BOOST
118static struct task_struct *freq_up_task;
119#endif
120
121#endif
122
123static int cpu_loading = 0;
124static int cpus_sum_load = 0;
125/* Sampling types */
126enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
127
128struct cpu_dbs_info_s {
129 cputime64_t prev_cpu_idle;
130 cputime64_t prev_cpu_iowait;
131 cputime64_t prev_cpu_wall;
132 cputime64_t prev_cpu_nice;
133 struct cpufreq_policy *cur_policy;
134 struct delayed_work work;
135 struct cpufreq_frequency_table *freq_table;
136 unsigned int freq_lo;
137 unsigned int freq_lo_jiffies;
138 unsigned int freq_hi_jiffies;
139 unsigned int rate_mult;
140 int cpu;
141 unsigned int sample_type:1;
142 /*
143 * percpu mutex that serializes governor limit change with
144 * do_dbs_timer invocation. We do not want do_dbs_timer to run
145 * when user is changing the governor or limits.
146 */
147 struct mutex timer_mutex;
148};
149static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);
150
151static unsigned int dbs_enable; /* number of CPUs using this policy */
152static unsigned int dbs_ignore = 1;
153static unsigned int dbs_thermal_limited;
154static unsigned int dbs_thermal_limited_freq;
155
156/* dvfs thermal limit */
157void dbs_freq_thermal_limited(unsigned int limited, unsigned int freq)
158{
159 dbs_thermal_limited = limited;
160 dbs_thermal_limited_freq = freq;
161}
162EXPORT_SYMBOL(dbs_freq_thermal_limited);
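
/*
 * Usage sketch (illustrative only; the caller and the frequency value are
 * hypothetical): a thermal policy could cap this governor at 1001000 kHz with
 *
 *	dbs_freq_thermal_limited(1, 1001000);
 *
 * and lift the cap again with
 *
 *	dbs_freq_thermal_limited(0, 0);
 *
 * dbs_check_cpu() re-applies the limit every thermal_dispatch_avg_times
 * samples by clamping policy->cur down to dbs_thermal_limited_freq.
 */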
163
164/*
165 * dbs_mutex protects dbs_enable in governor start/stop.
166 */
167static DEFINE_MUTEX(dbs_mutex);
168
169/*
170 * dbs_hotplug protects all hotplug related global variables
171 */
172static DEFINE_MUTEX(hp_mutex);
173
174DEFINE_MUTEX(bl_onoff_mutex);
175
176static struct dbs_tuners {
177 unsigned int sampling_rate;
178 unsigned int od_threshold;
179 unsigned int up_threshold;
180 unsigned int down_differential;
181 unsigned int ignore_nice;
182 unsigned int sampling_down_factor;
183 unsigned int powersave_bias;
184 unsigned int io_is_busy;
185 unsigned int cpu_up_threshold;
186 unsigned int cpu_down_differential;
187 unsigned int cpu_up_avg_times;
188 unsigned int cpu_down_avg_times;
189 unsigned int thermal_dispatch_avg_times;
190 unsigned int cpu_num_limit;
191 unsigned int cpu_num_base;
192 unsigned int is_cpu_hotplug_disable;
193#if INPUT_BOOST
194 unsigned int cpu_input_boost_enable;
195#endif
196} dbs_tuners_ins = {
197 .od_threshold = DEF_FREQUENCY_OD_THRESHOLD,
198 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
199 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
200 .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
201 .ignore_nice = 0,
202 .powersave_bias = 0,
203 .cpu_up_threshold = DEF_CPU_UP_THRESHOLD,
204 .cpu_down_differential = DEF_CPU_DOWN_DIFFERENTIAL,
205 .cpu_up_avg_times = CPU_UP_AVG_TIMES,
206 .cpu_down_avg_times = CPU_DOWN_AVG_TIMES,
207 .thermal_dispatch_avg_times = THERMAL_DISPATCH_AVG_TIMES,
208 .cpu_num_limit = 1,
209 .cpu_num_base = 1,
210 .is_cpu_hotplug_disable = 1,
211#if INPUT_BOOST
212 .cpu_input_boost_enable = 1,
213#endif
214};
215
216static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq);
217
218static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
219{
220 u64 idle_time;
221 u64 cur_wall_time;
222 u64 busy_time;
223
224 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
225
226 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
227 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
228 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
229 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
230 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
231 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
232
233 idle_time = cur_wall_time - busy_time;
234 if (wall)
235 *wall = jiffies_to_usecs(cur_wall_time);
236
237 return jiffies_to_usecs(idle_time);
238}
239
240/* static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) */
241/* { */
242/* u64 idle_time = get_cpu_idle_time_us(cpu, NULL); */
243
244/* if (idle_time == -1ULL) */
245/* return get_cpu_idle_time_jiffy(cpu, wall); */
246/* else */
247/* idle_time += get_cpu_iowait_time_us(cpu, wall); */
248
249/* return idle_time; */
250/* } */
251
252static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
253{
254 u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
255
256 if (iowait_time == -1ULL)
257 return 0;
258
259 return iowait_time;
260}
261
262void force_two_core(void)
263{
264 bool raise_freq = false;
265
266 mutex_lock(&hp_mutex);
267 g_cpu_down_count = 0;
268 g_cpu_down_sum_load = 0;
269 if (num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
270 raise_freq = true;
271 g_next_hp_action = 1;
272 schedule_delayed_work_on(0, &hp_work, 0);
273 }
274 mutex_unlock(&hp_mutex);
275
276 if (raise_freq == true) {
277 wake_up_process(freq_up_task);
278 }
279
280 mt_gpufreq_target(0);
281}
282
283/*
284 * Find right freq to be set now with powersave_bias on.
285 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
286 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
287 */
288static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
289 unsigned int freq_next,
290 unsigned int relation)
291{
292 unsigned int freq_req, freq_reduc, freq_avg;
293 unsigned int freq_hi, freq_lo;
294 unsigned int index = 0;
295 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
296 struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info,
297 policy->cpu);
298
299 if (!dbs_info->freq_table) {
300 dbs_info->freq_lo = 0;
301 dbs_info->freq_lo_jiffies = 0;
302 return freq_next;
303 }
304
305 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
306 relation, &index);
307 freq_req = dbs_info->freq_table[index].frequency;
308 freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
309 freq_avg = freq_req - freq_reduc;
310
311 /* Find freq bounds for freq_avg in freq_table */
312 index = 0;
313 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
314 CPUFREQ_RELATION_H, &index);
315 freq_lo = dbs_info->freq_table[index].frequency;
316 index = 0;
317 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
318 CPUFREQ_RELATION_L, &index);
319 freq_hi = dbs_info->freq_table[index].frequency;
320
321 /* Find out how long we have to be in hi and lo freqs */
322 if (freq_hi == freq_lo) {
323 dbs_info->freq_lo = 0;
324 dbs_info->freq_lo_jiffies = 0;
325 return freq_lo;
326 }
327 jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
328 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
329 jiffies_hi += ((freq_hi - freq_lo) / 2);
330 jiffies_hi /= (freq_hi - freq_lo);
331 jiffies_lo = jiffies_total - jiffies_hi;
332 dbs_info->freq_lo = freq_lo;
333 dbs_info->freq_lo_jiffies = jiffies_lo;
334 dbs_info->freq_hi_jiffies = jiffies_hi;
335 return freq_hi;
336}
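
/*
 * Worked example for powersave_bias_target() (illustrative numbers, assuming
 * a hypothetical frequency table with adjacent entries 800000 and 1000000 kHz):
 *
 *   powersave_bias = 100 (10%), freq_next resolves to freq_req = 1000000
 *   freq_reduc = 1000000 * 100 / 1000 = 100000
 *   freq_avg   = 1000000 - 100000     = 900000
 *   freq_lo    = 800000  (highest table entry <= freq_avg)
 *   freq_hi    = 1000000 (lowest table entry >= freq_avg)
 *   jiffies_hi = (900000 - 800000) * jiffies_total / (1000000 - 800000)
 *              ~= jiffies_total / 2
 *
 * so the CPU spends roughly half of each sampling period at freq_hi and half
 * at freq_lo, averaging out to about freq_avg.
 */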
337
338static void hotplug_powersave_bias_init_cpu(int cpu)
339{
340 struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);
341 dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
342 dbs_info->freq_lo = 0;
343}
344
345static void hotplug_powersave_bias_init(void)
346{
347 int i;
348 for_each_online_cpu(i) {
349 hotplug_powersave_bias_init_cpu(i);
350 }
351}
352
353/************************** sysfs interface ************************/
354
355static ssize_t show_sampling_rate_min(struct kobject *kobj,
356 struct attribute *attr, char *buf)
357{
358 return sprintf(buf, "%u\n", min_sampling_rate);
359}
360
361define_one_global_ro(sampling_rate_min);
362
363/* cpufreq_hotplug Governor Tunables */
364#define show_one(file_name, object) \
365static ssize_t show_##file_name \
366(struct kobject *kobj, struct attribute *attr, char *buf) \
367{ \
368 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
369}
370
371show_one(sampling_rate, sampling_rate);
372show_one(io_is_busy, io_is_busy);
373show_one(up_threshold, up_threshold);
374show_one(od_threshold, od_threshold);
375show_one(down_differential, down_differential);
376show_one(sampling_down_factor, sampling_down_factor);
377show_one(ignore_nice_load, ignore_nice);
378show_one(powersave_bias, powersave_bias);
379show_one(cpu_up_threshold, cpu_up_threshold);
380show_one(cpu_down_differential, cpu_down_differential);
381show_one(cpu_up_avg_times, cpu_up_avg_times);
382show_one(cpu_down_avg_times, cpu_down_avg_times);
383show_one(thermal_dispatch_avg_times, thermal_dispatch_avg_times);
384show_one(cpu_num_limit, cpu_num_limit);
385show_one(cpu_num_base, cpu_num_base);
386show_one(is_cpu_hotplug_disable, is_cpu_hotplug_disable);
387#if INPUT_BOOST
388show_one(cpu_input_boost_enable, cpu_input_boost_enable);
389#endif
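
/*
 * For reference, show_one(sampling_rate, sampling_rate) above expands to:
 *
 *	static ssize_t show_sampling_rate
 *	(struct kobject *kobj, struct attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", dbs_tuners_ins.sampling_rate);
 *	}
 *
 * i.e. one read-only sysfs accessor per tunable in dbs_tuners_ins.
 */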
390
391/**
392 * update_sampling_rate - update sampling rate effective immediately if needed.
393 * @new_rate: new sampling rate
394 *
395 * If the new rate is smaller than the old one, simply updating
396 * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
397 * if the original sampling_rate was 1 second and the requested new sampling
398 * rate is 10 ms because the user needs an immediate reaction from the
399 * hotplug governor, but is not sure whether a higher frequency will be
400 * required or not, then the governor may change the sampling rate too
401 * late - up to 1 second later. Thus, if we are reducing the sampling rate,
402 * we need to make the new value effective immediately.
403 */
404static void update_sampling_rate(unsigned int new_rate)
405{
406 int cpu;
407
408 dbs_tuners_ins.sampling_rate = new_rate
409 = max(new_rate, min_sampling_rate);
410
411 for_each_online_cpu(cpu) {
412 struct cpufreq_policy *policy;
413 struct cpu_dbs_info_s *dbs_info;
414 unsigned long next_sampling, appointed_at;
415
416 policy = cpufreq_cpu_get(cpu);
417 if (!policy)
418 continue;
419 dbs_info = &per_cpu(hp_cpu_dbs_info, policy->cpu);
420 cpufreq_cpu_put(policy);
421
422 mutex_lock(&dbs_info->timer_mutex);
423
424 if (!delayed_work_pending(&dbs_info->work)) {
425 mutex_unlock(&dbs_info->timer_mutex);
426 continue;
427 }
428
429 next_sampling = jiffies + usecs_to_jiffies(new_rate);
430 appointed_at = dbs_info->work.timer.expires;
431
432
433 if (time_before(next_sampling, appointed_at)) {
434
435 mutex_unlock(&dbs_info->timer_mutex);
436 cancel_delayed_work_sync(&dbs_info->work);
437 mutex_lock(&dbs_info->timer_mutex);
438
439 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
440 usecs_to_jiffies(new_rate));
441
442 }
443 mutex_unlock(&dbs_info->timer_mutex);
444 }
445}
446
447void bl_enable_timer(int enable)
448{
449 static unsigned int sampling_rate_backup = 0;
450
451 if (enable && !sampling_rate_backup)
452 return;
453
454 if (enable)
455 update_sampling_rate(sampling_rate_backup);
456 else {
457 struct cpufreq_policy *policy;
458 struct cpu_dbs_info_s *dbs_info;
459 unsigned int new_rate = 30000 * 100; /* slow sampling down to 3 s */
460
461 /* save the current sampling rate so it can be restored later */
462 sampling_rate_backup = dbs_tuners_ins.sampling_rate;
463 update_sampling_rate(new_rate);
464
465 policy = cpufreq_cpu_get(0);
466 if (!policy)
467 return;
468
469 dbs_info = &per_cpu(hp_cpu_dbs_info, 0);
470 cpufreq_cpu_put(policy);
471
472 mutex_lock(&dbs_info->timer_mutex);
473
474 if (!delayed_work_pending(&dbs_info->work)) {
475 mutex_unlock(&dbs_info->timer_mutex);
476 return;
477 }
478
479 mutex_unlock(&dbs_info->timer_mutex);
480
481 cancel_delayed_work_sync(&dbs_info->work);
482
483 mutex_lock(&dbs_info->timer_mutex);
484
485 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
486 usecs_to_jiffies(new_rate));
487
488 mutex_unlock(&dbs_info->timer_mutex);
489 }
490}
491EXPORT_SYMBOL(bl_enable_timer);
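
/*
 * Usage sketch (illustrative; the caller shown is hypothetical): code that
 * temporarily wants the governor quiesced, e.g. around a cluster on/off
 * transition, would bracket the operation with
 *
 *	bl_enable_timer(0);	// stretch the sampling period to ~3 s
 *	...			// do the sensitive work
 *	bl_enable_timer(1);	// restore the saved sampling rate
 *
 * Note that bl_enable_timer(1) is a no-op if the timer was never slowed down
 * first (sampling_rate_backup == 0).
 */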
492
493static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
494 const char *buf, size_t count)
495{
496 unsigned int input;
497 int ret;
498 ret = sscanf(buf, "%u", &input);
499 if (ret != 1)
500 return -EINVAL;
501 update_sampling_rate(input);
502 return count;
503}
504
505static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
506 const char *buf, size_t count)
507{
508 unsigned int input;
509 int ret;
510
511 ret = sscanf(buf, "%u", &input);
512 if (ret != 1)
513 return -EINVAL;
514 dbs_tuners_ins.io_is_busy = !!input;
515 return count;
516}
517
518static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
519 const char *buf, size_t count)
520{
521 unsigned int input;
522 int ret;
523 ret = sscanf(buf, "%u", &input);
524
525 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
526 input < MIN_FREQUENCY_UP_THRESHOLD) {
527 return -EINVAL;
528 }
529 dbs_tuners_ins.up_threshold = input;
530 return count;
531}
532
533static ssize_t store_od_threshold(struct kobject *a, struct attribute *b,
534 const char *buf, size_t count)
535{
536 unsigned int input;
537 int ret;
538 ret = sscanf(buf, "%u", &input);
539
540 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
541 input < MIN_FREQUENCY_UP_THRESHOLD) {
542 return -EINVAL;
543 }
544 dbs_tuners_ins.od_threshold = input;
545 return count;
546}
547
548static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
549 const char *buf, size_t count)
550{
551 unsigned int input;
552 int ret;
553 ret = sscanf(buf, "%u", &input);
554
555 if (ret != 1 || input > MAX_FREQUENCY_DOWN_DIFFERENTIAL ||
556 input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
557 return -EINVAL;
558 }
559 dbs_tuners_ins.down_differential = input;
560 return count;
561}
562
563static ssize_t store_sampling_down_factor(struct kobject *a,
564 struct attribute *b, const char *buf, size_t count)
565{
566 unsigned int input, j;
567 int ret;
568 ret = sscanf(buf, "%u", &input);
569
570 if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
571 return -EINVAL;
572 dbs_tuners_ins.sampling_down_factor = input;
573
574 /* Reset down sampling multiplier in case it was active */
575 for_each_online_cpu(j) {
576 struct cpu_dbs_info_s *dbs_info;
577 dbs_info = &per_cpu(hp_cpu_dbs_info, j);
578 dbs_info->rate_mult = 1;
579 }
580 return count;
581}
582
583static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
584 const char *buf, size_t count)
585{
586 unsigned int input;
587 int ret;
588
589 unsigned int j;
590
591 ret = sscanf(buf, "%u", &input);
592 if (ret != 1)
593 return -EINVAL;
594
595 if (input > 1)
596 input = 1;
597
598 if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
599 return count;
600 }
601 dbs_tuners_ins.ignore_nice = input;
602
603 /* we need to re-evaluate prev_cpu_idle */
604 for_each_online_cpu(j) {
605 struct cpu_dbs_info_s *dbs_info;
606 dbs_info = &per_cpu(hp_cpu_dbs_info, j);
607 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
608 &dbs_info->prev_cpu_wall,
609 dbs_tuners_ins.io_is_busy);
610 if (dbs_tuners_ins.ignore_nice)
611 dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
612
613 }
614 return count;
615}
616
617static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
618 const char *buf, size_t count)
619{
620 unsigned int input;
621 int ret;
622 ret = sscanf(buf, "%u", &input);
623
624 if (ret != 1)
625 return -EINVAL;
626
627 if (input > 1000)
628 input = 1000;
629
630 dbs_tuners_ins.powersave_bias = input;
631 hotplug_powersave_bias_init();
632 return count;
633}
634
635static ssize_t store_cpu_up_threshold(struct kobject *a, struct attribute *b,
636 const char *buf, size_t count)
637{
638 unsigned int input;
639 int ret;
640 ret = sscanf(buf, "%u", &input);
641
642 if (ret != 1 || input > MAX_CPU_UP_THRESHOLD ||
643 input < MIN_CPU_UP_THRESHOLD) {
644 return -EINVAL;
645 }
646 dbs_tuners_ins.cpu_up_threshold = input;
647 return count;
648}
649
650static ssize_t store_cpu_down_differential(struct kobject *a, struct attribute *b,
651 const char *buf, size_t count)
652{
653 unsigned int input;
654 int ret;
655 ret = sscanf(buf, "%u", &input);
656
657 if (ret != 1 || input > MAX_CPU_DOWN_DIFFERENTIAL ||
658 input < MIN_CPU_DOWN_DIFFERENTIAL) {
659 return -EINVAL;
660 }
661 dbs_tuners_ins.cpu_down_differential = input;
662 return count;
663}
664
665static ssize_t store_cpu_up_avg_times(struct kobject *a, struct attribute *b,
666 const char *buf, size_t count)
667{
668 unsigned int input;
669 int ret;
670 ret = sscanf(buf, "%u", &input);
 if (ret != 1)
 return -EINVAL;
671
672 dbs_tuners_ins.cpu_up_avg_times = input;
673 return count;
674}
675
676static ssize_t store_cpu_down_avg_times(struct kobject *a, struct attribute *b,
677 const char *buf, size_t count)
678{
679 unsigned int input;
680 int ret;
681 ret = sscanf(buf, "%u", &input);
 if (ret != 1)
 return -EINVAL;
682
683 dbs_tuners_ins.cpu_down_avg_times = input;
684 return count;
685}
686
687static ssize_t store_thermal_dispatch_avg_times(struct kobject *a, struct attribute *b,
688 const char *buf, size_t count)
689{
690 unsigned int input;
691 int ret;
692 ret = sscanf(buf, "%u", &input);
 if (ret != 1)
 return -EINVAL;
693
694 dbs_tuners_ins.thermal_dispatch_avg_times = input;
695 return count;
696}
697
698static ssize_t store_cpu_num_limit(struct kobject *a, struct attribute *b,
699 const char *buf, size_t count)
700{
701 unsigned int input;
702 int ret;
703 ret = sscanf(buf, "%u", &input);
 if (ret != 1)
 return -EINVAL;
704
705 dbs_tuners_ins.cpu_num_limit = input;
706 return count;
707}
708
709static ssize_t store_cpu_num_base(struct kobject *a, struct attribute *b,
710 const char *buf, size_t count)
711{
712 unsigned int input;
713 bool raise_freq = false;
714 int ret;
715 struct cpufreq_policy *policy;
716
717 policy = cpufreq_cpu_get(0);
 if (!policy)
 return -EINVAL;
718 ret = sscanf(buf, "%u", &input);
 if (ret != 1) {
 cpufreq_cpu_put(policy);
 return -EINVAL;
 }
719
720 dbs_tuners_ins.cpu_num_base = input;
721 mutex_lock(&hp_mutex);
722 if (num_online_cpus() < dbs_tuners_ins.cpu_num_base && num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
723 raise_freq = true;
724 g_next_hp_action = 1;
725 schedule_delayed_work_on(0, &hp_work, 0);
726 }
727 mutex_unlock(&hp_mutex);
728
729 if (raise_freq == true)
730 dbs_freq_increase(policy, policy->max);
 cpufreq_cpu_put(policy);
731
732 return count;
733}
734
735static ssize_t store_is_cpu_hotplug_disable(struct kobject *a, struct attribute *b,
736 const char *buf, size_t count)
737{
738 unsigned int input;
739 int ret;
740 ret = sscanf(buf, "%u", &input);
 if (ret != 1)
 return -EINVAL;
741
742 dbs_tuners_ins.is_cpu_hotplug_disable = input;
743 return count;
744}
745
746#if INPUT_BOOST
747static ssize_t store_cpu_input_boost_enable(struct kobject *a, struct attribute *b,
748 const char *buf, size_t count)
749{
750 unsigned int input;
751 int ret;
752 ret = sscanf(buf, "%u", &input);
753
754 if (ret != 1 || input > 1) {
756 return -EINVAL;
757 }
758
759 mutex_lock(&hp_mutex);
760 dbs_tuners_ins.cpu_input_boost_enable = input;
761 mutex_unlock(&hp_mutex);
762
763 return count;
764}
765#endif
766
767define_one_global_rw(sampling_rate);
768define_one_global_rw(io_is_busy);
769define_one_global_rw(up_threshold);
770define_one_global_rw(od_threshold);
771define_one_global_rw(down_differential);
772define_one_global_rw(sampling_down_factor);
773define_one_global_rw(ignore_nice_load);
774define_one_global_rw(powersave_bias);
775define_one_global_rw(cpu_up_threshold);
776define_one_global_rw(cpu_down_differential);
777define_one_global_rw(cpu_up_avg_times);
778define_one_global_rw(cpu_down_avg_times);
779define_one_global_rw(thermal_dispatch_avg_times);
780define_one_global_rw(cpu_num_limit);
781define_one_global_rw(cpu_num_base);
782define_one_global_rw(is_cpu_hotplug_disable);
783#if INPUT_BOOST
784define_one_global_rw(cpu_input_boost_enable);
785#endif
786
787static struct attribute *dbs_attributes[] = {
788 &sampling_rate_min.attr,
789 &sampling_rate.attr,
790 &up_threshold.attr,
791 &od_threshold.attr,
792 &down_differential.attr,
793 &sampling_down_factor.attr,
794 &ignore_nice_load.attr,
795 &powersave_bias.attr,
796 &io_is_busy.attr,
797 &cpu_up_threshold.attr,
798 &cpu_down_differential.attr,
799 &cpu_up_avg_times.attr,
800 &cpu_down_avg_times.attr,
801 &thermal_dispatch_avg_times.attr,
802 &cpu_num_limit.attr,
803 &cpu_num_base.attr,
804 &is_cpu_hotplug_disable.attr,
805#if INPUT_BOOST
806 &cpu_input_boost_enable.attr,
807#endif
808 NULL
809};
810
811static struct attribute_group dbs_attr_group = {
812 .attrs = dbs_attributes,
813 .name = "hotplug",
814};
815
816/************************** sysfs end ************************/
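
/*
 * With the attribute group above registered on cpufreq_global_kobject, the
 * tunables should appear under /sys/devices/system/cpu/cpufreq/hotplug/
 * (path assumed from the usual cpufreq sysfs layout), e.g.:
 *
 *	echo 95 > /sys/devices/system/cpu/cpufreq/hotplug/cpu_up_threshold
 *	cat /sys/devices/system/cpu/cpufreq/hotplug/cpu_num_limit
 */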
817
818static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
819{
820 if (dbs_tuners_ins.powersave_bias)
821 freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
822 else if (p->cur == p->max)
823 {
824 if (dbs_ignore == 0)
825 dbs_ignore = 1;
826 else
827 return;
828 }
829
830 __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
831 CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
832}
833
834int mt_cpufreq_cur_load(void)
835{
836 return cpu_loading;
837}
838EXPORT_SYMBOL(mt_cpufreq_cur_load);
839
840void hp_set_dynamic_cpu_hotplug_enable(int enable)
841{
842 mutex_lock(&hp_mutex);
843 dbs_tuners_ins.is_cpu_hotplug_disable = !enable;
844 mutex_unlock(&hp_mutex);
845}
846EXPORT_SYMBOL(hp_set_dynamic_cpu_hotplug_enable);
847
848void hp_limited_cpu_num(int num)
849{
850 mutex_lock(&hp_mutex);
851 dbs_tuners_ins.cpu_num_limit = num;
852
853 if (num < num_online_cpus()) {
854 printk("%s: CPU off due to thermal protection! limit_num = %d < online = %d\n",
855 __func__, num, num_online_cpus());
856 g_next_hp_action = 0;
857 schedule_delayed_work_on(0, &hp_work, 0);
858 g_cpu_down_count = 0;
859 g_cpu_down_sum_load = 0;
860 }
861
862 mutex_unlock(&hp_mutex);
863}
864EXPORT_SYMBOL(hp_limited_cpu_num);
865void hp_based_cpu_num(int num)
866{
867 mutex_lock(&hp_mutex);
868 dbs_tuners_ins.cpu_num_base = num;
869 mutex_unlock(&hp_mutex);
870}
871EXPORT_SYMBOL(hp_based_cpu_num);
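
/*
 * Usage sketch for the exported hotplug hooks (illustrative; the callers are
 * hypothetical):
 *
 *	hp_limited_cpu_num(1);			// thermal: cap at one core
 *	hp_based_cpu_num(2);			// perf service: keep >= 2 cores
 *	hp_set_dynamic_cpu_hotplug_enable(0);	// freeze automatic hotplug
 *
 * hp_limited_cpu_num() also kicks hp_work once to take a core down
 * immediately; the base/limit values are then enforced by dbs_check_cpu()
 * on every sample.
 */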
872
873#ifdef CONFIG_SMP
874
875static void __cpuinit hp_work_handler(struct work_struct *work)
876{
877 if (mutex_trylock(&bl_onoff_mutex))
878 {
879 if (!dbs_tuners_ins.is_cpu_hotplug_disable)
880 {
881 int onlines_cpu_n = num_online_cpus();
882
883 if (g_next_hp_action) // turn on CPU
884 {
885 if (onlines_cpu_n < num_possible_cpus())
886 {
887 printk("hp_work_handler: cpu_up(%d) kick off\n", onlines_cpu_n);
888 cpu_up(onlines_cpu_n);
889 printk("hp_work_handler: cpu_up(%d) completion\n", onlines_cpu_n);
890
891 dbs_ignore = 0; // force trigger frequency scaling
892 }
893 }
894 else // turn off CPU
895 {
896 if (onlines_cpu_n > 1)
897 {
898 printk("hp_work_handler: cpu_down(%d) kick off\n", (onlines_cpu_n - 1));
899 cpu_down((onlines_cpu_n - 1));
900 printk("hp_work_handler: cpu_down(%d) completion\n", (onlines_cpu_n - 1));
901
902 dbs_ignore = 0; // force trigger frequency scaling
903 }
904 }
905 }
906 mutex_unlock(&bl_onoff_mutex);
907 }
908}
909
910#endif
911
912static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
913{
914 unsigned int max_load_freq;
915 bool raise_freq = false;
916
917 struct cpufreq_policy *policy;
918 unsigned int j;
919
920 this_dbs_info->freq_lo = 0;
921 policy = this_dbs_info->cur_policy;
922
923 /*
924 * Every sampling_rate, we check whether the current idle time is less
925 * than 20% (default); if it is, we try to increase the frequency.
926 * Every sampling_rate, we also look for the lowest
927 * frequency which can sustain the load while keeping idle time over
928 * 30%. If such a frequency exists, we try to decrease to this frequency.
929 *
930 * Any frequency increase takes it to the maximum frequency.
931 * Frequency reduction happens at minimum steps of
932 * 5% (default) of the current frequency.
933 */
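
 /*
 * Worked example (illustrative numbers): with up_threshold = 80,
 * policy->cur = 700000 kHz and a measured load of 85% at
 * freq_avg == policy->cur:
 *
 *   max_load_freq = 85 * 700000 = 59,500,000
 *   threshold     = 80 * 700000 = 56,000,000
 *
 * so max_load_freq exceeds the threshold and the code below ramps the
 * frequency up (to get_normal_max_freq(), or to policy->max once the
 * od_threshold has been exceeded for DEF_CPU_PERSIST_COUNT samples).
 */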
934
935 /* Get Absolute Load - in terms of freq */
936 max_load_freq = 0;
937 cpus_sum_load = 0;
938
939 for_each_cpu(j, policy->cpus) {
940 struct cpu_dbs_info_s *j_dbs_info;
941 cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
942 unsigned int idle_time, wall_time, iowait_time;
943 unsigned int load, load_freq;
944 int freq_avg;
945
946 j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
947
948 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time,
949 dbs_tuners_ins.io_is_busy);
950 cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
951
952 wall_time = (unsigned int)
953 (cur_wall_time - j_dbs_info->prev_cpu_wall);
954 j_dbs_info->prev_cpu_wall = cur_wall_time;
955
956 idle_time = (unsigned int)
957 (cur_idle_time - j_dbs_info->prev_cpu_idle);
958 j_dbs_info->prev_cpu_idle = cur_idle_time;
959
960 iowait_time = (unsigned int)
961 (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
962 j_dbs_info->prev_cpu_iowait = cur_iowait_time;
963
964 if (dbs_tuners_ins.ignore_nice) {
965 u64 cur_nice;
966 unsigned long cur_nice_jiffies;
967
968 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
969 j_dbs_info->prev_cpu_nice;
970 /*
971 * Assumption: nice time between sampling periods will
972 * be less than 2^32 jiffies for 32 bit sys
973 */
974 cur_nice_jiffies = (unsigned long)
975 cputime64_to_jiffies64(cur_nice);
976
977 j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
978 idle_time += jiffies_to_usecs(cur_nice_jiffies);
979 }
980
981 /*
982 * For the purpose of hotplug, waiting for disk IO is an
983 * indication that you're performance critical, and not that
984 * the system is actually idle. So subtract the iowait time
985 * from the cpu idle time.
986 */
987
988 if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
989 idle_time -= iowait_time;
990
991 if (unlikely(!wall_time || wall_time < idle_time))
992 continue;
993
994 load = 100 * (wall_time - idle_time) / wall_time;
995
996 cpus_sum_load += load;
997
998 freq_avg = __cpufreq_driver_getavg(policy, j);
999 if (freq_avg <= 0)
1000 freq_avg = policy->cur;
1001
1002 load_freq = load * freq_avg;
1003 if (load_freq > max_load_freq)
1004 max_load_freq = load_freq;
1005
1006 #ifdef DEBUG_LOG
1007 printk("dbs_check_cpu: cpu = %d\n", j);
1008 printk("dbs_check_cpu: wall_time = %d, idle_time = %d, load = %d\n", wall_time, idle_time, load);
1009 printk("dbs_check_cpu: freq_avg = %d, max_load_freq = %d, cpus_sum_load = %d\n", freq_avg, max_load_freq, cpus_sum_load);
1010 #endif
1011 }
1012 // record loading information
1013 cpu_loading = max_load_freq / policy->cur;
1014 // dispatch power budget
1015 if(g_thermal_count >= dbs_tuners_ins.thermal_dispatch_avg_times) {
1016 g_thermal_count = 0;
1017 mt_dvfs_power_dispatch_safe();
1018 if ((dbs_thermal_limited == 1) && (policy->cur > dbs_thermal_limited_freq))
1019 __cpufreq_driver_target(policy, dbs_thermal_limited_freq, CPUFREQ_RELATION_L);
1020 }
1021 else
1022 g_thermal_count++;
1023
1024 if (policy->cur >= get_normal_max_freq()){
1025 if ((max_load_freq > dbs_tuners_ins.od_threshold * policy->cur) && (num_online_cpus() == num_possible_cpus())){
1026 g_max_cpu_persist_count++;
1027 #ifdef DEBUG_LOG
1028 printk("dvfs_od: g_max_cpu_persist_count: %d\n", g_max_cpu_persist_count);
1029 #endif
1030 if(g_max_cpu_persist_count == DEF_CPU_PERSIST_COUNT){
1031 //only ramp up to OD OPP here
1032 #ifdef DEBUG_LOG
1033 printk("dvfs_od: cpu loading = %d\n", max_load_freq/policy->cur);
1034 #endif
1035 if (policy->cur < policy->max)
1036 this_dbs_info->rate_mult =
1037 dbs_tuners_ins.sampling_down_factor;
1038 dbs_freq_increase(policy, policy->max);
1039 #ifdef DEBUG_LOG
1040 printk("reset g_max_cpu_persist_count, count = 10\n");
1041 #endif
1042 g_max_cpu_persist_count = 0;
1043 goto hp_check;
1044 }
1045 }
1046 else {
1047 g_max_cpu_persist_count = 0;
1048 }
1049 }
1050 else{
1051 if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
1052 /* If switching to max speed, apply sampling_down_factor */
1053 if (policy->cur < get_normal_max_freq())
1054 this_dbs_info->rate_mult =
1055 dbs_tuners_ins.sampling_down_factor;
1056 dbs_freq_increase(policy, get_normal_max_freq());
1057 if(g_max_cpu_persist_count != 0){
1058 g_max_cpu_persist_count = 0;
1059 #ifdef DEBUG_LOG
1060 printk("reset g_max_cpu_persist_count, and fallback to normal max\n");
1061 #endif
1062 }
1063 goto hp_check;
1064 }
1065 }
1066
1067 /* Check for frequency decrease */
1068 /* if we cannot reduce the frequency anymore, break out early */
1069 if (policy->cur == policy->min)
1070 goto hp_check;
1071
1072 /*
1073 * The optimal frequency is the frequency that is the lowest that
1074 * can support the current CPU usage without triggering the up
1075 * policy. To be safe, we focus 10 points under the threshold.
1076 */
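
 /*
 * Worked example (illustrative numbers): with up_threshold = 80 and
 * down_differential = 10, the target denominator is 70. If the load is
 * 50% at policy->cur = 1000000 kHz (max_load_freq = 50,000,000), then
 *
 *   freq_next = 50,000,000 / 70 ~= 714,285 kHz
 *
 * and CPUFREQ_RELATION_L picks the lowest table frequency at or above
 * that, so the projected load at the new frequency stays close to 70%,
 * i.e. 10 points under the up threshold.
 */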
1077 if (max_load_freq <
1078 (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
1079 policy->cur) {
1080 unsigned int freq_next;
1081 freq_next = max_load_freq /
1082 (dbs_tuners_ins.up_threshold -
1083 dbs_tuners_ins.down_differential);
1084
1085 /* No longer fully busy, reset rate_mult */
1086 this_dbs_info->rate_mult = 1;
1087
1088 if (freq_next < policy->min)
1089 freq_next = policy->min;
1090
1091 if(g_max_cpu_persist_count != 0){
1092 g_max_cpu_persist_count = 0;
1093 #ifdef DEBUG_LOG
1094 printk("reset g_max_cpu_persist_count, decrease freq accrording to loading\n");
1095 #endif
1096 }
1097
1098 if (!dbs_tuners_ins.powersave_bias) {
1099 __cpufreq_driver_target(policy, freq_next,
1100 CPUFREQ_RELATION_L);
1101 } else {
1102 int freq = powersave_bias_target(policy, freq_next,
1103 CPUFREQ_RELATION_L);
1104 __cpufreq_driver_target(policy, freq,
1105 CPUFREQ_RELATION_L);
1106 }
1107 }
1108
1109hp_check:
1110
1111 /* If the hotplug policy is disabled, return directly */
1112 if (dbs_tuners_ins.is_cpu_hotplug_disable)
1113 return;
1114
1115 #ifdef CONFIG_SMP
1116 mutex_lock(&hp_mutex);
1117
1118 /* Check CPU loading to power up slave CPU */
1119 if (num_online_cpus() < dbs_tuners_ins.cpu_num_base && num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
1120 raise_freq = true;
1121 printk("dbs_check_cpu: turn on CPU by perf service\n");
1122 g_next_hp_action = 1;
1123 schedule_delayed_work_on(0, &hp_work, 0);
1124 } else if (num_online_cpus() < num_possible_cpus() && num_online_cpus() < dbs_tuners_ins.cpu_num_limit) {
1125 g_cpu_up_count++;
1126 g_cpu_up_sum_load += cpus_sum_load;
1127 if (g_cpu_up_count == dbs_tuners_ins.cpu_up_avg_times) {
1128 g_cpu_up_sum_load /= dbs_tuners_ins.cpu_up_avg_times;
1129 if (g_cpu_up_sum_load >
1130 (dbs_tuners_ins.cpu_up_threshold * num_online_cpus())) {
1131 #ifdef DEBUG_LOG
1132 printk("dbs_check_cpu: g_cpu_up_sum_load = %d\n", g_cpu_up_sum_load);
1133 #endif
1134 raise_freq = true;
1135 printk("dbs_check_cpu: turn on CPU\n");
1136 g_next_hp_action = 1;
1137 schedule_delayed_work_on(0, &hp_work, 0);
1138 }
1139 g_cpu_up_count = 0;
1140 g_cpu_up_sum_load = 0;
1141 }
1142 #ifdef DEBUG_LOG
1143 printk("dbs_check_cpu: g_cpu_up_count = %d, g_cpu_up_sum_load = %d\n", g_cpu_up_count, g_cpu_up_sum_load);
1144 printk("dbs_check_cpu: cpu_up_threshold = %d\n", (dbs_tuners_ins.cpu_up_threshold * num_online_cpus()));
1145 #endif
1146 }
1147
1148 /* Check CPU loading to power down slave CPU */
1149 if (num_online_cpus() > 1) {
1150 g_cpu_down_count++;
1151 g_cpu_down_sum_load += cpus_sum_load;
1152 if (g_cpu_down_count == dbs_tuners_ins.cpu_down_avg_times) {
1153 g_cpu_down_sum_load /= dbs_tuners_ins.cpu_down_avg_times;
1154 if (g_cpu_down_sum_load <
1155 ((dbs_tuners_ins.cpu_up_threshold - dbs_tuners_ins.cpu_down_differential) * (num_online_cpus() - 1))) {
1156 if (num_online_cpus() > dbs_tuners_ins.cpu_num_base) {
1157 #ifdef DEBUG_LOG
1158 printk("dbs_check_cpu: g_cpu_down_sum_load = %d\n", g_cpu_down_sum_load);
1159 #endif
1160 raise_freq = true;
1161 printk("dbs_check_cpu: turn off CPU\n");
1162 g_next_hp_action = 0;
1163 schedule_delayed_work_on(0, &hp_work, 0);
1164 }
1165 }
1166 g_cpu_down_count = 0;
1167 g_cpu_down_sum_load = 0;
1168 }
1169 #ifdef DEBUG_LOG
1170 printk("dbs_check_cpu: g_cpu_down_count = %d, g_cpu_down_sum_load = %d\n", g_cpu_down_count, g_cpu_down_sum_load);
1171 printk("dbs_check_cpu: cpu_down_threshold = %d\n", ((dbs_tuners_ins.cpu_up_threshold - dbs_tuners_ins.cpu_down_differential) * (num_online_cpus() - 1)));
1172 #endif
1173 }
1174
1175 mutex_unlock(&hp_mutex);
1176 #endif
1177 // dbs_freq_increase must be called outside of hp_mutex
1178 // to avoid a self-deadlock
1179 if(raise_freq == true)
1180 dbs_freq_increase(policy, policy->max);
1181
1182 return;
1183}
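
/*
 * Hotplug hysteresis, worked example with the default tunables
 * (cpu_up_threshold = 90, cpu_down_differential = 10,
 * cpu_up_avg_times = 10, cpu_down_avg_times = 50):
 *
 *   - with one core online, the per-sample load sum is accumulated for 10
 *     samples; if the average exceeds 90 * 1 = 90, a second core is
 *     plugged in;
 *   - with two cores online, the sum is accumulated for 50 samples; if the
 *     average drops below (90 - 10) * (2 - 1) = 80, one core is unplugged.
 */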
1184
1185static void do_dbs_timer(struct work_struct *work)
1186{
1187 struct cpu_dbs_info_s *dbs_info =
1188 container_of(work, struct cpu_dbs_info_s, work.work);
1189 unsigned int cpu = dbs_info->cpu;
1190 int sample_type = dbs_info->sample_type;
1191
1192 int delay;
1193
1194 mutex_lock(&dbs_info->timer_mutex);
1195
1196 /* Common NORMAL_SAMPLE setup */
1197 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
1198 if (!dbs_tuners_ins.powersave_bias ||
1199 sample_type == DBS_NORMAL_SAMPLE) {
1200 dbs_check_cpu(dbs_info);
1201 if (dbs_info->freq_lo) {
1202 /* Setup timer for SUB_SAMPLE */
1203 dbs_info->sample_type = DBS_SUB_SAMPLE;
1204 delay = dbs_info->freq_hi_jiffies;
1205 } else {
1206 /* We want all CPUs to do sampling nearly on
1207 * same jiffy
1208 */
1209 delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
1210 * dbs_info->rate_mult);
1211
1212 if (num_online_cpus() > 1)
1213 delay -= jiffies % delay;
1214 }
1215 } else {
1216 __cpufreq_driver_target(dbs_info->cur_policy,
1217 dbs_info->freq_lo, CPUFREQ_RELATION_H);
1218 delay = dbs_info->freq_lo_jiffies;
1219 }
1220 schedule_delayed_work_on(cpu, &dbs_info->work, delay);
1221 mutex_unlock(&dbs_info->timer_mutex);
1222}
1223
1224static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
1225{
1226 /* We want all CPUs to do sampling nearly on same jiffy */
1227 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
1228
1229 if (num_online_cpus() > 1)
1230 delay -= jiffies % delay;
1231
1232 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
1233 INIT_DELAYED_WORK(&dbs_info->work, do_dbs_timer);
1234 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
1235}
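
/*
 * Alignment sketch (illustrative, assuming HZ = 100 and a 30 ms sampling
 * rate): delay = usecs_to_jiffies(30000) = 3 jiffies. If "jiffies % delay"
 * is 1 at init time, delay is shortened to 2 so the first expiry lands on a
 * multiple of 3 jiffies; since every CPU does the same, their sampling work
 * ends up roughly phase-aligned on the same jiffy.
 */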
1236
1237static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
1238{
1239 cancel_delayed_work_sync(&dbs_info->work);
1240}
1241
1242/*
1243 * Not all CPUs want IO time to be accounted as busy; this depends on how
1244 * efficient idling at a higher frequency/voltage is.
1245 * Pavel Machek says this is not so for various generations of AMD and old
1246 * Intel systems.
1247 * Mike Chan (android.com) claims this is also not true for ARM.
1248 * Because of this, whitelist specific known (series of) CPUs by default, and
1249 * leave all others up to the user.
1250 */
1251static int should_io_be_busy(void)
1252{
1253#if defined(CONFIG_X86)
1254 /*
1255 * For Intel, Core 2 (model 15) and later have an efficient idle.
1256 */
1257 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
1258 boot_cpu_data.x86 == 6 &&
1259 boot_cpu_data.x86_model >= 15)
1260 return 1;
1261#endif
1262 return 1; // io wait time should be subtracted from idle time
1263}
1264
1265#if INPUT_BOOST
1266static void dbs_input_event(struct input_handle *handle, unsigned int type,
1267 unsigned int code, int value)
1268{
1269 if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1) && (dbs_tuners_ins.cpu_input_boost_enable))
1270 {
1271 force_two_core();
1272 }
1273}
1274
1275static int dbs_input_connect(struct input_handler *handler,
1276 struct input_dev *dev, const struct input_device_id *id)
1277{
1278 struct input_handle *handle;
1279 int error;
1280
1281 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
1282 if (!handle)
1283 return -ENOMEM;
1284
1285 handle->dev = dev;
1286 handle->handler = handler;
1287 handle->name = "cpufreq_balance";
1288
1289 error = input_register_handle(handle);
1290 if (error)
1291 goto err2;
1292
1293 error = input_open_device(handle);
1294 if (error)
1295 goto err1;
1296
1297 return 0;
1298err1:
1299 input_unregister_handle(handle);
1300err2:
1301 kfree(handle);
1302 return error;
1303}
1304
1305static void dbs_input_disconnect(struct input_handle *handle)
1306{
1307 input_close_device(handle);
1308 input_unregister_handle(handle);
1309 kfree(handle);
1310}
1311
1312static const struct input_device_id dbs_ids[] = {
1313 {
1314 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
1315 INPUT_DEVICE_ID_MATCH_ABSBIT,
1316 .evbit = { BIT_MASK(EV_ABS) },
1317 .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
1318 BIT_MASK(ABS_MT_POSITION_X) |
1319 BIT_MASK(ABS_MT_POSITION_Y) },
1320 }, /* multi-touch touchscreen */
1321 {
1322 .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
1323 INPUT_DEVICE_ID_MATCH_ABSBIT,
1324 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
1325 .absbit = { [BIT_WORD(ABS_X)] =
1326 BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
1327 }, /* touchpad */
1328 { },
1329};
1330
1331static struct input_handler dbs_input_handler = {
1332 .event = dbs_input_event,
1333 .connect = dbs_input_connect,
1334 .disconnect = dbs_input_disconnect,
1335 .name = "cpufreq_balance",
1336 .id_table = dbs_ids,
1337};
1338#endif /* INPUT_BOOST */
1339
1340
1341
1342static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
1343 unsigned int event)
1344{
1345 unsigned int cpu = policy->cpu;
1346 struct cpu_dbs_info_s *this_dbs_info;
1347 unsigned int j;
1348 int rc;
1349
1350 this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);
1351
1352 switch (event) {
1353 case CPUFREQ_GOV_START:
1354 if ((!cpu_online(cpu)) || (!policy->cur))
1355 return -EINVAL;
1356
1357 mutex_lock(&dbs_mutex);
1358
1359 dbs_enable++;
1360 for_each_cpu(j, policy->cpus) {
1361 struct cpu_dbs_info_s *j_dbs_info;
1362 j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
1363 j_dbs_info->cur_policy = policy;
1364
1365 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
1366 &j_dbs_info->prev_cpu_wall,
1367 dbs_tuners_ins.io_is_busy);
1368
1369 if (dbs_tuners_ins.ignore_nice)
1370 j_dbs_info->prev_cpu_nice =
1371 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
1372 }
1373 this_dbs_info->cpu = cpu;
1374 this_dbs_info->rate_mult = 1;
1375 hotplug_powersave_bias_init_cpu(cpu);
1376 /*
1377 * Start the timer schedule work when this governor
1378 * is used for the first time
1379 */
1380 if (dbs_enable == 1) {
1381 unsigned int latency;
1382
1383 rc = sysfs_create_group(cpufreq_global_kobject,
1384 &dbs_attr_group);
1385 if (rc) {
1386 mutex_unlock(&dbs_mutex);
1387 return rc;
1388 }
1389
1390 /* policy latency is in ns. Convert it to us first */
1391 latency = policy->cpuinfo.transition_latency / 1000;
1392 if (latency == 0)
1393 latency = 1;
1394 /* Bring kernel and HW constraints together */
1395 min_sampling_rate = max(min_sampling_rate,
1396 MIN_LATENCY_MULTIPLIER * latency);
1397 dbs_tuners_ins.sampling_rate =
1398 max(min_sampling_rate,
1399 latency * LATENCY_MULTIPLIER);
1400 dbs_tuners_ins.io_is_busy = should_io_be_busy();
1401
1402 #ifdef DEBUG_LOG
1403 printk("cpufreq_governor_dbs: min_sampling_rate = %d\n", min_sampling_rate);
1404 printk("cpufreq_governor_dbs: dbs_tuners_ins.sampling_rate = %d\n", dbs_tuners_ins.sampling_rate);
1405 printk("cpufreq_governor_dbs: dbs_tuners_ins.io_is_busy = %d\n", dbs_tuners_ins.io_is_busy);
1406 #endif
1407 }
1408#if INPUT_BOOST
1409 if (!cpu)
1410 rc = input_register_handler(&dbs_input_handler);
1411#endif
1412 mutex_unlock(&dbs_mutex);
1413
1414 mutex_init(&this_dbs_info->timer_mutex);
1415 dbs_timer_init(this_dbs_info);
1416 break;
1417
1418 case CPUFREQ_GOV_STOP:
1419 dbs_timer_exit(this_dbs_info);
1420
1421 mutex_lock(&dbs_mutex);
1422 mutex_destroy(&this_dbs_info->timer_mutex);
1423 dbs_enable--;
1424#if INPUT_BOOST
1425 if (!cpu)
1426 input_unregister_handler(&dbs_input_handler);
1427
1428#endif
1429 mutex_unlock(&dbs_mutex);
1430 if (!dbs_enable)
1431 sysfs_remove_group(cpufreq_global_kobject,
1432 &dbs_attr_group);
1433
1434 break;
1435
1436 case CPUFREQ_GOV_LIMITS:
1437 mutex_lock(&this_dbs_info->timer_mutex);
1438 if (get_normal_max_freq() < this_dbs_info->cur_policy->cur)
1439 __cpufreq_driver_target(this_dbs_info->cur_policy,
1440 get_normal_max_freq(), CPUFREQ_RELATION_H);
1441 else if (policy->min > this_dbs_info->cur_policy->cur)
1442 __cpufreq_driver_target(this_dbs_info->cur_policy,
1443 policy->min, CPUFREQ_RELATION_L);
1444 mutex_unlock(&this_dbs_info->timer_mutex);
1445 break;
1446 }
1447 return 0;
1448}
1449
1450/*int cpufreq_gov_dbs_get_sum_load(void)
1451{
1452 return cpus_sum_load;
1453}*/
1454
1455#if INPUT_BOOST
1456static int touch_freq_up_task(void *data)
1457{
1458 struct cpufreq_policy *policy;
1459
1460 while (1) {
1461 policy = cpufreq_cpu_get(0);
1462 if(policy != NULL)
1463 {
1464 dbs_freq_increase(policy, policy->max);
1465 cpufreq_cpu_put(policy);
1466 }
1467 set_current_state(TASK_INTERRUPTIBLE);
1468 schedule();
1469
1470 if (kthread_should_stop())
1471 break;
1472 }
1473
1474 return 0;
1475}
1476#endif
1477
1478static int __init cpufreq_gov_dbs_init(void)
1479{
1480 u64 idle_time;
1481 int cpu = get_cpu();
1482
1483 #if INPUT_BOOST
1484 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1485 #endif
1486
1487 idle_time = get_cpu_idle_time_us(cpu, NULL);
1488 put_cpu();
1489 if (idle_time != -1ULL) {
1490 /* Idle micro accounting is supported. Use finer thresholds */
1491 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
1492 dbs_tuners_ins.down_differential =
1493 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
1494 dbs_tuners_ins.cpu_up_threshold =
1495 MICRO_CPU_UP_THRESHOLD;
1496 dbs_tuners_ins.cpu_down_differential =
1497 MICRO_CPU_DOWN_DIFFERENTIAL;
1498 /*
1499 * In nohz/micro accounting case we set the minimum frequency
1500 * not depending on HZ, but fixed (very low). The deferred
1501 * timer might skip some samples if idle/sleeping as needed.
1502 */
1503 min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
1504 } else {
1505 /* For correct statistics, we need 10 ticks for each measure */
1506 min_sampling_rate =
1507 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
1508 }
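
 /*
 * Example (illustrative, assuming HZ = 100): jiffies_to_usecs(10) is
 * 100,000 us, so min_sampling_rate = 2 * 100,000 = 200,000 us (200 ms).
 * With idle micro-accounting available, the fixed 30,000 us value above
 * is used instead.
 */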
1509
1510 dbs_tuners_ins.cpu_num_limit = num_possible_cpus();
1511 dbs_tuners_ins.cpu_num_base = 1;
1512
1513 if (dbs_tuners_ins.cpu_num_limit > 1)
1514 dbs_tuners_ins.is_cpu_hotplug_disable = 0;
1515
1516 #ifdef CONFIG_SMP
1517 INIT_DELAYED_WORK(&hp_work, hp_work_handler);
1518 #endif
1519
1520
1521 #if INPUT_BOOST
1522 freq_up_task = kthread_create(touch_freq_up_task, NULL,
1523 "touch_freq_up_task");
1524 if (IS_ERR(freq_up_task))
1525 return PTR_ERR(freq_up_task);
1526
1527 sched_setscheduler_nocheck(freq_up_task, SCHED_FIFO, &param);
1528 get_task_struct(freq_up_task);
1529 #endif
1530
1531 #ifdef DEBUG_LOG
1532 printk("cpufreq_gov_dbs_init: min_sampling_rate = %d\n", min_sampling_rate);
1533 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.up_threshold = %d\n", dbs_tuners_ins.up_threshold);
1534 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.od_threshold = %d\n", dbs_tuners_ins.od_threshold);
1535 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.down_differential = %d\n", dbs_tuners_ins.down_differential);
1536 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_threshold = %d\n", dbs_tuners_ins.cpu_up_threshold);
1537 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_differential = %d\n", dbs_tuners_ins.cpu_down_differential);
1538 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_up_avg_times = %d\n", dbs_tuners_ins.cpu_up_avg_times);
1539 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_down_avg_times = %d\n", dbs_tuners_ins.cpu_down_avg_times);
1540 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.thermal_di_avg_times = %d\n", dbs_tuners_ins.thermal_dispatch_avg_times);
1541 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_limit = %d\n", dbs_tuners_ins.cpu_num_limit);
1542 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_num_base = %d\n", dbs_tuners_ins.cpu_num_base);
1543 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.is_cpu_hotplug_disable = %d\n", dbs_tuners_ins.is_cpu_hotplug_disable);
1544 #if INPUT_BOOST
1545 printk("cpufreq_gov_dbs_init: dbs_tuners_ins.cpu_input_boost_enable = %d\n", dbs_tuners_ins.cpu_input_boost_enable);
1546 #endif /* INPUT_BOOST */
1547 #endif /* DEBUG_LOG */
1548
1549 return cpufreq_register_governor(&cpufreq_gov_balance);
1550}
1551
1552static void __exit cpufreq_gov_dbs_exit(void)
1553{
1554 #ifdef CONFIG_SMP
1555 cancel_delayed_work_sync(&hp_work);
1556 #endif
1557
1558 cpufreq_unregister_governor(&cpufreq_gov_balance);
1559
1560 #if INPUT_BOOST
1561 kthread_stop(freq_up_task);
1562 put_task_struct(freq_up_task);
1563 #endif
1564}
1565
1566
1567MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
1568MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
1569MODULE_DESCRIPTION("'cpufreq_balance' - A dynamic cpufreq governor for "
1570 "Low Latency Frequency Transition capable processors");
1571MODULE_LICENSE("GPL");
1572
1573#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BALANCE
1574fs_initcall(cpufreq_gov_dbs_init);
1575#else
1576module_init(cpufreq_gov_dbs_init);
1577#endif
1578module_exit(cpufreq_gov_dbs_exit);