/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load bursts (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};
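
/*
 * Illustrative note (not in the original sources): target_loads and
 * above_hispeed_delay are stored as flat arrays of the form
 * { value, freq, value, freq, ... }, i.e. a default value followed by
 * optional frequency:value overrides.  For example, writing
 * "85 1500000:90 1700000:99" to the target_loads sysfs file yields the
 * array {85, 1500000, 90, 1700000, 99}: target 85% load below 1.5 GHz,
 * 90% from 1.5 GHz up to 1.7 GHz, and 99% at or above 1.7 GHz.  The
 * frequencies shown are hypothetical table entries.
 */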

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race.  cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
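
/*
 * Illustrative walk-through (not in the original sources): with
 * target_loads = {85, 1500000, 90} and freq = 1600000, the loop above
 * starts at i = 0, sees freq >= target_loads[1] (1500000) and advances
 * to i = 2, then stops because i == ntarget_loads - 1; the result is
 * target_loads[2] = 90.  For freq = 1000000 the loop never advances and
 * the default value target_loads[0] = 85 is returned.  The
 * above_hispeed_delay lookup walks its pair array the same way.
 */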

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
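
/*
 * Illustrative example (not in the original sources): assume a
 * hypothetical frequency table {600000, 900000, 1200000} kHz, a flat
 * target load of 90, cur = 600000 and loadadjfreq = 60000000 (100% load
 * at 600 MHz).  The first iteration computes loadadjfreq / tl = 666666
 * and CPUFREQ_RELATION_L selects 900000; the second iteration computes
 * the same quotient, selects 900000 again, and the loop terminates.
 * 900 MHz is the lowest speed whose projected load (60000000 / 900000,
 * about 67%) stays at or below the 90% target.
 */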

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	u64 delta_idle;
	u64 delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - pcpu->time_in_idle);
	delta_time = (now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
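
/*
 * Illustrative arithmetic (not in the original sources): over a 20000 us
 * window with 5000 us of idle time at cur = 1000000 kHz, update_load()
 * adds 15000 * 1000000 to cputime_speedadj.  The timer callback below
 * divides by delta_time (20000) and multiplies by 100, giving
 * loadadjfreq = 75000000, i.e. "75% load at 1 GHz"; dividing by
 * target_freq then yields cpu_load = 75, which is compared against
 * go_hispeed_load and the per-frequency target loads.
 */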

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
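
/*
 * Illustrative walk-through (not in the original sources): with defaults
 * (go_hispeed_load = 99, hispeed_freq = policy->max), a CPU running at
 * its minimum speed that suddenly reports cpu_load >= 99 is bumped
 * straight to hispeed_freq rather than to the choose_freq() result.
 * Once at or above hispeed_freq, further increases are deferred until
 * the CPU has stayed there for above_hispeed_delay, and the
 * floor_freq/min_sample_time check above keeps the speed from dropping
 * again for at least min_sample_time (80 ms by default).
 */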

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already pending. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
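
/*
 * Illustrative note (not in the original sources): on a shared-clock
 * policy covering four CPUs with per-CPU target_freq values of, say,
 * 600000, 800000, 1000000 and 800000 kHz, the loop above drives the
 * policy to 1000000 kHz.  The hardware must satisfy the hungriest CPU,
 * so the policy speed is the maximum of the member CPUs' targets.
 */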

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
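
/*
 * Illustrative example (not in the original sources): for the input
 * "85 1500000:90", strpbrk() finds two separators, so ntokens = 3 (odd,
 * as required for a default value plus freq:value pairs) and the parse
 * loop fills tokenized_data = {85, 1500000, 90}.  An even token count
 * such as "85 1500000" is rejected with -EINVAL.
 */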

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + --ret, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}
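
/*
 * Illustrative usage (not in the original sources): with the system-wide
 * governor instance, userspace (e.g. an Android power HAL) can request a
 * transient ramp with something like
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * which holds all CPUs at or above hispeed_freq for
 * boostpulse_duration (80 ms by default); the written value itself is
 * ignored beyond parsing.  The path shown assumes the non-per-policy
 * sysfs layout.
 */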

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,	\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
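
/*
 * Illustrative note (not in the original sources): with a single
 * system-wide instance the tunables typically appear under
 * /sys/devices/system/cpu/cpufreq/interactive/, whereas with
 * have_governor_per_policy() they appear per policy, e.g. under
 * /sys/devices/system/cpu/cpuN/cpufreq/interactive/.  Exact paths
 * depend on the kernel's cpufreq sysfs layout.
 */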

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			return rc;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());
			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timer only if policy->max was
			 * raised.  Delete the timers under the write
			 * semaphore first: otherwise the timer callback may
			 * fail to acquire the read semaphore and return
			 * without re-arming itself, leaving the timer
			 * stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
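
/*
 * Illustrative usage (not in the original sources): once this governor
 * is registered, userspace can select it per policy with something like
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * provided the cpufreq driver's transition latency does not exceed
 * max_transition_latency (10 ms here).
 */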

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");