/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

unsigned long boosted_cpu_util(int cpu);

#define SUGOV_KTHREAD_PRIORITY	50
struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int up_rate_limit_us;
	unsigned int down_rate_limit_us;
};
struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 min_rate_limit_ns;
	s64 up_rate_delay_ns;
	s64 down_rate_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};
struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	unsigned int iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-cpu data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-cpu
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
		return false;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	/* No need to recalculate next freq for min_rate_limit_us
	 * at least. However we might still decide to further rate
	 * limit once frequency change direction is decided, according
	 * to the separate rate limits.
	 */
	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->min_rate_limit_ns;
}
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
{
	s64 delta_ns;

	delta_ns = time - sg_policy->last_freq_update_time;

	if (next_freq > sg_policy->next_freq &&
	    delta_ns < sg_policy->up_rate_delay_ns)
		return true;

	if (next_freq < sg_policy->next_freq &&
	    delta_ns < sg_policy->down_rate_delay_ns)
		return true;

	return false;
}
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (!next_freq)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
	unsigned long max_cap, rt;

	max_cap = arch_scale_cpu_capacity(NULL, cpu);

	rt = sched_get_rt_rq_util(cpu);

	*util = boosted_cpu_util(cpu) + rt;
	*util = min(*util, max_cap);
	*max = max_cap;
}
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		if (sg_cpu->iowait_boost_pending)
			return;

		sg_cpu->iowait_boost_pending = true;

		if (sg_cpu->iowait_boost) {
			sg_cpu->iowait_boost <<= 1;
			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
		} else {
			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
		}
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC) {
			sg_cpu->iowait_boost = 0;
			sg_cpu->iowait_boost_pending = false;
		}
	}
}
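
/*
 * Illustrative example (not part of the original source): on a policy with
 * min = 400000 kHz and iowait_boost_max = 2000000 kHz, consecutive wakeups
 * from I/O wait double the boost through 400000, 800000 and 1600000 kHz and
 * then clamp it at 2000000 kHz. Once updates stop carrying
 * SCHED_CPUFREQ_IOWAIT, sugov_iowait_boost() below halves the boost on each
 * update until it drops under policy->min and is cleared.
 */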
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned int boost_util, boost_max;

	if (!sg_cpu->iowait_boost)
		return;

	if (sg_cpu->iowait_boost_pending) {
		sg_cpu->iowait_boost_pending = false;
	} else {
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	boost_util = sg_cpu->iowait_boost;
	boost_max = sg_cpu->iowait_boost_max;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
}
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
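
/*
 * Illustrative note (not part of the original source): the busy check
 * samples the NOHZ idle-call counter. If tick_nohz_get_idle_calls_cpu()
 * returns the same value as on the previous frequency update, the CPU never
 * entered the idle loop in between, so it is considered busy and
 * sugov_update_single() below skips a proposed frequency reduction.
 */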
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, sg_cpu->cpu);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq) {
			next_f = sg_policy->next_freq;

			/* Reset cached freq as next_freq has changed */
			sg_policy->cached_raw_freq = 0;
		}
	}
	sugov_update_commit(sg_policy, time, next_f);
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			j_sg_cpu->iowait_boost_pending = false;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}
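
/*
 * Illustrative example (not part of the original source): for a policy
 * shared by two CPUs where CPU0 reports util/max = 300/1024 and CPU1
 * reports 700/1024, the cross-multiplication "j_util * max > j_max * util"
 * compares the two ratios without a division and picks CPU1's pair, so the
 * shared frequency is computed for the busiest CPU in the policy.
 */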
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max, sg_cpu->cpu);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}
static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
{
	mutex_lock(&min_rate_lock);
	sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
					   sg_policy->down_rate_delay_ns);
	mutex_unlock(&min_rate_lock);
}
static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
}

static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
}
static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->up_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
	}

	return count;
}

static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->down_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
	}

	return count;
}
static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&up_rate_limit_us.attr,
	&down_rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};
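
/*
 * Usage sketch (illustrative, not part of the original source): with
 * per-policy tunables, the attributes above typically appear under a path
 * such as /sys/devices/system/cpu/cpufreq/policy0/schedutil/, so the rate
 * limits can be inspected and tuned from a shell:
 *
 *	cat /sys/devices/system/cpu/cpufreq/policy0/schedutil/up_rate_limit_us
 *	echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/down_rate_limit_us
 *
 * Writes go through the _store() handlers above, which update the tunables
 * and refresh each attached policy's cached delay values.
 */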
/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;

	/* Kthread is bound to all CPUs by default */
	if (!policy->dvfs_possible_from_any_cpu)
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}
static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
	tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	/*
	 * Unlock here rather than under free_sg_policy: a kthread-creation
	 * failure jumps there without holding global_tunables_lock.
	 */
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->up_rate_delay_ns =
		sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
	sg_policy->down_rate_delay_ns =
		sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
	update_min_rate_limit_ns(sg_policy);
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = SCHED_CPUFREQ_DL;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}
static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);