From a0c7abd15190b3231d6e1478f26834aae52f7677 Mon Sep 17 00:00:00 2001
From: Youngtae Lee
Date: Tue, 30 Jan 2018 17:15:12 +0900
Subject: [PATCH] Revert "cpufreq: schedutil: move slow path from workqueue to SCHED_FIFO task"

This reverts commit 02a7b1ee3baa15a98b541d8cfd156bbe1a091c20.

This switches the slow-path frequency update back from the dedicated
SCHED_FIFO kthread to a work item queued on the local CPU's system
workqueue via schedule_work_on().

Change-Id: I33dd4543bc56f7be867bfeb9bf4ff63350405f11
Signed-off-by: Youngtae Lee
---
 kernel/sched/cpufreq_schedutil.c | 99 ++++----------------------------
 1 file changed, 10 insertions(+), 89 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c06b2b886dde..107b1df2953d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,8 +12,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/cpufreq.h>
-#include <linux/kthread.h>
-#include <uapi/linux/sched/types.h>
 #include <linux/slab.h>
 #include <trace/events/power.h>
 
@@ -24,8 +22,6 @@
 
 unsigned long boosted_cpu_util(int cpu);
 
-#define SUGOV_KTHREAD_PRIORITY 50
-
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int up_rate_limit_us;
@@ -48,10 +44,8 @@ struct sugov_policy {
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
-	struct kthread_work work;
+	struct work_struct work;
 	struct mutex work_lock;
-	struct kthread_worker worker;
-	struct task_struct *thread;
 	bool work_in_progress;
 
 	bool need_freq_update;
@@ -457,7 +451,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	raw_spin_unlock(&sg_policy->update_lock);
 }
 
-static void sugov_work(struct kthread_work *work)
+static void sugov_work(struct work_struct *work)
 {
 	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
 
@@ -475,20 +469,7 @@ static void sugov_irq_work(struct irq_work *irq_work)
 
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
-	/*
-	 * For RT and deadline tasks, the schedutil governor shoots the
-	 * frequency to maximum. Special care must be taken to ensure that this
-	 * kthread doesn't result in the same behavior.
-	 *
-	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
-	 * updated only at the end of the sugov_work() function and before that
-	 * the schedutil governor rejects all other frequency scaling requests.
-	 *
-	 * There is a very rare case though, where the RT thread yields right
-	 * after the work_in_progress flag is cleared. The effects of that are
-	 * neglected for now.
-	 */
-	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
+	schedule_work_on(smp_processor_id(), &sg_policy->work);
 }
 
 /************************ Governor externals ***********************/
@@ -640,6 +621,9 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 		return NULL;
 
 	sg_policy->policy = policy;
+	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+	INIT_WORK(&sg_policy->work, sugov_work);
+	mutex_init(&sg_policy->work_lock);
 	raw_spin_lock_init(&sg_policy->update_lock);
 	return sg_policy;
 }
@@ -649,59 +633,6 @@ static void sugov_policy_free(struct sugov_policy *sg_policy)
 	kfree(sg_policy);
 }
 
-static int sugov_kthread_create(struct sugov_policy *sg_policy)
-{
-	struct task_struct *thread;
-	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
-	struct cpufreq_policy *policy = sg_policy->policy;
-	int ret;
-
-	/* kthread only required for slow path */
-	if (policy->fast_switch_enabled)
-		return 0;
-
-	kthread_init_work(&sg_policy->work, sugov_work);
-	kthread_init_worker(&sg_policy->worker);
-	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
-				"sugov:%d",
-				cpumask_first(policy->related_cpus));
-	if (IS_ERR(thread)) {
-		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
-		return PTR_ERR(thread);
-	}
-
-	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
-	if (ret) {
-		kthread_stop(thread);
-		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
-		return ret;
-	}
-
-	sg_policy->thread = thread;
-
-	/* Kthread is bound to all CPUs by default */
-	if (!policy->dvfs_possible_from_any_cpu)
-		kthread_bind_mask(thread, policy->related_cpus);
-
-	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
-	mutex_init(&sg_policy->work_lock);
-
-	wake_up_process(thread);
-
-	return 0;
-}
-
-static void sugov_kthread_stop(struct sugov_policy *sg_policy)
-{
-	/* kthread only required for slow path */
-	if (sg_policy->policy->fast_switch_enabled)
-		return;
-
-	kthread_flush_worker(&sg_policy->worker);
-	kthread_stop(sg_policy->thread);
-	mutex_destroy(&sg_policy->work_lock);
-}
-
 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
 {
 	struct sugov_tunables *tunables;
@@ -741,16 +672,12 @@ static int sugov_init(struct cpufreq_policy *policy)
 		goto disable_fast_switch;
 	}
 
-	ret = sugov_kthread_create(sg_policy);
-	if (ret)
-		goto free_sg_policy;
-
 	mutex_lock(&global_tunables_lock);
 
 	if (global_tunables) {
 		if (WARN_ON(have_governor_per_policy())) {
 			ret = -EINVAL;
-			goto stop_kthread;
+			goto free_sg_policy;
 		}
 		policy->governor_data = sg_policy;
 		sg_policy->tunables = global_tunables;
@@ -762,7 +689,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 	tunables = sugov_tunables_alloc(sg_policy);
 	if (!tunables) {
 		ret = -ENOMEM;
-		goto stop_kthread;
+		goto free_sg_policy;
 	}
 
 	tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
@@ -785,9 +712,6 @@ fail:
 	policy->governor_data = NULL;
 	sugov_tunables_free(tunables);
 
-stop_kthread:
-	sugov_kthread_stop(sg_policy);
-
 free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
@@ -813,7 +737,6 @@ static void sugov_exit(struct cpufreq_policy *policy)
 	if (!count)
 		sugov_tunables_free(tunables);
 
-	sugov_kthread_stop(sg_policy);
 	sugov_policy_free(sg_policy);
 
 	mutex_unlock(&global_tunables_lock);
@@ -870,10 +793,8 @@ static void sugov_stop(struct cpufreq_policy *policy)
 
 	synchronize_sched();
 
-	if (!policy->fast_switch_enabled) {
-		irq_work_sync(&sg_policy->irq_work);
-		kthread_cancel_work_sync(&sg_policy->work);
-	}
+	irq_work_sync(&sg_policy->irq_work);
+	cancel_work_sync(&sg_policy->work);
 }
 
 static void sugov_limits(struct cpufreq_policy *policy)
-- 
2.20.1
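
For reference, the slow-path flow that this revert restores is the generic
"kick from interrupt context, run the sleepable part from a workqueue" idiom:
sugov_irq_work() now hands the frequency update to the system workqueue on
the local CPU via schedule_work_on() instead of waking a dedicated SCHED_FIFO
kthread. The minimal, self-contained module below is only a sketch of that
idiom; the demo_* names are illustrative and not part of the patch, while the
APIs used (init_irq_work, irq_work_queue, INIT_WORK, schedule_work_on,
irq_work_sync, cancel_work_sync) are standard kernel interfaces. Teardown
mirrors sugov_stop(): sync the irq_work first so nothing new can be queued,
then cancel the work item.

/*
 * Sketch of the irq_work -> schedule_work_on() hand-off restored by this
 * revert. All demo_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct work_struct demo_work;
static struct irq_work demo_kick;

/* Runs in process context from the system workqueue; sleeping is allowed. */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: slow path executed in process context\n");
}

/* Runs in hard interrupt context; only hand the work off, never sleep. */
static void demo_kick_fn(struct irq_work *iw)
{
	schedule_work_on(smp_processor_id(), &demo_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	init_irq_work(&demo_kick, demo_kick_fn);
	irq_work_queue(&demo_kick);	/* kick once to demonstrate the flow */
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_kick);	/* same teardown order as sugov_stop() */
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");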