Revert "cpufreq: schedutil: move slow path from workqueue to SCHED_FIFO task"
author    Cosmin Tanislav <demonsingur@gmail.com>
          Tue, 16 Apr 2024 18:00:37 +0000 (21:00 +0300)
committer Cosmin Tanislav <demonsingur@gmail.com>
          Mon, 22 Apr 2024 17:24:05 +0000 (20:24 +0300)
This reverts commit 02a7b1ee3baa15a98b541d8cfd156bbe1a091c20.

With the revert applied, slow-path frequency updates are handled again by
a regular work item on the system workqueue, queued on the current CPU
from irq_work context, instead of by a dedicated SCHED_FIFO kthread
worker bound to the policy's CPUs.

Change-Id: Icd42a9920059daf44fdbefc0caff823f30ae2621

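For context, the slow path restored by this revert is the standard
irq_work -> workqueue handoff: the scheduler-context hook raises an
irq_work, whose handler queues a regular work item on the current CPU.
Below is a minimal, self-contained module sketch of that pattern,
assuming nothing beyond the stock workqueue and irq_work APIs; the
demo_* names are hypothetical and not part of schedutil.

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/irq_work.h>
	#include <linux/smp.h>

	static struct work_struct demo_work;
	static struct irq_work demo_irq_work;

	static void demo_work_fn(struct work_struct *work)
	{
		/* Slow path: runs later in ordinary kworker process context. */
		pr_info("slow path ran on a kworker\n");
	}

	static void demo_irq_work_fn(struct irq_work *iw)
	{
		/* Hard-irq context: queue on the CPU that raised the irq_work. */
		schedule_work_on(smp_processor_id(), &demo_work);
	}

	static int __init demo_init(void)
	{
		INIT_WORK(&demo_work, demo_work_fn);
		init_irq_work(&demo_irq_work, demo_irq_work_fn);
		irq_work_queue(&demo_irq_work);	/* simulate one update request */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		irq_work_sync(&demo_irq_work);
		cancel_work_sync(&demo_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The trade-off is that the update then competes with everything else on
the system workqueue, which is what the reverted commit had tried to
avoid.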
kernel/sched/cpufreq_schedutil.c

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e761d7d9a019147920785877dc3cd79f79543f71..cca814d5670c223dcc45b0a131c5c77aad0a15fe 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,7 +12,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/cpufreq.h>
-#include <linux/kthread.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/slab.h>
 #include <linux/cpu_pm.h>
@@ -24,8 +23,6 @@
 
 unsigned long boosted_cpu_util(int cpu);
 
-#define SUGOV_KTHREAD_PRIORITY 50
-
 struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int up_rate_limit_us;
@@ -48,10 +45,8 @@ struct sugov_policy {
 
        /* The next fields are only needed if fast switch cannot be used. */
        struct irq_work irq_work;
-       struct kthread_work work;
+       struct work_struct work;
        struct mutex work_lock;
-       struct kthread_worker worker;
-       struct task_struct *thread;
        bool work_in_progress;
 
        bool need_freq_update;
@@ -401,7 +396,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
        raw_spin_unlock(&sg_policy->update_lock);
 }
 
-static void sugov_work(struct kthread_work *work)
+static void sugov_work(struct work_struct *work)
 {
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
 
@@ -418,21 +413,7 @@ static void sugov_irq_work(struct irq_work *irq_work)
        struct sugov_policy *sg_policy;
 
        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
-
-       /*
-        * For Real Time and Deadline tasks, schedutil governor shoots the
-        * frequency to maximum. And special care must be taken to ensure that
-        * this kthread doesn't result in that.
-        *
-        * This is (mostly) guaranteed by the work_in_progress flag. The flag is
-        * updated only at the end of the sugov_work() and before that schedutil
-        * rejects all other frequency scaling requests.
-        *
-        * Though there is a very rare case where the RT thread yields right
-        * after the work_in_progress flag is cleared. The effects of that are
-        * neglected for now.
-        */
-       kthread_queue_work(&sg_policy->worker, &sg_policy->work);
+       schedule_work_on(smp_processor_id(), &sg_policy->work);
 }
 
 /************************ Governor externals ***********************/
@@ -593,6 +574,7 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 
        sg_policy->policy = policy;
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+       INIT_WORK(&sg_policy->work, sugov_work);
        mutex_init(&sg_policy->work_lock);
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
@@ -604,51 +586,6 @@ static void sugov_policy_free(struct sugov_policy *sg_policy)
        kfree(sg_policy);
 }
 
-static int sugov_kthread_create(struct sugov_policy *sg_policy)
-{
-       struct task_struct *thread;
-       struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
-       struct cpufreq_policy *policy = sg_policy->policy;
-       int ret;
-
-       /* kthread only required for slow path */
-       if (policy->fast_switch_enabled)
-               return 0;
-
-       kthread_init_work(&sg_policy->work, sugov_work);
-       kthread_init_worker(&sg_policy->worker);
-       thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
-                               "sugov:%d",
-                               cpumask_first(policy->related_cpus));
-       if (IS_ERR(thread)) {
-               pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
-               return PTR_ERR(thread);
-       }
-
-       ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
-       if (ret) {
-               kthread_stop(thread);
-               pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
-               return ret;
-       }
-
-       sg_policy->thread = thread;
-       kthread_bind_mask(thread, policy->related_cpus);
-       wake_up_process(thread);
-
-       return 0;
-}
-
-static void sugov_kthread_stop(struct sugov_policy *sg_policy)
-{
-       /* kthread only required for slow path */
-       if (sg_policy->policy->fast_switch_enabled)
-               return;
-
-       kthread_flush_worker(&sg_policy->worker);
-       kthread_stop(sg_policy->thread);
-}
-
 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
 {
        struct sugov_tunables *tunables;
@@ -686,16 +623,12 @@ static int sugov_init(struct cpufreq_policy *policy)
                goto disable_fast_switch;
        }
 
-       ret = sugov_kthread_create(sg_policy);
-       if (ret)
-               goto free_sg_policy;
-
        mutex_lock(&global_tunables_lock);
 
        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
-                       goto stop_kthread;
+                       goto free_sg_policy;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;
@@ -707,7 +640,7 @@ static int sugov_init(struct cpufreq_policy *policy)
        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
-               goto stop_kthread;
+               goto free_sg_policy;
        }
 
        tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
@@ -731,9 +664,6 @@ fail:
        policy->governor_data = NULL;
        sugov_clear_global_tunables();
 
-stop_kthread:
-       sugov_kthread_stop(sg_policy);
-
 free_sg_policy:
        mutex_unlock(&global_tunables_lock);
 
@@ -761,7 +691,6 @@ static void sugov_exit(struct cpufreq_policy *policy)
 
        mutex_unlock(&global_tunables_lock);
 
-       sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
 }
@@ -815,7 +744,7 @@ static void sugov_stop(struct cpufreq_policy *policy)
        synchronize_sched();
 
        irq_work_sync(&sg_policy->irq_work);
-       kthread_cancel_work_sync(&sg_policy->work);
+       cancel_work_sync(&sg_policy->work);
 }
 
 static void sugov_limits(struct cpufreq_policy *policy)
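
For comparison, the mechanism this revert removes (sugov_kthread_create()
above) dedicates one kthread worker per policy and raises it to
SCHED_FIFO so slow-path updates cannot be starved by ordinary workqueue
traffic. A minimal module sketch of that pattern follows, assuming the
pre-5.9 API used by this tree (on newer kernels
sched_setscheduler_nocheck() is not available to modules and
sched_set_fifo() is the usual replacement); the demo_* names are again
hypothetical.

	#include <linux/module.h>
	#include <linux/kthread.h>
	#include <linux/irq_work.h>
	#include <linux/sched.h>
	#include <uapi/linux/sched/types.h>

	static struct kthread_worker demo_worker;
	static struct kthread_work demo_work;
	static struct task_struct *demo_thread;
	static struct irq_work demo_irq_work;

	static void demo_work_fn(struct kthread_work *work)
	{
		/* Slow path: runs in process context on the SCHED_FIFO worker. */
		pr_info("slow path ran on RT worker\n");
	}

	static void demo_irq_work_fn(struct irq_work *iw)
	{
		/* Hard-irq context: hand the update off to the RT worker. */
		kthread_queue_work(&demo_worker, &demo_work);
	}

	static int __init demo_init(void)
	{
		struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
		int ret;

		kthread_init_work(&demo_work, demo_work_fn);
		kthread_init_worker(&demo_worker);

		demo_thread = kthread_create(kthread_worker_fn, &demo_worker,
					     "demo_sugov");
		if (IS_ERR(demo_thread))
			return PTR_ERR(demo_thread);

		ret = sched_setscheduler_nocheck(demo_thread, SCHED_FIFO, &param);
		if (ret) {
			kthread_stop(demo_thread);
			return ret;
		}
		wake_up_process(demo_thread);

		init_irq_work(&demo_irq_work, demo_irq_work_fn);
		irq_work_queue(&demo_irq_work);	/* simulate one update request */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		irq_work_sync(&demo_irq_work);
		kthread_flush_worker(&demo_worker);
		kthread_stop(demo_thread);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The teardown order mirrors sugov_stop() above: sync the irq_work first so
nothing new is queued, then flush and stop the worker.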