#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
-#include <linux/kthread.h>
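+#include <linux/workqueue.h>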
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <linux/cpu_pm.h>
unsigned long boosted_cpu_util(int cpu);
-#define SUGOV_KTHREAD_PRIORITY 50
-
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int up_rate_limit_us;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
- struct kthread_work work;
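+ /* Slow-path work item, serviced by the system workqueue rather than a dedicated kthread */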
+ struct work_struct work;
struct mutex work_lock;
- struct kthread_worker worker;
- struct task_struct *thread;
bool work_in_progress;
bool need_freq_update;
raw_spin_unlock(&sg_policy->update_lock);
}
-static void sugov_work(struct kthread_work *work)
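+/*
+ * Slow path: runs in process context via the workqueue to carry out the
+ * frequency change for drivers that cannot use fast switching.
+ */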
+static void sugov_work(struct work_struct *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
struct sugov_policy *sg_policy;
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
-
- /*
- * For RT and deadline tasks, the schedutil governor shoots the
- * frequency to maximum. Special care must be taken to ensure that
- * this kthread doesn't result in the same behavior.
- *
- * This is (mostly) guaranteed by the work_in_progress flag. The flag is
- * updated only at the end of sugov_work(), and before that schedutil
- * rejects all other frequency scaling requests.
- *
- * There is a very rare case though, where the RT thread yields right
- * after the work_in_progress flag is cleared. The effects of that are
- * neglected for now.
- */
- kthread_queue_work(&sg_policy->worker, &sg_policy->work);
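+ /*
+ * irq_work runs in hard interrupt context, so hand the (possibly
+ * sleeping) frequency update off to process context on this CPU.
+ */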
+ schedule_work_on(smp_processor_id(), &sg_policy->work);
}
/************************ Governor externals ***********************/
sg_policy->policy = policy;
init_irq_work(&sg_policy->irq_work, sugov_irq_work);
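+ /* Initialize the slow-path work; it is only queued when fast switching is unavailable */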
+ INIT_WORK(&sg_policy->work, sugov_work);
mutex_init(&sg_policy->work_lock);
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
kfree(sg_policy);
}
-static int sugov_kthread_create(struct sugov_policy *sg_policy)
-{
- struct task_struct *thread;
- struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
- struct cpufreq_policy *policy = sg_policy->policy;
- int ret;
-
- /* kthread only required for slow path */
- if (policy->fast_switch_enabled)
- return 0;
-
- kthread_init_work(&sg_policy->work, sugov_work);
- kthread_init_worker(&sg_policy->worker);
- thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
- "sugov:%d",
- cpumask_first(policy->related_cpus));
- if (IS_ERR(thread)) {
- pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
- return PTR_ERR(thread);
- }
-
- ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
- if (ret) {
- kthread_stop(thread);
- pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
- return ret;
- }
-
- sg_policy->thread = thread;
- kthread_bind_mask(thread, policy->related_cpus);
- wake_up_process(thread);
-
- return 0;
-}
-
-static void sugov_kthread_stop(struct sugov_policy *sg_policy)
-{
- /* kthread only required for slow path */
- if (sg_policy->policy->fast_switch_enabled)
- return;
-
- kthread_flush_worker(&sg_policy->worker);
- kthread_stop(sg_policy->thread);
-}
-
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
struct sugov_tunables *tunables;
goto disable_fast_switch;
}
- ret = sugov_kthread_create(sg_policy);
- if (ret)
- goto free_sg_policy;
-
mutex_lock(&global_tunables_lock);
if (global_tunables) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
- goto stop_kthread;
+ goto free_sg_policy;
}
policy->governor_data = sg_policy;
sg_policy->tunables = global_tunables;
tunables = sugov_tunables_alloc(sg_policy);
if (!tunables) {
ret = -ENOMEM;
- goto stop_kthread;
+ goto free_sg_policy;
}
tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
policy->governor_data = NULL;
sugov_clear_global_tunables();
-stop_kthread:
- sugov_kthread_stop(sg_policy);
-
free_sg_policy:
mutex_unlock(&global_tunables_lock);
mutex_unlock(&global_tunables_lock);
- sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
cpufreq_disable_fast_switch(policy);
}
synchronize_sched();
irq_work_sync(&sg_policy->irq_work);
- kthread_cancel_work_sync(&sg_policy->work);
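+ /* Cancel pending slow-path work and wait for any in-flight handler to finish before teardown */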
+ cancel_work_sync(&sg_policy->work);
}
static void sugov_limits(struct cpufreq_policy *policy)