 		return NULL;

 	sg_policy->policy = policy;
+	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+	mutex_init(&sg_policy->work_lock);
 	raw_spin_lock_init(&sg_policy->update_lock);
 	return sg_policy;
 }

 static void sugov_policy_free(struct sugov_policy *sg_policy)
 {
+	mutex_destroy(&sg_policy->work_lock);
 	kfree(sg_policy);
 }
 	sg_policy->thread = thread;
 	kthread_bind_mask(thread, policy->related_cpus);
-	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
-	mutex_init(&sg_policy->work_lock);
-
 	wake_up_process(thread);

 	return 0;
 	kthread_flush_worker(&sg_policy->worker);
 	kthread_stop(sg_policy->thread);
-	mutex_destroy(&sg_policy->work_lock);
 }

 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
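For context, the two fields being moved are consumed only on the slow path: when the driver cannot switch frequency from scheduler context, the update hook raises sg_policy->irq_work, whose handler queues sg_policy->work on the governor kthread, and the worker serializes the actual frequency change with work_lock. A minimal sketch of those handlers, assuming they match the upstream file of this era:

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	/* work_lock serializes the blocking frequency change */
	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
	/* hand off to the governor kthread created in sugov_kthread_create() */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}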
 	synchronize_sched();

-	if (!policy->fast_switch_enabled) {
-		irq_work_sync(&sg_policy->irq_work);
-		kthread_cancel_work_sync(&sg_policy->work);
-	}
+	irq_work_sync(&sg_policy->irq_work);
+	kthread_cancel_work_sync(&sg_policy->work);
 }

 static void sugov_limits(struct cpufreq_policy *policy)
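The last hunk drops the fast_switch_enabled guard, so sugov_stop() now always syncs the irq_work and cancels any pending kthread work before the policy data is freed. A sketch of the resulting function, again assuming the unchanged lines match the upstream source:

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	/* stop feeding scheduler utilization updates into this governor */
	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	/* flush slow-path work unconditionally now */
	irq_work_sync(&sg_policy->irq_work);
	kthread_cancel_work_sync(&sg_policy->work);
}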