		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}
+static void start_power_clamp_thread(unsigned long cpu)
+{
+	struct task_struct **p = per_cpu_ptr(powerclamp_thread, cpu);
+	struct task_struct *thread;
+
+	thread = kthread_create_on_node(clamp_thread,
+					(void *) cpu,
+					cpu_to_node(cpu),
+					"kidle_inject/%ld", cpu);
+	if (IS_ERR(thread))
+		return;
+
+	/* bind to cpu here */
+	kthread_bind(thread, cpu);
+	wake_up_process(thread);
+	*p = thread;
+}
+
static int start_power_clamp(void)
{
	unsigned long cpu;
-	struct task_struct *thread;
	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
	/* prevent cpu hotplug */
	/* start one thread per online cpu */
	for_each_online_cpu(cpu) {
-		struct task_struct **p =
-			per_cpu_ptr(powerclamp_thread, cpu);
-
-		thread = kthread_create_on_node(clamp_thread,
-						(void *) cpu,
-						cpu_to_node(cpu),
-						"kidle_inject/%ld", cpu);
-		/* bind to cpu here */
-		if (likely(!IS_ERR(thread))) {
-			kthread_bind(thread, cpu);
-			wake_up_process(thread);
-			*p = thread;
-		}
-
+		start_power_clamp_thread(cpu);
	}
	put_online_cpus();
				unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
-	struct task_struct *thread;
	struct task_struct **percpu_thread =
		per_cpu_ptr(powerclamp_thread, cpu);
	switch (action) {
	case CPU_ONLINE:
-		thread = kthread_create_on_node(clamp_thread,
-						(void *) cpu,
-						cpu_to_node(cpu),
-						"kidle_inject/%lu", cpu);
-		if (likely(!IS_ERR(thread))) {
-			kthread_bind(thread, cpu);
-			wake_up_process(thread);
-			*percpu_thread = thread;
-		}
+		start_power_clamp_thread(cpu);
		/* prefer BSP as controlling CPU */
		if (cpu == 0) {
			control_cpu = 0;