stop_machine: Store task reference in a separate per cpu variable
author Thomas Gleixner <tglx@linutronix.de>
Thu, 31 Jan 2013 12:11:13 +0000 (12:11 +0000)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 14 Feb 2013 14:29:37 +0000 (15:29 +0100)
To allow the stopper thread to be managed by the smpboot thread
infrastructure, separate out the task storage from the stopper data
structure.
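
The smpboot infrastructure tracks each managed thread through the
store member of struct smp_hotplug_thread, which must point at a
bare per-CPU struct task_struct * variable; a task pointer buried
inside struct cpu_stopper cannot be handed over that way. Only the
storage moves here; the hotplug notifier below still creates and
reaps the thread itself. An illustrative registration sketch follows
the diff.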

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Richard Weinberger <rw@linutronix.de>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130131120741.626690384@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/stop_machine.c

index 2f194e965715183786f2f8b640ddcbbf1b456dc4..aaac68c5c3be161c4f8edc7bdaab96de3eedc98c 100644
@@ -37,10 +37,10 @@ struct cpu_stopper {
        spinlock_t              lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
-       struct task_struct      *thread;        /* stopper thread */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(struct cpu_stopper *stopper,
-                               struct cpu_stop_work *work)
+static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+
        unsigned long flags;
 
        spin_lock_irqsave(&stopper->lock, flags);
 
        if (stopper->enabled) {
                list_add_tail(&work->list, &stopper->works);
-               wake_up_process(stopper->thread);
+               wake_up_process(p);
        } else
                cpu_stop_signal_done(work->done, false);
 
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
        cpu_stop_init_done(&done, 1);
-       cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+       cpu_stop_queue_work(cpu, &work);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
 }
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                        struct cpu_stop_work *work_buf)
 {
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-       cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+       cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
         */
        preempt_disable();
        for_each_cpu(cpu, cpumask)
-               cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
-                                   &per_cpu(stop_cpus_work, cpu));
+               cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
        preempt_enable();
 }
 
@@ -304,12 +305,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct task_struct *p;
+       struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               BUG_ON(stopper->thread || stopper->enabled ||
-                      !list_empty(&stopper->works));
+               BUG_ON(p || stopper->enabled || !list_empty(&stopper->works));
                p = kthread_create_on_node(cpu_stopper_thread,
                                           stopper,
                                           cpu_to_node(cpu),
@@ -319,12 +319,12 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                get_task_struct(p);
                kthread_bind(p, cpu);
                sched_set_stop_task(cpu, p);
-               stopper->thread = p;
+               per_cpu(cpu_stopper_task, cpu) = p;
                break;
 
        case CPU_ONLINE:
                /* strictly unnecessary, as first user will wake it */
-               wake_up_process(stopper->thread);
+               wake_up_process(p);
                /* mark enabled */
                spin_lock_irq(&stopper->lock);
                stopper->enabled = true;
@@ -339,7 +339,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 
                sched_set_stop_task(cpu, NULL);
                /* kill the stopper */
-               kthread_stop(stopper->thread);
+               kthread_stop(p);
                /* drain remaining works */
                spin_lock_irq(&stopper->lock);
                list_for_each_entry(work, &stopper->works, list)
@@ -347,8 +347,8 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                stopper->enabled = false;
                spin_unlock_irq(&stopper->lock);
                /* release the stopper */
-               put_task_struct(stopper->thread);
-               stopper->thread = NULL;
+               put_task_struct(p);
+               per_cpu(cpu_stopper_task, cpu) = NULL;
                break;
        }
 #endif
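
The separated task pointer is what a later conversion can hand to the
smpboot infrastructure. A minimal sketch of such a registration,
assuming the smpboot API of this kernel; the callback bodies are
hypothetical placeholders, not the eventual conversion:

	#include <linux/percpu.h>
	#include <linux/sched.h>
	#include <linux/smpboot.h>

	/* cpu_stopper_task is the per-CPU variable added by this patch */

	static int cpu_stop_should_run(unsigned int cpu)
	{
		/* hypothetical: report whether this CPU has queued work */
		return 0;
	}

	static void cpu_stopper_thread_fn(unsigned int cpu)
	{
		/* hypothetical: execute one queued cpu_stop_work */
	}

	static struct smp_hotplug_thread cpu_stop_threads = {
		.store			= &cpu_stopper_task,
		.thread_should_run	= cpu_stop_should_run,
		.thread_fn		= cpu_stopper_thread_fn,
		.thread_comm		= "migration/%u",
	};

	static int __init cpu_stop_threads_init(void)
	{
		/*
		 * Creates one "migration/N" thread per CPU and stores its
		 * task pointer in cpu_stopper_task, which would replace
		 * the manual kthread handling in the notifier above.
		 */
		return smpboot_register_percpu_thread(&cpu_stop_threads);
	}
	early_initcall(cpu_stop_threads_init);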