ia64: Use generic idle thread allocation
author Thomas Gleixner <tglx@linutronix.de>
Fri, 20 Apr 2012 13:05:49 +0000 (13:05 +0000)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 26 Apr 2012 10:06:10 +0000 (12:06 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/20120420124557.380965133@linutronix.de
arch/ia64/Kconfig
arch/ia64/kernel/smpboot.c

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index bd7266903bf8f497b4ddfd416df6091335d17039..11975475516a6e3caaf7fd8dd2ee674300f43b39 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -33,6 +33,7 @@ config IA64
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_IOMAP
+       select GENERIC_SMP_IDLE_THREAD
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 03e4ef3893c93dd7a242f02f9d732ebf0144bcf8..1113b8aba07f5a0caa632ff01dc8111899e49185 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
 #define bsp_remove_ok  0
 #endif
 
-/*
- * Store all idle threads, this can be reused instead of creating
- * a new thread. Also avoids complicated thread destroy functionality
- * for idle threads.
- */
-struct task_struct *idle_thread_array[NR_CPUS];
-
 /*
  * Global array allocated for NR_CPUS at boot time
  */
@@ -94,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
 
 #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
 
-#define get_idle_for_cpu(x)            (idle_thread_array[(x)])
-#define set_idle_for_cpu(x,p)  (idle_thread_array[(x)] = (p))
-
 #else
-
-#define get_idle_for_cpu(x)            (NULL)
-#define set_idle_for_cpu(x,p)
 #define set_brendez_area(x)
 #endif
 
@@ -480,54 +467,12 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
        return NULL;
 }
 
-struct create_idle {
-       struct work_struct work;
-       struct task_struct *idle;
-       struct completion done;
-       int cpu;
-};
-
-void __cpuinit
-do_fork_idle(struct work_struct *work)
-{
-       struct create_idle *c_idle =
-               container_of(work, struct create_idle, work);
-
-       c_idle->idle = fork_idle(c_idle->cpu);
-       complete(&c_idle->done);
-}
-
 static int __cpuinit
-do_boot_cpu (int sapicid, int cpu)
+do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 {
        int timeout;
-       struct create_idle c_idle = {
-               .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
-               .cpu    = cpu,
-               .done   = COMPLETION_INITIALIZER(c_idle.done),
-       };
-
-       /*
-        * We can't use kernel_thread since we must avoid to
-        * reschedule the child.
-        */
-       c_idle.idle = get_idle_for_cpu(cpu);
-       if (c_idle.idle) {
-               init_idle(c_idle.idle, cpu);
-               goto do_rest;
-       }
-
-       schedule_work(&c_idle.work);
-       wait_for_completion(&c_idle.done);
-
-       if (IS_ERR(c_idle.idle))
-               panic("failed fork for CPU %d", cpu);
-
-       set_idle_for_cpu(cpu, c_idle.idle);
-
-do_rest:
-       task_for_booting_cpu = c_idle.idle;
 
+       task_for_booting_cpu = idle;
        Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
 
        set_brendez_area(cpu);
@@ -811,7 +756,7 @@ __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Processor goes to start_secondary(), sets online flag */
-       ret = do_boot_cpu(sapicid, cpu);
+       ret = do_boot_cpu(sapicid, cpu, tidle);
        if (ret < 0)
                return ret;