x86: Merge the x86_32 and x86_64 cpu_idle() functions
author Richard Weinberger <richard@nod.at>
Sun, 25 Mar 2012 21:00:04 +0000 (23:00 +0200)
committer Ingo Molnar <mingo@kernel.org>
Mon, 26 Mar 2012 01:16:07 +0000 (03:16 +0200)
Both functions are mostly identical.
The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a
  no-op on both x86_32 and x86_64, so the call can simply be dropped.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32
  these functions are no-ops.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit()
  in the innermost loop, because the idle notifiers need RCU.
  Calling these functions in the innermost loop on x86_32 as well
  does not hurt.

So we can merge both functions.
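
For context, enter_idle()/__exit_idle() drive the x86_64 idle notifier
chain that this patch moves into process.c. A minimal sketch of a
consumer of that chain (the my_idle_nb/my_idle_notify names are
hypothetical; the registration API and the IDLE_START/IDLE_END actions
are the ones visible in the diff below):

        #include <linux/notifier.h>
        #include <asm/idle.h>

        /* Hypothetical consumer; called from the atomic notifier chain. */
        static int my_idle_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
        {
                switch (action) {
                case IDLE_START:
                        /* This CPU is about to go idle; IRQs are off here. */
                        break;
                case IDLE_END:
                        /* This CPU left idle via __exit_idle()/exit_idle(). */
                        break;
                }
                return NOTIFY_OK;
        }

        static struct notifier_block my_idle_nb = {
                .notifier_call = my_idle_notify,
        };

        /* In some init path: idle_notifier_register(&my_idle_nb); */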

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/idle.h
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c

index f49253d75710e90bd31a86956d941b0cfb289940..c5d1785373ed38c25f1096467d103cd6440c1b66 100644
@@ -14,6 +14,7 @@ void exit_idle(void);
 #else /* !CONFIG_X86_64 */
 static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
 void amd_e400_remove_cpu(int cpu);
index 14baf78d5a1fd86b969941550f83024ed7c77246..29309c42b9e5743b81d30bb2dd946b1ce32dfec6 100644
@@ -12,6 +12,9 @@
 #include <linux/user-return-notifier.h>
 #include <linux/dmi.h>
 #include <linux/utsname.h>
+#include <linux/stackprotector.h>
+#include <linux/tick.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
 #include <asm/cpu.h>
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
+#include <asm/nmi.h>
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU(unsigned char, is_idle);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+       atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -371,6 +392,99 @@ static inline int hlt_use_halt(void)
 }
 #endif
 
+#ifndef CONFIG_SMP
+static inline void play_dead(void)
+{
+       BUG();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void enter_idle(void)
+{
+       percpu_write(is_idle, 1);
+       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+               return;
+       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+       /* idle loop has pid 0 */
+       if (current->pid)
+               return;
+       __exit_idle();
+}
+#endif
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we wont ever return from this function (so the invalid
+        * canaries already on the stack wont ever trigger).
+        */
+       boot_init_stack_canary();
+       current_thread_info()->status |= TS_POLLING;
+
+       while (1) {
+               tick_nohz_idle_enter();
+
+               while (!need_resched()) {
+                       rmb();
+
+                       if (cpu_is_offline(smp_processor_id()))
+                               play_dead();
+
+                       /*
+                        * Idle routines should keep interrupts disabled
+                        * from here on, until they go to idle.
+                        * Otherwise, idle callbacks can misfire.
+                        */
+                       local_touch_nmi();
+                       local_irq_disable();
+
+                       enter_idle();
+
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
+
+                       /* enter_idle() needs rcu for notifiers */
+                       rcu_idle_enter();
+
+                       if (cpuidle_idle_call())
+                               pm_idle();
+
+                       rcu_idle_exit();
+                       start_critical_timings();
+
+                       /* In many cases the interrupt that ended idle
+                          has already called exit_idle. But some idle
+                          loops can be woken up without interrupt. */
+                       __exit_idle();
+               }
+
+               tick_nohz_idle_exit();
+               schedule_preempt_disabled();
+       }
+}
+
 /*
  * We use this if we don't have any better
  * idle routine..
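
A note on the __exit_idle() idiom used above: an interrupt that ends
idle may already have called exit_idle(), and the loop then calls
__exit_idle() unconditionally, so the per-cpu is_idle flag is consumed
with a test-and-clear to make IDLE_END fire at most once per idle
period. Roughly the same idiom, sketched with the generic
this_cpu_xchg() helper instead of the x86-specific bit operation:

        /* Sketch only; the patch uses x86_test_and_clear_bit_percpu(0, is_idle). */
        static void __exit_idle_sketch(void)
        {
                /* Only the first caller since enter_idle() sees a non-zero flag. */
                if (this_cpu_xchg(is_idle, 0) == 0)
                        return;
                atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
        }
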
index 9d7d4842bfafee620bbead3aac128594bbe7f912..ea207c245aa49689bbd2742c1c7f3ccfed2daa86 100644
@@ -9,7 +9,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
-#include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -58,7 +55,6 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
        return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-       BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-       int cpu = smp_processor_id();
-
-       /*
-        * If we're the non-boot CPU, nothing set the stack canary up
-        * for us.  CPU0 already has it initialized but no harm in
-        * doing it again.  This is a good place for updating it, as
-        * we wont ever return from this function (so the invalid
-        * canaries already on the stack wont ever trigger).
-        */
-       boot_init_stack_canary();
-
-       current_thread_info()->status |= TS_POLLING;
-
-       /* endless idle loop with no priority at all */
-       while (1) {
-               tick_nohz_idle_enter();
-               rcu_idle_enter();
-               while (!need_resched()) {
-
-                       check_pgt_cache();
-                       rmb();
-
-                       if (cpu_is_offline(cpu))
-                               play_dead();
-
-                       local_touch_nmi();
-                       local_irq_disable();
-                       /* Don't trace irqs off for idle */
-                       stop_critical_timings();
-                       if (cpuidle_idle_call())
-                               pm_idle();
-                       start_critical_timings();
-               }
-               rcu_idle_exit();
-               tick_nohz_idle_exit();
-               schedule_preempt_disabled();
-       }
-}
-
 void __show_regs(struct pt_regs *regs, int all)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
index 292da13fc5aa85867522a5004475cfa9c594e56a..ce5e34f2becac3551e077d297cfa6fa21f01caa0 100644
@@ -14,7 +14,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/tick.h>
 #include <linux/prctl.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
-       percpu_write(is_idle, 1);
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
-               return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-       /* idle loop has pid 0 */
-       if (current->pid)
-               return;
-       __exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-       BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-       current_thread_info()->status |= TS_POLLING;
-
-       /*
-        * If we're the non-boot CPU, nothing set the stack canary up
-        * for us.  CPU0 already has it initialized but no harm in
-        * doing it again.  This is a good place for updating it, as
-        * we wont ever return from this function (so the invalid
-        * canaries already on the stack wont ever trigger).
-        */
-       boot_init_stack_canary();
-
-       /* endless idle loop with no priority at all */
-       while (1) {
-               tick_nohz_idle_enter();
-               while (!need_resched()) {
-
-                       rmb();
-
-                       if (cpu_is_offline(smp_processor_id()))
-                               play_dead();
-                       /*
-                        * Idle routines should keep interrupts disabled
-                        * from here on, until they go to idle.
-                        * Otherwise, idle callbacks can misfire.
-                        */
-                       local_touch_nmi();
-                       local_irq_disable();
-                       enter_idle();
-                       /* Don't trace irqs off for idle */
-                       stop_critical_timings();
-
-                       /* enter_idle() needs rcu for notifiers */
-                       rcu_idle_enter();
-
-                       if (cpuidle_idle_call())
-                               pm_idle();
-
-                       rcu_idle_exit();
-                       start_critical_timings();
-
-                       /* In many cases the interrupt that ended idle
-                          has already called exit_idle. But some idle
-                          loops can be woken up without interrupt. */
-                       __exit_idle();
-               }
-
-               tick_nohz_idle_exit();
-               schedule_preempt_disabled();
-       }
-}
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)