/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/mm.h>
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
#include <mtlbprof/mtlbprof.h>
#endif
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

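/*
 * Note that cpu_idle_force_poll is a refcount, not a bool: each
 * cpu_idle_poll_ctrl(true) call bumps it and must be paired with a
 * cpu_idle_poll_ctrl(false), so several independent users can force
 * polling idle at the same time.
 */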
void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

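/*
 * Boot time control: "nohlt" on the kernel command line forces the
 * polling idle loop, "hlt" restores the default arch idle. Only
 * available on architectures that select GENERIC_IDLE_POLL_SETUP.
 */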
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

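/*
 * Polling idle: spin with interrupts enabled until a reschedule is
 * pending. RCU is told this CPU is idle for the duration so grace
 * periods are not held up, and the trace events expose the poll to
 * power analysis tooling.
 */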
static inline int cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!tif_need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
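/*
 * Default arch_cpu_idle(): with no low power instruction available we
 * can only fall back to polling, so force poll mode permanently and
 * reenable interrupts as callers expect.
 */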
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_NO_TASK_STATE);
#endif

        while (1) {
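                /*
                 * Stop the periodic tick for the idle period; it is
                 * restarted by tick_nohz_idle_exit() below once there
                 * is work to do.
                 */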
                tick_nohz_idle_enter();

                while (!need_resched()) {
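                        /*
                         * check_pgt_cache() trims the per-CPU page
                         * table cache while we have nothing better to
                         * do. The rmb() is presumably there to order
                         * this CPU's view of its offline state against
                         * the hotplug code's store, so an unplugged
                         * CPU reliably reaches arch_cpu_idle_dead().
                         */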
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();

#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
                        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_IDLE_STATE);
#endif

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we reenable interrupts and spin.
                         *
                         * Also if we detected in the wakeup from idle
                         * path that the tick broadcast device expired
                         * for us, we don't want to go deep idle as we
                         * know that the IPI is going to arrive right
                         * away.
                         */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
                                cpu_idle_poll();
                        } else {
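                                /*
                                 * current_clr_polling_and_test() drops
                                 * TIF_POLLING_NRFLAG and rechecks
                                 * need_resched(): a remote waker that
                                 * saw the polling flag skipped the
                                 * resched IPI, so sleeping now would
                                 * miss that wakeup.
                                 */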
                                if (!current_clr_polling_and_test()) {
                                        stop_critical_timings();
                                        rcu_idle_enter();
                                        arch_cpu_idle();
                                        WARN_ON_ONCE(irqs_disabled());
                                        rcu_idle_exit();
                                        start_critical_timings();
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
                                        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_NO_TASK_STATE);
#endif
                                } else {
                                        local_irq_enable();
                                }
                                __current_set_polling();
                        }
                        arch_cpu_idle_exit();
                }
                tick_nohz_idle_exit();
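                /*
                 * need_resched() is true: the tick was restarted above,
                 * now hand over to the runnable task. The idle task
                 * runs with preemption disabled throughout, hence the
                 * _preempt_disabled variant of schedule().
                 */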
                schedule_preempt_disabled();
        }
}

void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non boot cpus!). Will be fixed in 3.11.
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
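        /*
         * Set TIF_POLLING_NRFLAG before the first idle iteration so
         * remote wakeups can avoid the resched IPI from the start.
         */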
        __current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
}