/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>
#include <linux/livepatch.h>
#include <linux/cpu_pm.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 * @index: Index of the recorded state.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state, int index)
{
	idle_set_state(this_rq(), idle_state);
	idle_set_state_idx(this_rq(), index);
}

static int __read_mostly cpu_idle_force_poll;

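/*
 * cpu_idle_poll_ctrl - enable/disable forced polling idle.
 *
 * Maintains a reference count: each enable call must be paired with a
 * disable call. While the count is non-zero, the idle loop spins in
 * cpu_idle_poll() instead of entering a low-power state. Illustrative
 * (hypothetical) usage from code that cannot tolerate deep-idle wakeup
 * latency:
 *
 *	cpu_idle_poll_ctrl(true);
 *	... latency-critical window ...
 *	cpu_idle_poll_ctrl(false);
 */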
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

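/*
 * Boot-time control of the above: "nohlt" forces the polling idle loop,
 * "hlt" clears the forced-polling flag again.
 */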
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

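/*
 * Polling idle loop: spin with interrupts enabled until a reschedule is
 * needed, or until neither forced polling nor an expired tick broadcast
 * device requires us to keep polling.
 */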
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

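/*
 * Enter the idle state selected by the governor, returning the index of
 * the state actually entered or a negative error code (-EBUSY when a
 * reschedule is already pending and idle entry was skipped).
 */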
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled; it is pointless to go idle.
	 * Just record zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * The RCU framework needs to be told that we are entering an idle
	 * section, so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
		if (idle_should_enter_s2idle()) {
			rcu_idle_enter();

			entered_state = cpuidle_enter_s2idle(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}

			rcu_idle_exit();
		}

		tick_nohz_idle_stop_tick();
		rcu_idle_enter();

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick)
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		rcu_idle_enter();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	cpu_pm_enter_pre();
	quiet_vmstat();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			tick_nohz_idle_stop_tick_protected();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	cpu_pm_exit_post();
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

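/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text section,
 * i.e. whether the CPU that produced this program counter was executing an
 * idle routine.
 */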
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

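/*
 * Idle injection support: a pinned hrtimer bounds the time spent in
 * play_idle(). When it fires, it marks the injection done and sets
 * need_resched on the injecting task so do_idle() falls out of its loop.
 */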
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

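/**
 * play_idle - Inject idle time on the current CPU.
 * @duration_ms: Amount of time to stay idle, in milliseconds.
 *
 * The WARN_ON_ONCE() checks below document the contract: the caller must be
 * a per-CPU, affinity-fixed SCHED_FIFO kthread. As a sketch, a hypothetical
 * idle-injection worker might call play_idle(10) from its work loop to force
 * roughly 10 ms of idle time.
 */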
void play_idle(unsigned long duration_ms)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ms);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(true);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(false);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);

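/*
 * cpu_startup_entry - Final entry point for a booting CPU; never returns.
 * Runs arch and hotplug setup for the idle thread and then loops in
 * do_idle() forever.
 */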
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}
cf37b6b4 | 378 | } |