/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/mm.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

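/*
 * cpu_idle_force_poll acts as a reference count: while it is non-zero
 * the idle loop spins in cpu_idle_poll() instead of calling into the
 * architecture idle routine.
 */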
static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

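/*
 * On architectures that select CONFIG_GENERIC_IDLE_POLL_SETUP, the
 * "nohlt" and "hlt" kernel command line options force polling on and
 * off respectively.
 */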
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

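/*
 * Polling idle: spin with interrupts enabled until a reschedule is
 * pending.  Used when polling is forced or when the tick broadcast
 * device has already expired for this CPU.
 */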
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
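/* If the arch provides no arch_cpu_idle() of its own, fall back to polling. */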
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

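		/*
		 * Idle until a reschedule is pending.  If the CPU went
		 * offline in the meantime, hand over to the arch specific
		 * dead path.
		 */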
		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
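				/*
				 * Clear the polling flag so that remote
				 * wakeups have to send a resched IPI, then
				 * recheck need_resched() to close the race
				 * with a wakeup that arrived in between.
				 */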
				current_clr_polling();
				if (!need_resched()) {
					stop_critical_timings();
					rcu_idle_enter();
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

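/*
 * Entry point into the idle loop for a CPU that has finished booting;
 * it never returns.
 */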
void cpu_startup_entry(enum cpuhp_state state)
{
	current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}