/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);
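
/*
 * Fast-path callers don't test the static key above directly; they use
 * the context_tracking_is_enabled() helper from
 * <linux/context_tracking_state.h>. A simplified sketch of that helper
 * (for illustration, not code from this file):
 *
 *	static inline bool context_tracking_is_enabled(void)
 *	{
 *		return static_branch_unlikely(&context_tracking_enabled);
 *	}
 *
 * Until context_tracking_cpu_set() flips the key for some CPU, the whole
 * facility reduces to a patched-out branch.
 */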

static bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}
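
/*
 * Usage sketch: the enter/exit paths below bracket their per-CPU state
 * updates with this guard, so a recursive entry (e.g. from an
 * instrumentation hook firing mid-update) bails out instead of
 * corrupting the state:
 *
 *	if (!context_tracking_recursion_enter())
 *		return;
 *	... update context_tracking.state ...
 *	context_tracking_recursion_exit();
 *
 * Plain __this_cpu_*() ops suffice here because callers run with IRQs
 * disabled, pinned to their CPU.
 */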

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void __context_tracking_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter);

void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);

void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
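
/*
 * Illustrative arch-side usage (a sketch, not code from this file):
 * a syscall path typically brackets user mode like
 *
 *	user_exit();			// syscall entry from userspace
 *	... handle the syscall ...
 *	user_enter();			// about to return to userspace
 *
 * where user_enter()/user_exit() are the <linux/context_tracking.h>
 * wrappers that reach __context_tracking_enter/exit() with CONTEXT_USER
 * when tracking is enabled.
 */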

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of RCU read side critical sections. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void __context_tracking_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit);

void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
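
/*
 * Why the re-entrancy above matters, sketched from the
 * exception_enter()/exception_exit() pair in <linux/context_tracking.h>:
 * the previous state travels with the task, so it survives sleeping in
 * the handler and migrating to another CPU:
 *
 *	enum ctx_state prev = exception_enter();
 *	... handle the exception, possibly sleeping ...
 *	exception_exit(prev);
 */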

void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_enabled);
	}

	if (initialized)
		return;

	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork.
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
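
/*
 * Typical caller (illustrative, lives in the nohz_full setup code, not
 * here): tick_nohz_init() enables tracking for every CPU in
 * tick_nohz_full_mask, roughly:
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */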

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif