static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (context_tracking_is_enabled()) {
-		if (prev_ctx == IN_USER)
+		if (prev_ctx == CONTEXT_USER)
			context_tracking_user_enter();
	}
}
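For context, exception_exit() is the second half of a pair: exception_enter() forces the CPU into kernel context tracking and returns the previous state, and exception_exit() restores user tracking only when that saved state was userspace (CONTEXT_USER after this rename). A minimal sketch of the pairing, assuming a made-up handler name do_example_trap() in place of a real arch entry point:

/* Sketch only: do_example_trap() is hypothetical; the enter/exit pairing is the point. */
static void do_example_trap(void)
{
	enum ctx_state prev_ctx = exception_enter();	/* snapshot state, force kernel context */

	/* ... handle the exception; RCU and vtime now see kernel context ... */

	exception_exit(prev_ctx);	/* re-enter user tracking iff prev_ctx == CONTEXT_USER */
}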
	 */
	bool active;
	enum ctx_state {
-		IN_KERNEL = 0,
-		IN_USER,
+		CONTEXT_KERNEL = 0,
+		CONTEXT_USER,
	} state;
};
static inline bool context_tracking_in_user(void)
{
-	return __this_cpu_read(context_tracking.state) == IN_USER;
+	return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
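context_tracking_in_user() is the read-only query over the same per-CPU state, typically used to ask whether an interrupt or exception landed on top of userspace; with CONFIG_CONTEXT_TRACKING disabled it compiles down to a constant false, as the stub above shows. A hedged sketch of a caller, with the hypothetical helper account_example_tick() standing in for real users:

/* Sketch only: account_example_tick() is made up; it just exercises the helper above. */
static void account_example_tick(void)
{
	if (context_tracking_in_user()) {
		/* the interrupt hit a CPU whose tracked state is CONTEXT_USER */
	} else {
		/* kernel context, or context tracking is compiled out */
	}
}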
	WARN_ON_ONCE(!current->mm);
	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != IN_USER) {
+	if ( __this_cpu_read(context_tracking.state) != CONTEXT_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			trace_user_enter(0);
			/*
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
-		__this_cpu_write(context_tracking.state, IN_USER);
+		__this_cpu_write(context_tracking.state, CONTEXT_USER);
	}
	local_irq_restore(flags);
}
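context_tracking_user_enter() is normally reached through the user_enter() wrapper just before the kernel returns to userspace; the write of CONTEXT_USER above is what later lets the exit path and exception_enter() know the CPU was in a tracked user section. The wrapper, in approximately the form it has in include/linux/context_tracking.h around this patch:

static inline void user_enter(void)
{
	if (context_tracking_is_enabled())
		context_tracking_user_enter();
}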
		return;
	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+	if (__this_cpu_read(context_tracking.state) == CONTEXT_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			vtime_user_exit(current);
			trace_user_exit(0);
		}
-		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	local_irq_restore(flags);
}
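The mirror image on the entry-to-kernel side is exception_enter(), which snapshots the per-CPU state (now CONTEXT_USER or CONTEXT_KERNEL) before funnelling into context_tracking_user_exit() above. Its approximate shape in the same header, paired with the exception_exit() hunk at the top of this patch:

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!context_tracking_is_enabled())
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);	/* CONTEXT_USER or CONTEXT_KERNEL */
	context_tracking_user_exit();

	return prev_ctx;
}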
	 * we find a better solution.
	 *
	 * NB: There are buggy callers of this function. Ideally we
-	 * should warn if prev_state != IN_USER, but that will trigger
+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
	 * too frequently to make sense yet.
	 */
	enum ctx_state prev_state = exception_enter();
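This last hunk only renames the constant inside a comment; the surrounding code (schedule_user() in the scheduler, assuming the usual location of this comment) brackets schedule() with the exception_enter()/exception_exit() pair, which is why a prev_state other than CONTEXT_USER from those buggy callers would otherwise deserve a warning. Approximate shape of that function:

asmlinkage __visible void __sched schedule_user(void)
{
	/* (comment shown in the hunk above) */
	enum ctx_state prev_state = exception_enter();

	schedule();
	exception_exit(prev_state);
}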