From ad65782fba507d91a0a98f519b59e79cac1b474c Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 10 Jul 2013 02:44:35 +0200
Subject: [PATCH] context_tracking: Optimize main APIs off case with static key

Optimize user and exception entry/exit APIs with static keys.
This minimizes the overhead for those who enable CONFIG_NO_HZ_FULL
without always using it. Having no range passed to nohz_full=
should result in the probes being nopped (at least we hope so...).

If this proves not to be enough in the long term, we'll need to
bring in an exception slow path by re-routing the exception handlers.

Signed-off-by: Frederic Weisbecker
Cc: Steven Rostedt
Cc: Paul E. McKenney
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Borislav Petkov
Cc: Li Zhong
Cc: Mike Galbraith
Cc: Kevin Hilman
---
 include/linux/context_tracking.h | 27 ++++++++++++++++++++++-----
 kernel/context_tracking.c        | 12 ++++++------
 2 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index c138c24bad1a..38ab60b3f3a6 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -38,23 +38,40 @@ static inline bool context_tracking_active(void)
 
 extern void context_tracking_cpu_set(int cpu);
 
-extern void user_enter(void);
-extern void user_exit(void);
+extern void context_tracking_user_enter(void);
+extern void context_tracking_user_exit(void);
+
+static inline void user_enter(void)
+{
+	if (static_key_false(&context_tracking_enabled))
+		context_tracking_user_enter();
+
+}
+static inline void user_exit(void)
+{
+	if (static_key_false(&context_tracking_enabled))
+		context_tracking_user_exit();
+}
 
 static inline enum ctx_state exception_enter(void)
 {
 	enum ctx_state prev_ctx;
 
+	if (!static_key_false(&context_tracking_enabled))
+		return 0;
+
 	prev_ctx = this_cpu_read(context_tracking.state);
-	user_exit();
+	context_tracking_user_exit();
 
 	return prev_ctx;
 }
 
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-	if (prev_ctx == IN_USER)
-		user_enter();
+	if (static_key_false(&context_tracking_enabled)) {
+		if (prev_ctx == IN_USER)
+			context_tracking_user_enter();
+	}
 }
 
 extern void context_tracking_task_switch(struct task_struct *prev,
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 839d377d0da5..6e89e094c80e 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -33,15 +33,15 @@ void context_tracking_cpu_set(int cpu)
 }
 
 /**
- * user_enter - Inform the context tracking that the CPU is going to
- *              enter userspace mode.
+ * context_tracking_user_enter - Inform the context tracking that the CPU is going to
+ *                               enter userspace mode.
  *
  * This function must be called right before we switch from the kernel
  * to userspace, when it's guaranteed the remaining kernel instructions
  * to execute won't use any RCU read side critical section because this
  * function sets RCU in extended quiescent state.
  */
-void user_enter(void)
+void context_tracking_user_enter(void)
 {
 	unsigned long flags;
 
@@ -131,8 +131,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
 #endif /* CONFIG_PREEMPT */
 
 /**
- * user_exit - Inform the context tracking that the CPU is
- *             exiting userspace mode and entering the kernel.
+ * context_tracking_user_exit - Inform the context tracking that the CPU is
+ *                              exiting userspace mode and entering the kernel.
  *
  * This function must be called after we entered the kernel from userspace
  * before any use of RCU read side critical section. This potentially include
@@ -141,7 +141,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void user_exit(void)
+void context_tracking_user_exit(void)
 {
 	unsigned long flags;
 
-- 
2.20.1
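
For readers unfamiliar with the pattern the patch relies on, here is a minimal
userspace C sketch of the "off case" optimization: cheap inline wrappers test a
global key and only call the out-of-line tracking functions when it is set. The
plain bool below is only a stand-in for the context_tracking_enabled static key
(static_key_false() in the patch), and the tracking_user_enter/tracking_user_exit
names are illustrative, not the kernel symbols.

/* Sketch of the off-case wrapper pattern; the bool stands in for a static key. */
#include <stdbool.h>
#include <stdio.h>

static bool tracking_enabled;          /* stand-in for context_tracking_enabled */

static void tracking_user_enter(void)  /* out-of-line slow path */
{
	printf("userspace entry tracked\n");
}

static void tracking_user_exit(void)   /* out-of-line slow path */
{
	printf("userspace exit tracked\n");
}

static inline void user_enter(void)
{
	if (tracking_enabled)           /* kernel: static_key_false(&key) */
		tracking_user_enter();
}

static inline void user_exit(void)
{
	if (tracking_enabled)
		tracking_user_exit();
}

int main(void)
{
	user_enter();                   /* key off: wrapper falls straight through */
	tracking_enabled = true;        /* kernel would flip the key, e.g. static_key_slow_inc() */
	user_exit();                    /* key on: slow path runs */
	return 0;
}

With a real static key the disabled branch is patched into a NOP instead of a
load plus conditional jump, so kernels built with CONFIG_NO_HZ_FULL but booted
without a nohz_full= range pay close to nothing on these paths.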