sched/preempt: Rename PREEMPT_CHECK_OFFSET to PREEMPT_DISABLE_OFFSET
authorFrederic Weisbecker <fweisbec@gmail.com>
Tue, 12 May 2015 14:41:48 +0000 (16:41 +0200)
committerIngo Molnar <mingo@kernel.org>
Tue, 19 May 2015 06:39:12 +0000 (08:39 +0200)
"CHECK" suggests it's only used as a comparison mask. But now it's used
further as a config-conditional preempt disabler offset. Lets
disambiguate this name.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/preempt.h

index 37974cd4f0923e17a085425b1ef2fe56b137dca2..4689ef210a13c057a9c44880b1442437cdbd06af 100644 (file)
@@ -85,9 +85,9 @@
 #define in_nmi()       (preempt_count() & NMI_MASK)
 
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET 1
 #else
-# define PREEMPT_CHECK_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET 0
 #endif
 
 /*
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
 
 /*
  * Are we running in atomic context?  WARNING: this macro cannot
  * (used by the scheduler, *after* releasing the kernel lock)
  */
 #define in_atomic_preempt_off() \
-               ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+               ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
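
For context, a minimal userspace sketch (not kernel code) of why the offset has to be
config-conditional and how the renamed PREEMPT_DISABLE_OFFSET feeds the
in_atomic_preempt_off() check: with CONFIG_PREEMPT_COUNT, each preempt_disable()
bumps the count, so a caller holding exactly one preempt_disable() level sees
count == PREEMPT_DISABLE_OFFSET; without it nothing is counted, so the expected
value is 0. The fake_preempt_count variable and the simplified helpers below are
assumptions made for this illustration, and the PREEMPT_ACTIVE masking from the
real macro is omitted.

/*
 * Standalone illustration only -- not the kernel implementation.
 * Toggle CONFIG_PREEMPT_COUNT between 1 and 0 to mimic the two builds.
 */
#include <stdio.h>

#define CONFIG_PREEMPT_COUNT 1

#if CONFIG_PREEMPT_COUNT
# define PREEMPT_DISABLE_OFFSET 1	/* preempt_disable() adds 1 to the count */
#else
# define PREEMPT_DISABLE_OFFSET 0	/* counting compiled out: nothing is added */
#endif

static int fake_preempt_count;		/* stand-in for the real preempt_count() */

static void preempt_disable(void) { fake_preempt_count += PREEMPT_DISABLE_OFFSET; }
static void preempt_enable(void)  { fake_preempt_count -= PREEMPT_DISABLE_OFFSET; }

/*
 * Mirrors the idea of the patched check: "atomic, apart from the one
 * expected preempt_disable() level" means the count equals the offset.
 */
static int in_atomic_preempt_off(void)
{
	return fake_preempt_count != PREEMPT_DISABLE_OFFSET;
}

int main(void)
{
	preempt_disable();
	printf("one level disabled  -> in_atomic_preempt_off() = %d\n",
	       in_atomic_preempt_off());	/* 0: exactly the expected offset */

	preempt_disable();
	printf("two levels disabled -> in_atomic_preempt_off() = %d\n",
	       in_atomic_preempt_off());	/* 1 when counting is enabled */

	preempt_enable();
	preempt_enable();
	return 0;
}

With CONFIG_PREEMPT_COUNT set to 0 the offset collapses to 0 and the check can
no longer distinguish nesting levels, which is exactly why the macro carries a
different value per configuration rather than a fixed constant.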