sched/headers: Move idle polling methods to <linux/sched/idle.h>
author Ingo Molnar <mingo@kernel.org>
Wed, 1 Feb 2017 17:47:28 +0000 (18:47 +0100)
committer Ingo Molnar <mingo@kernel.org>
Thu, 2 Mar 2017 07:42:41 +0000 (08:42 +0100)
Further reduce the size of <linux/sched.h> by moving these APIs (a short usage sketch follows the list):

tsk_is_polling()
__current_set_polling()
current_set_polling_and_test()
__current_clr_polling()
current_clr_polling_and_test()
current_clr_polling()

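For reference, the expected caller of these helpers is the idle loop:
it advertises TIF_POLLING_NRFLAG so that resched_curr() can skip the
reschedule IPI, and re-checks TIF_NEED_RESCHED under a full barrier
before polling. A minimal, illustrative sketch only - the
idle_poll_loop() name is made up for this example and is not part of
the change:

	#include <linux/sched.h>	/* need_resched() */
	#include <linux/sched/idle.h>	/* new home of the polling helpers */
	#include <asm/processor.h>	/* cpu_relax() */

	static void idle_poll_loop(void)
	{
		/*
		 * Set the polling flag and re-test need_resched; the
		 * barrier in current_set_polling_and_test() pairs with
		 * resched_curr().
		 */
		if (!current_set_polling_and_test()) {
			while (!need_resched())
				cpu_relax();
		}

		/*
		 * Clear the polling flag; remote CPUs must go back to
		 * sending the reschedule IPI to wake this CPU.
		 */
		current_clr_polling();
	}
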
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
include/linux/sched/idle.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index c30705c3e5532121d024591cd4b9c6b27eecdfaf..4f512e337584c21fc193206fc3a25eb2c13ced3f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3176,82 +3176,6 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
-/*
- * Idle thread specific functions to determine the need_resched
- * polling state.
- */
-#ifdef TIF_POLLING_NRFLAG
-static inline int tsk_is_polling(struct task_struct *p)
-{
-       return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
-}
-
-static inline void __current_set_polling(void)
-{
-       set_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
-       __current_set_polling();
-
-       /*
-        * Polling state must be visible before we test NEED_RESCHED,
-        * paired by resched_curr()
-        */
-       smp_mb__after_atomic();
-
-       return unlikely(tif_need_resched());
-}
-
-static inline void __current_clr_polling(void)
-{
-       clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
-{
-       __current_clr_polling();
-
-       /*
-        * Polling state must be visible before we test NEED_RESCHED,
-        * paired by resched_curr()
-        */
-       smp_mb__after_atomic();
-
-       return unlikely(tif_need_resched());
-}
-
-#else
-static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void __current_set_polling(void) { }
-static inline void __current_clr_polling(void) { }
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
-       return unlikely(tif_need_resched());
-}
-static inline bool __must_check current_clr_polling_and_test(void)
-{
-       return unlikely(tif_need_resched());
-}
-#endif
-
-static inline void current_clr_polling(void)
-{
-       __current_clr_polling();
-
-       /*
-        * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
-        * Once the bit is cleared, we'll get IPIs with every new
-        * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
-        * fold.
-        */
-       smp_mb(); /* paired with resched_curr() */
-
-       preempt_fold_need_resched();
-}
-
 static __always_inline bool need_resched(void)
 {
        return unlikely(tif_need_resched());
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 72b6b6ab6354734d9056fcdde15404c15a04f5c3..66ee32f87f0b31f551c7944358861f265a18d3f8 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -12,4 +12,80 @@ enum cpu_idle_type {
 
 extern void wake_up_if_idle(int cpu);
 
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state.
+ */
+#ifdef TIF_POLLING_NRFLAG
+static inline int tsk_is_polling(struct task_struct *p)
+{
+       return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+
+static inline void __current_set_polling(void)
+{
+       set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+       __current_set_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_curr()
+        */
+       smp_mb__after_atomic();
+
+       return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
+{
+       clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+       __current_clr_polling();
+
+       /*
+        * Polling state must be visible before we test NEED_RESCHED,
+        * paired by resched_curr()
+        */
+       smp_mb__after_atomic();
+
+       return unlikely(tif_need_resched());
+}
+
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+       return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+       return unlikely(tif_need_resched());
+}
+#endif
+
+static inline void current_clr_polling(void)
+{
+       __current_clr_polling();
+
+       /*
+        * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+        * Once the bit is cleared, we'll get IPIs with every new
+        * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+        * fold.
+        */
+       smp_mb(); /* paired with resched_curr() */
+
+       preempt_fold_need_resched();
+}
+
 #endif /* _LINUX_SCHED_IDLE_H */
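
Note: code that uses these helpers should now include the dedicated
header rather than relying on <linux/sched.h> to provide them, e.g.
(illustrative only):

	#include <linux/sched/idle.h>	/* tsk_is_polling(), current_clr_polling(), ... */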