u64_stats: Introduce IRQs disabled helpers
author Frederic Weisbecker <fweisbec@gmail.com>
Mon, 26 Sep 2016 00:29:19 +0000 (02:29 +0200)
committer Ingo Molnar <mingo@kernel.org>
Fri, 30 Sep 2016 09:46:40 +0000 (11:46 +0200)
Introduce light versions of the u64_stats helpers for contexts where
either preemption or IRQs are already disabled. This way we can make
this library usable by the scheduler's irqtime accounting, which
currently implements its own ad-hoc version.
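
For illustration, a minimal sketch of how a reader running with IRQs
already disabled could use the new raw helpers; the my_stats structure
and field names below are hypothetical:

	struct my_stats {
		u64			bytes;
		struct u64_stats_sync	syncp;
	};

	/* Caller already has IRQs disabled (e.g. hardirq context or
	 * under local_irq_save()), so the raw helpers can skip the
	 * preempt/irq toggling done by the public wrappers. */
	static u64 read_bytes_irqs_disabled(struct my_stats *stats)
	{
		unsigned int start;
		u64 bytes;

		do {
			start = __u64_stats_fetch_begin(&stats->syncp);
			bytes = stats->bytes;
		} while (__u64_stats_fetch_retry(&stats->syncp, start));

		return bytes;
	}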

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1474849761-12678-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/u64_stats_sync.h

index d3a2bb712af3b9613b98ef9c3219f8dcd31568a5..650f3dd6b800f3efa9509a50776649fd048202d7 100644
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
 #endif
 }
 
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
 #else
-#if BITS_PER_LONG==32
-       preempt_disable();
-#endif
        return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+       preempt_disable();
+#endif
+       return __u64_stats_fetch_begin(syncp);
+}
+
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
 #else
-#if BITS_PER_LONG==32
-       preempt_enable();
-#endif
        return false;
 #endif
 }
 
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+                                        unsigned int start)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+       preempt_enable();
+#endif
+       return __u64_stats_fetch_retry(syncp, start);
+}
+
 /*
  * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-       return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
        local_irq_disable();
 #endif
-       return 0;
-#endif
+       return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
-                                        unsigned int start)
+                                            unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-       return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
        local_irq_enable();
 #endif
-       return false;
-#endif
+       return __u64_stats_fetch_retry(syncp, start);
 }
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
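
For contrast, readers that must guard against in-IRQ writers keep using
the irq-safe wrappers exactly as before; a sketch with the same
hypothetical my_stats structure as above:

	static u64 read_bytes(struct my_stats *stats)
	{
		unsigned int start;
		u64 bytes;

		/* The _irq wrappers now disable/enable IRQs themselves
		 * (on 32-bit UP) before delegating to the raw helpers,
		 * so the external pattern is unchanged. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		return bytes;
	}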