locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Sun, 17 Apr 2016 23:16:03 +0000 (01:16 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 16 Jun 2016 08:48:31 +0000 (10:48 +0200)
Implement the FETCH-OP atomic primitives; they are very similar to the
existing OP-RETURN primitives, except that they return the value of the
atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as bitops --
because the value _after_ modification is not enough to reconstruct the
state prior to it.
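
Not part of the patch, purely for illustration: a minimal userspace sketch
of the same idea using the C11 <stdatomic.h> fetch_or (not the kernel API).
The pre-OR value tells the caller whether the bit was already set, which
the post-OR value cannot, since OR is irreversible.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint flags = ATOMIC_VAR_INIT(0);

	/* atomic_fetch_or() returns the value *before* the OR */
	unsigned int old = atomic_fetch_or(&flags, 1u << 3);
	if (!(old & (1u << 3)))
		printf("we were the ones to set bit 3\n");

	old = atomic_fetch_or(&flags, 1u << 3);
	if (old & (1u << 3))
		printf("bit 3 was already set\n");

	return 0;
}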

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 3e86742881984006555fa433d9988a2c4eaa7b4f..73b8463b89e910d6ea673e2709a23e2f070dcf0d 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+       return xadd(&v->counter, i);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+       return xadd(&v->counter, -i);
+}
+
 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        return cmpxchg(&v->counter, old, new);
@@ -190,10 +200,31 @@ static inline void atomic_##op(int i, atomic_t *v)                        \
                        : "memory");                                    \
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                \
+{                                                                      \
+       int old, val = atomic_read(v);                                  \
+       for (;;) {                                                      \
+               old = atomic_cmpxchg(v, val, val c_op i);               \
+               if (old == val)                                         \
+                       break;                                          \
+               val = old;                                              \
+       }                                                               \
+       return old;                                                     \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op)                                                   \
+       ATOMIC_FETCH_OP(op, c_op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, &)
+ATOMIC_OPS(or , |)
+ATOMIC_OPS(xor, ^)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 /**
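
For reference, not part of the patch: roughly what ATOMIC_FETCH_OP(and, &)
above expands to, written as a self-contained sketch with the GCC/Clang
__atomic builtins standing in for atomic_read() and atomic_cmpxchg().
The compare-and-swap is retried until it installs its update, and the value
observed *before* the AND is what gets returned.

/* Illustration only -- not kernel code */
static inline int fetch_and_sketch(int i, int *v)
{
	int old, val = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		old = val;
		/* install (val & i) only if *v still holds val */
		if (__atomic_compare_exchange_n(v, &old, val & i, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			break;		/* 'old' (== val) is the pre-AND value */
		val = old;		/* lost a race: 'old' holds what we actually saw */
	}
	return old;
}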
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a984111135b16e9c7c51d2cde681aa3b310a6a6b..71d7705fb303ba5238f4f812fdb071277addbce8 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v)              \
                c = old;                                                \
 }
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)        \
+{                                                                      \
+       long long old, c = 0;                                           \
+       while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)           \
+               c = old;                                                \
+       return old;                                                     \
+}
+
+ATOMIC64_FETCH_OP(add, +)
+
+#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 037351022f5483c99a3f0a42dd4646f1eb0a3055..70eed0e14553e69d7f4099e21fa94efe03900a36 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
        return atomic64_add_return(-i, v);
 }
 
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, -i);
+}
+
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v)                   \
                        : "memory");                                    \
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
+{                                                                      \
+       long old, val = atomic64_read(v);                               \
+       for (;;) {                                                      \
+               old = atomic64_cmpxchg(v, val, val c_op i);             \
+               if (old == val)                                         \
+                       break;                                          \
+               val = old;                                              \
+       }                                                               \
+       return old;                                                     \
+}
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op)                                                 \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
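
One last illustration, not part of the patch: the fetch_add()/fetch_sub()
variants above are built on xadd(), which already returns the previous value;
the corresponding OP-RETURN primitive is simply that value plus the operand
(this mirrors how atomic64_add_return() is implemented as i + xadd(...)).
A userspace sketch with the GCC/Clang builtins, which typically compile to
LOCK XADD on x86-64:

/* Illustration only: FETCH-OP returns the old value, OP-RETURN the new one. */
#include <assert.h>

static long fetch_add_sketch(long i, long *v)
{
	return __atomic_fetch_add(v, i, __ATOMIC_SEQ_CST);	/* value before the add */
}

static long add_return_sketch(long i, long *v)
{
	return fetch_add_sketch(i, v) + i;			/* value after the add */
}

int main(void)
{
	long counter = 40;

	assert(fetch_add_sketch(2, &counter) == 40);	/* counter is now 42 */
	assert(add_return_sketch(8, &counter) == 50);	/* counter is now 50 */
	return 0;
}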