locking/atomic, arch/powerpc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}{,_relaxed,_acquire,_release}()
author     Peter Zijlstra <peterz@infradead.org>
           Sun, 17 Apr 2016 23:16:05 +0000 (01:16 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 16 Jun 2016 08:48:28 +0000 (10:48 +0200)
Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives, except that they return the value of the
atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- because with those the state prior to modification cannot be
reconstructed from the OP-RETURN result.
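
As a minimal illustration of the semantic difference, here is a
user-space sketch using the C11 <stdatomic.h> equivalents of these
primitives (the kernel API itself is not standalone-compilable);
FLAG_BUSY and the variable names are made up for the example:

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_BUSY 0x1	/* hypothetical bit flag */

int main(void)
{
	atomic_int counter = 0;
	atomic_int flags = 0;

	/*
	 * Reversible op: the OP-RETURN result can be reconstructed
	 * from the fetched (old) value, and vice versa.
	 */
	int old = atomic_fetch_add_explicit(&counter, 5,
					    memory_order_relaxed);
	printf("fetch_add returned %d; add_return would give %d\n",
	       old, old + 5);

	/*
	 * Irreversible op: only the fetched value tells us whether the
	 * bit was already set; the post-OR value has it set either way.
	 */
	old = atomic_fetch_or_explicit(&flags, FLAG_BUSY,
				       memory_order_relaxed);
	printf(old & FLAG_BUSY ? "FLAG_BUSY was already set\n"
			       : "FLAG_BUSY acquired\n");

	return 0;
}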

Tested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index ae0751ef8788fd55f7c4d6b37a8fbb73d82d317f..f08d567e0ca4dfb75d560afa6b72473643f12c56 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -78,21 +78,53 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)  \
        return t;                                                       \
 }
 
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)                            \
+static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)      \
+{                                                                      \
+       int res, t;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    lwarx   %0,0,%4         # atomic_fetch_" #op "_relaxed\n"       \
+       #asm_op " %1,%3,%0\n"                                           \
+       PPC405_ERR77(0, %4)                                             \
+"      stwcx.  %1,0,%4\n"                                              \
+"      bne-    1b\n"                                                   \
+       : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return res;                                                     \
+}
+
 #define ATOMIC_OPS(op, asm_op)                                         \
        ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN_RELAXED(op, asm_op)
+       ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
+       ATOMIC_FETCH_OP_RELAXED(op, asm_op)
 
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, xor)
-
 #define atomic_add_return_relaxed atomic_add_return_relaxed
 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
 
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op)                                         \
+       ATOMIC_OP(op, asm_op)                                           \
+       ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP_RELAXED
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
@@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v)                     \
        return t;                                                       \
 }
 
+#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                          \
+static inline long                                                     \
+atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)                   \
+{                                                                      \
+       long res, t;                                                    \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    ldarx   %0,0,%4         # atomic64_fetch_" #op "_relaxed\n"     \
+       #asm_op " %1,%3,%0\n"                                           \
+"      stdcx.  %1,0,%4\n"                                              \
+"      bne-    1b\n"                                                   \
+       : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return res;                                                     \
+}
+
 #define ATOMIC64_OPS(op, asm_op)                                       \
        ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
+       ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
+       ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(or, or)
-ATOMIC64_OP(xor, xor)
 
 #define atomic64_add_return_relaxed atomic64_add_return_relaxed
 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
 
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op)                                       \
+       ATOMIC64_OP(op, asm_op)                                         \
+       ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(or, or)
+ATOMIC64_OPS(xor, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP_RELAXED
 #undef ATOMIC64_OP_RETURN_RELAXED
 #undef ATOMIC64_OP
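
For reference, hand-expanding ATOMIC_FETCH_OP_RELAXED(add, add) from the
first hunk gives the function below; the interleaved comments are
editorial annotations, not part of the patch:

static inline int atomic_fetch_add_relaxed(int a, atomic_t *v)
{
	int res, t;

	__asm__ __volatile__(
	/* Load-reserve: res (%0) = the value *before* modification. */
"1:	lwarx	%0,0,%4		# atomic_fetch_add_relaxed\n"
	/* t (%1) = a (%3) + res (%0). */
"	add	%1,%3,%0\n"
	PPC405_ERR77(0, %4)
	/* Store-conditional: fails (clears CR0[EQ]) if another CPU
	   touched the reservation granule since the lwarx ... */
"	stwcx.	%1,0,%4\n"
	/* ... in which case bne- retries the whole sequence. */
"	bne-	1b\n"
	: "=&r" (res), "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");

	return res;	/* the pre-add value */
}

Note the absence of any sync/isync and of a "memory" clobber: this is
the _relaxed variant; the fully ordered, _acquire and _release forms are
built on top of it by the generic wrappers in <linux/atomic.h>.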