locking/atomic, arch/blackfin: Implement atomic_fetch_{add,sub,and,or,xor}()
author Peter Zijlstra <peterz@infradead.org>
Sun, 17 Apr 2016 23:16:08 +0000 (01:16 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 16 Jun 2016 08:48:23 +0000 (10:48 +0200)
Implement the FETCH-OP atomic primitives. These are very similar to
the existing OP-RETURN primitives we already have, except that they
return the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- because the state prior to modification cannot be
reconstructed from the new value alone.
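
As an illustration (not part of this patch, and assuming only the
generic <linux/atomic.h> API): the return value of a fetch-op is what
makes a test-and-set style operation possible on top of an
irreversible OR, e.g.:

	/* Hypothetical helper, shown only to illustrate the semantics. */
	static inline bool example_test_and_set_flag(int bit, atomic_t *v)
	{
		int old = atomic_fetch_or(1 << bit, v); /* value before the OR */

		return old & (1 << bit);                /* was it already set? */
	}

With atomic_or() alone this information is lost, because the old value
cannot be recovered from the new one.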

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/blackfin/include/asm/atomic.h
arch/blackfin/kernel/bfin_ksyms.c
arch/blackfin/mach-bf561/atomic.S

diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 1c1c42330c99c9e432991429778625be6e5235df..63c7deceeeb6e8f69c5d77e1b518174401229767 100644
@@ -17,6 +17,7 @@
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
 
 asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
 #define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
+#define atomic_fetch_or(i, v)  __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
+
 #endif
 
 #include <asm-generic/atomic.h>
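
With the wrappers above in place, the intended difference between the
RETURN and FETCH variants can be sketched as follows (illustrative
only, assuming __raw_atomic_xadd_asm() hands back the pre-add value):

	void fetch_vs_return_sketch(void)
	{
		atomic_t v = ATOMIC_INIT(1);

		int ret = atomic_add_return(2, &v);	/* ret == 3, v is now 3 */
		int old = atomic_fetch_add(2, &v);	/* old == 3, v is now 5 */
	}
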
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index a401c27b69b4a3feb825059d299ce27be64ba191..68096e8f787f7c3c3644bdd702fbeea05cf9eab3 100644
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
 EXPORT_SYMBOL(__raw_atomic_and_asm);
 EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
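
The new BF561 helper added below is, in effect, a core-lock protected
fetch-and-add. A rough C equivalent (illustrative only; get_core_lock()
and put_core_lock() are hypothetical stand-ins for the _get_core_lock /
_put_core_lock assembly routines) would be:

	/* Sketch of what __raw_atomic_xadd_asm() is meant to do. */
	int __raw_atomic_xadd_sketch(volatile int *ptr, int value)
	{
		unsigned long flags;
		int old;

		flags = get_core_lock(ptr);	/* hypothetical stand-in */
		old = *ptr;
		*ptr = old + value;
		put_core_lock(flags, ptr);	/* hypothetical stand-in */

		return old;
	}
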
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 26fccb5568b97e4f4234c4e12a5f8cc8189bec56..1e2989c5d6b2bec2fb0ad4bdaacd5948b7a9b133 100644
@@ -605,6 +605,28 @@ ENTRY(___raw_atomic_add_asm)
        rts;
 ENDPROC(___raw_atomic_add_asm)
 
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+       p1 = r0;
+       r3 = r1;
+       [--sp] = rets;
+       call _get_core_lock;
+       r3 = [p1];
+       r2 = r3 + r2;
+       [p1] = r2;
+       r1 = p1;
+       call _put_core_lock;
+       r0 = r3;
+       rets = [sp++];
+       rts;
+ENDPROC(___raw_atomic_xadd_asm)
+
 /*
  * r0 = ptr
  * r1 = mask
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 & r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 & r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 | r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 | r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 ^ r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 ^ r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
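
The reworked and/or/xor helpers follow the same pattern: the value the
word held before the operation ends up in r0, which is what the new
atomic_fetch_{and,or,xor}() wrappers rely on. A minimal usage sketch
(illustrative, not taken from the patch):

	void fetch_and_sketch(void)
	{
		atomic_t v = ATOMIC_INIT(0xf0);

		int old = atomic_fetch_and(0x0f, &v);	/* old == 0xf0, v is now 0 */
	}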