ARC: add barriers to futex code
author Vineet Gupta <vgupta@synopsys.com>
Wed, 5 Aug 2015 13:40:02 +0000 (19:10 +0530)
committer Vineet Gupta <vgupta@synopsys.com>
Thu, 20 Aug 2015 12:45:59 +0000 (18:15 +0530)
The atomic ops on futexes need to provide a full barrier, just like
the regular atomics in the kernel.
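
Schematically, each futex atomic op becomes bracketed by full barriers
(a sketch of the pattern this patch applies, distilled from the diff
below; the fault-fixup sections are elided):

	smp_mb();			/* full barrier before */
	1:	llock	%1, [uaddr]	/* load old value */
		<insn>			/* compute new value */
	2:	scond	%0, [uaddr]	/* try to store it */
		bnz	1b		/* reservation lost: retry */
	smp_mb();			/* full barrier after */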

Also remove the pagefault_enable()/disable() calls in
futex_atomic_cmpxchg_inatomic(), as the core code already does that.
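
For reference, the generic code in kernel/futex.c already brackets the
call with pagefault_disable()/pagefault_enable(); roughly, via
cmpxchg_futex_value_locked():

	static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
					      u32 uval, u32 newval)
	{
		int ret;

		pagefault_disable();
		ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
		pagefault_enable();

		return ret;
	}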

Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Michel Lespinasse <walken@google.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
arch/arc/include/asm/futex.h

index 70cfe16b742d78f7e8016a41b11271e7d258f7a6..9de18a526aff18678e393808d99e25799231e250 100644
@@ -20,6 +20,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
                                                        \
+       smp_mb();                                       \
        __asm__ __volatile__(                           \
        "1:     llock   %1, [%2]                \n"     \
                insn                            "\n"    \
                                                        \
        : "=&r" (ret), "=&r" (oldval)                   \
        : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
-       : "cc", "memory")
+       : "cc", "memory");                              \
+       smp_mb()                                        \
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
                                                        \
+       smp_mb();                                       \
        __asm__ __volatile__(                           \
        "1:     ld      %1, [%2]                \n"     \
                insn                            "\n"    \
@@ -65,7 +68,8 @@
                                                        \
        : "=&r" (ret), "=&r" (oldval)                   \
        : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
-       : "cc", "memory")
+       : "cc", "memory");                              \
+       smp_mb()                                        \
 
 #endif
 
@@ -134,13 +138,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        return ret;
 }
 
-/* Compare-xchg with pagefaults disabled.
- *  Notes:
- *      -Best-Effort: Exchg happens only if compare succeeds.
- *          If compare fails, returns; leaving retry/looping to upper layers
- *      -successful cmp-xchg: return orig value in @addr (same as cmp val)
- *      -Compare fails: return orig value in @addr
- *      -user access r/w fails: return -EFAULT
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
  */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
@@ -151,7 +150,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       pagefault_disable();
+       smp_mb();
 
        __asm__ __volatile__(
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -178,7 +177,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
        : "cc", "memory");
 
-       pagefault_enable();
+       smp_mb();
 
        *uval = val;
        return val;