powerpc/spinlock: Fix spin_unlock_wait()
author     Boqun Feng <boqun.feng@gmail.com>
           Fri, 10 Jun 2016 03:51:28 +0000 (11:51 +0800)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 14 Jun 2016 06:05:44 +0000 (16:05 +1000)
There is an ordering issue with spin_unlock_wait() on powerpc: the
spin_lock primitive is an ACQUIRE, and an ACQUIRE only orders the load
part of the operation against the memory operations that follow it.
The following sequence of events is therefore possible:

CPU 1                       CPU 2                       CPU 3
========================    ========================    ====================
                                                        spin_unlock(&lock);
                            spin_lock(&lock):
                              r1 = *lock; // r1 == 0;
o = object;                 o = READ_ONCE(object); // reordered here
object = NULL;
smp_mb();
spin_unlock_wait(&lock);
                              *lock = 1;
smp_mb();
o->dead = true;             < o = READ_ONCE(object); > // reordered upwards
                            if (o) // true
                              BUG_ON(o->dead); // true!!
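
For concreteness, here is the same scenario as plain kernel C. The names
struct foo, global_obj, foo_lock, cpu1_teardown() and cpu2_use() are
illustrative only, not from the patch:

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/bug.h>

    struct foo {
            bool dead;
    };

    static struct foo *global_obj;          /* "object" above */
    static DEFINE_SPINLOCK(foo_lock);       /* "lock" above */

    /* CPU 1: tear the object down once no one holds the lock. */
    static void cpu1_teardown(void)
    {
            struct foo *o = global_obj;

            global_obj = NULL;
            smp_mb();                       /* order the NULLing before the wait */
            spin_unlock_wait(&foo_lock);    /* wait out any current lock holder */
            smp_mb();
            o->dead = true;                 /* must not be visible to a holder */
    }

    /* CPU 2: use the object under the lock. */
    static void cpu2_use(void)
    {
            struct foo *o;

            spin_lock(&foo_lock);
            o = READ_ONCE(global_obj);      /* ACQUIRE orders this only against
                                             * the lock *load*, so this read can
                                             * be satisfied before CPU 1 sees
                                             * the lock held: BUG_ON() can fire */
            if (o)
                    BUG_ON(o->dead);
            spin_unlock(&foo_lock);
    }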

To fix this, we add a "nop" ll/sc loop in arch_spin_unlock_wait() on
ppc. The "nop" ll/sc loop reads the lock value and writes it back
atomically; this synchronizes CPU 1's view of the lock with CPU 2's.
In the scenario above, either CPU 2 will fail to take the lock at first
or CPU 1 will see the lock held by CPU 2; both cases eliminate the bug.
This is a similar idea to what Will Deacon did for ARM64 in:

  d86b8da04dfa ("arm64: spinlock: serialise spin_unlock_wait against concurrent lockers")
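
The core of the trick can be sketched portably with the kernel's generic
cmpxchg(). This is a sketch only: the helper name spin_nop_rmw() is
hypothetical, and the actual patch below uses lwarx/stwcx. inline
assembly directly rather than cmpxchg() and its extra barriers:

    /* Atomically load the lock word and store the same value back. */
    static inline u32 spin_nop_rmw(arch_spinlock_t *lock)
    {
            u32 val;

            do {
                    val = READ_ONCE(lock->slock);
            } while (cmpxchg(&lock->slock, val, val) != val);

            return val;     /* the value we observed and wrote back */
    }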

Furthermore, if the "nop" ll/sc observes that the lock is held, we
don't need to repeat the "nop" ll/sc trick; a plain load+check loop
waiting for the lock to be released is enough. In that case
spin_unlock_wait() was called while someone held the lock, so the store
part of the "nop" ll/sc happens before that holder's lock release:

"nop" ll/sc -> spin_unlock()

and the lock release happens before the next lock acquisition:

spin_unlock() -> spin_lock() <next holder>

which means the "nop" ll/sc happens before the next lock acquisition:

"nop" ll/sc -> spin_unlock() -> spin_lock() <next holder>

With a smp_mb() preceding spin_unlock_wait(), the store of object is
guaranteed to be observed by the next lock holder:

STORE -> smp_mb() -> "nop" ll/sc
-> spin_unlock() -> spin_lock() <next holder>
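
Putting the two halves together, the resulting algorithm has this shape
(a condensed restatement of the patch below, reusing the hypothetical
spin_nop_rmw() from the sketch above and plain cpu_relax() in place of
the HMT/yield handling):

    static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
    {
            u32 val;

            smp_mb();       /* STORE -> smp_mb() -> "nop" ll/sc */

            val = spin_nop_rmw(lock);
            if (val != 0) {
                    /*
                     * The lock was held: our store is ordered before the
                     * holder's spin_unlock(), which is ordered before the
                     * next spin_lock(), so plain loads suffice from here.
                     */
                    while (READ_ONCE(lock->slock))
                            cpu_relax();
            }

            smp_mb();
    }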

This patch therefore fixes the issue. It also cleans up
arch_spin_unlock_wait() a little by removing superfluous memory
barriers in the loops, and consolidates the PPC32 and PPC64
implementations into one.

Suggested-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
[mpe: Inline the "nop" ll/sc loop and set EH=0, munge change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/spinlock.h
arch/powerpc/lib/locks.c

index 523673d7583c349a8ea95ec5c2f85cbf59f71339..fa37fe93bc029eb9965754021c027b2de484b809 100644 (file)
@@ -162,12 +162,38 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        lock->slock = 0;
 }
 
-#ifdef CONFIG_PPC64
-extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
-#else
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#endif
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       arch_spinlock_t lock_val;
+
+       smp_mb();
+
+       /*
+        * Atomically load and store back the lock value (unchanged). This
+        * ensures that our observation of the lock value is ordered with
+        * respect to other lock operations.
+        */
+       __asm__ __volatile__(
+"1:    " PPC_LWARX(%0, 0, %2, 0) "\n"
+"      stwcx. %0, 0, %2\n"
+"      bne- 1b\n"
+       : "=&r" (lock_val), "+m" (*lock)
+       : "r" (lock)
+       : "cr0", "xer");
+
+       if (arch_spin_value_unlocked(lock_val))
+               goto out;
+
+       while (lock->slock) {
+               HMT_low();
+               if (SHARED_PROCESSOR)
+                       __spin_yield(lock);
+       }
+       HMT_medium();
+
+out:
+       smp_mb();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
index f7deebdf33651fd3f1c5038267117341d84bdf79..b7b1237d4aa666c6dde3b4346b1c3c403616ac99 100644 (file)
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
                get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 #endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_mb();
-
-       while (lock->slock) {
-               HMT_low();
-               if (SHARED_PROCESSOR)
-                       __spin_yield(lock);
-       }
-       HMT_medium();
-
-       smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);