locking: Remove ACCESS_ONCE() usage
author Davidlohr Bueso <dave@stgolabs.net>
Mon, 23 Feb 2015 03:31:41 +0000 (19:31 -0800)
committer Ingo Molnar <mingo@kernel.org>
Tue, 24 Feb 2015 07:44:16 +0000 (08:44 +0100)
With the new standardized functions, we can replace all
ACCESS_ONCE() calls across the relevant locking code; while at it,
this also covers lockref and seqlock.

ACCESS_ONCE() does not work reliably on non-scalar types.
For example, gcc 4.6 and 4.7 might remove the volatile qualifier
for such accesses during the SRA (scalar replacement of
aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Switch to the new calls regardless of whether the type is scalar;
this is cleaner than having three alternatives.
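
As a purely illustrative, user-space sketch (not part of this patch),
the conversion pattern applied throughout the diff below looks like
this. The one-line macro definitions, the struct and the variable names
are simplified stand-ins invented for the example; they do not reproduce
the kernel's actual READ_ONCE()/WRITE_ONCE() implementations, which also
cover non-scalar types:

  #include <stdio.h>

  /* Simplified stand-ins; the real kernel macros differ. */
  #define ACCESS_ONCE(x)     (*(volatile typeof(x) *)&(x))
  #define READ_ONCE(x)       (*(const volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

  /* Hypothetical node type, loosely modeled on the MCS spinlock node. */
  struct node {
          struct node *next;
          int locked;
  };

  int main(void)
  {
          struct node a = { 0 }, b = { 0 };
          struct node *prev = &a, *node = &b;

          /*
           * Old style: ACCESS_ONCE() yields an lvalue and is used for
           * both loads and stores.
           */
          ACCESS_ONCE(prev->next) = node;
          int old_locked = ACCESS_ONCE(node->locked);

          /* New style: explicit, direction-specific accessors. */
          WRITE_ONCE(prev->next, node);
          int new_locked = READ_ONCE(node->locked);

          printf("%d %d %p\n", old_locked, new_locked, (void *)prev->next);
          return 0;
  }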

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/seqlock.h
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/osq_lock.c
kernel/locking/rwsem-xadd.c
lib/lockref.c

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f5df8f687b4d097dd7855e3670a2b790cb380d5d..5f68d0a391cee8506f8e0d94cda72d8bd357b10f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
        unsigned ret;
 
 repeat:
-       ret = ACCESS_ONCE(s->sequence);
+       ret = READ_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
@@ -127,7 +127,7 @@ repeat:
  */
 static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
-       unsigned ret = ACCESS_ONCE(s->sequence);
+       unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret;
 }
@@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
-       unsigned ret = ACCESS_ONCE(s->sequence);
+       unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
 }
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index d1fe2ba5bac958bc85da8e8868408d8c6c809dc3..75e114bdf3f26f379c4382dce2bc5c128c06b868 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                 */
                return;
        }
-       ACCESS_ONCE(prev->next) = node;
+       WRITE_ONCE(prev->next, node);
 
        /* Wait until the lock holder passes the lock down. */
        arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-       struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+       struct mcs_spinlock *next = READ_ONCE(node->next);
 
        if (likely(!next)) {
                /*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                if (likely(cmpxchg(lock, node, NULL) == node))
                        return;
                /* Wait until the next pointer is set */
-               while (!(next = ACCESS_ONCE(node->next)))
+               while (!(next = READ_ONCE(node->next)))
                        cpu_relax_lowlatency();
        }
 
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 43bf25ef3c8167682fa5355e39df5591a6002652..16b2d3cc88b0ca08c58f9ca2f123e3cc0477b0ae 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -266,7 +266,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
                return 0;
 
        rcu_read_lock();
-       owner = ACCESS_ONCE(lock->owner);
+       owner = READ_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
@@ -340,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                         * As such, when deadlock detection needs to be
                         * performed the optimistic spinning cannot be done.
                         */
-                       if (ACCESS_ONCE(ww->ctx))
+                       if (READ_ONCE(ww->ctx))
                                break;
                }
 
@@ -348,7 +348,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
-               owner = ACCESS_ONCE(lock->owner);
+               owner = READ_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;
 
@@ -487,7 +487,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-       struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+       struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
        if (!hold_ctx)
                return 0;
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index c112d00341b05773934ecdb2977c0d2aca1d5c11..dc85ee23a26f79416a140241e3067a5a2ca24d0b 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
        prev = decode_cpu(old);
        node->prev = prev;
-       ACCESS_ONCE(prev->next) = node;
+       WRITE_ONCE(prev->next, node);
 
        /*
         * Normally @prev is untouchable after the above store; because at that
@@ -109,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
         * cmpxchg in an attempt to undo our queueing.
         */
 
-       while (!ACCESS_ONCE(node->locked)) {
+       while (!READ_ONCE(node->locked)) {
                /*
                 * If we need to reschedule bail... so we can block.
                 */
@@ -148,7 +148,7 @@ unqueue:
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
-               prev = ACCESS_ONCE(node->prev);
+               prev = READ_ONCE(node->prev);
        }
 
        /*
@@ -170,8 +170,8 @@ unqueue:
         * it will wait in Step-A.
         */
 
-       ACCESS_ONCE(next->prev) = prev;
-       ACCESS_ONCE(prev->next) = next;
+       WRITE_ONCE(next->prev, prev);
+       WRITE_ONCE(prev->next, next);
 
        return false;
 }
@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
-               ACCESS_ONCE(next->locked) = 1;
+               WRITE_ONCE(next->locked, 1);
                return;
        }
 
        next = osq_wait_next(lock, node, NULL);
        if (next)
-               ACCESS_ONCE(next->locked) = 1;
+               WRITE_ONCE(next->locked, 1);
 }
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index e4ad019e23f51fbe8a61231a8eb59ee8e6cecb3e..06e2214edf980ea296cb31a83d780a47e8441856 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-       long old, count = ACCESS_ONCE(sem->count);
+       long old, count = READ_ONCE(sem->count);
 
        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
                return false;
 
        rcu_read_lock();
-       owner = ACCESS_ONCE(sem->owner);
+       owner = READ_ONCE(sem->owner);
        if (!owner) {
-               long count = ACCESS_ONCE(sem->count);
+               long count = READ_ONCE(sem->count);
                /*
                 * If sem->owner is not set, yet we have just recently entered the
                 * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                goto done;
 
        while (true) {
-               owner = ACCESS_ONCE(sem->owner);
+               owner = READ_ONCE(sem->owner);
                if (owner && !rwsem_spin_on_owner(sem, owner))
                        break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
-               count = ACCESS_ONCE(sem->count);
+               count = READ_ONCE(sem->count);
 
                /*
                 * If there were already threads queued before us and there are
diff --git a/lib/lockref.c b/lib/lockref.c
index ecb9a665ec19b5c8b6e062568dffb0c4c0b12d90..494994bf17c8ec9764cbb784cbd88840fa0919c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -18,7 +18,7 @@
 #define CMPXCHG_LOOP(CODE, SUCCESS) do {                                       \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
-       old.lock_count = ACCESS_ONCE(lockref->lock_count);                      \
+       old.lock_count = READ_ONCE(lockref->lock_count);                        \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \