locking/refcounts: Use atomic_try_cmpxchg()
author Peter Zijlstra <peterz@infradead.org>
Wed, 1 Feb 2017 15:07:55 +0000 (16:07 +0100)
committer Ingo Molnar <mingo@kernel.org>
Thu, 23 Mar 2017 07:54:41 +0000 (08:54 +0100)
Generates better code (GCC-6.2.1):

  text        filename
  1576        defconfig-build/lib/refcount.o.pre
  1488        defconfig-build/lib/refcount.o.post
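
The size win comes from the calling convention of atomic_try_cmpxchg(): it returns a bool and, on failure, writes the value it actually observed back through its "old" pointer, so the retry loop no longer needs a separate compare, branch and reload of the old value. A minimal user-space sketch of the same pattern, using C11 atomics rather than the kernel primitives (illustrative only, not the kernel implementation):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <limits.h>

  /*
   * Illustrative sketch: atomic_compare_exchange_weak_explicit() follows
   * the same "try" convention as atomic_try_cmpxchg() -- it returns a
   * bool and, on failure, stores the observed value back into 'val'.
   */
  static bool sketch_add_not_zero(unsigned int i, _Atomic unsigned int *refs)
  {
          unsigned int new, val = atomic_load_explicit(refs, memory_order_relaxed);

          do {
                  if (!val)
                          return false;

                  if (val == UINT_MAX)
                          return true;

                  new = val + i;
                  if (new < val)
                          new = UINT_MAX;

                  /* On failure 'val' is refreshed in place; no extra reload. */
          } while (!atomic_compare_exchange_weak_explicit(refs, &val, new,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed));

          return true;
  }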

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
lib/refcount.c

index 8e206ce5609b50f34ee2326b52a06c25d76e7d90..f42124ccf2951a915cf332632d7007627efc64c9 100644
@@ -57,9 +57,9 @@
  */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-       unsigned int old, new, val = atomic_read(&r->refs);
+       unsigned int new, val = atomic_read(&r->refs);
 
-       for (;;) {
+       do {
                if (!val)
                        return false;
 
@@ -69,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
                new = val + i;
                if (new < val)
                        new = UINT_MAX;
-               old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-               if (old == val)
-                       break;
 
-               val = old;
-       }
+       } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -118,9 +114,9 @@ EXPORT_SYMBOL_GPL(refcount_add);
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-       unsigned int old, new, val = atomic_read(&r->refs);
+       unsigned int new, val = atomic_read(&r->refs);
 
-       for (;;) {
+       do {
                new = val + 1;
 
                if (!val)
@@ -129,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
                if (unlikely(!new))
                        return true;
 
-               old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-               if (old == val)
-                       break;
-
-               val = old;
-       }
+       } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -182,9 +173,9 @@ EXPORT_SYMBOL_GPL(refcount_inc);
  */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-       unsigned int old, new, val = atomic_read(&r->refs);
+       unsigned int new, val = atomic_read(&r->refs);
 
-       for (;;) {
+       do {
                if (unlikely(val == UINT_MAX))
                        return false;
 
@@ -194,12 +185,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
                        return false;
                }
 
-               old = atomic_cmpxchg_release(&r->refs, val, new);
-               if (old == val)
-                       break;
-
-               val = old;
-       }
+       } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
        return !new;
 }
@@ -258,7 +244,9 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-       return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+       int val = 1;
+
+       return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
@@ -275,9 +263,9 @@ EXPORT_SYMBOL_GPL(refcount_dec_if_one);
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-       unsigned int old, new, val = atomic_read(&r->refs);
+       unsigned int new, val = atomic_read(&r->refs);
 
-       for (;;) {
+       do {
                if (unlikely(val == UINT_MAX))
                        return true;
 
@@ -290,12 +278,7 @@ bool refcount_dec_not_one(refcount_t *r)
                        return true;
                }
 
-               old = atomic_cmpxchg_release(&r->refs, val, new);
-               if (old == val)
-                       break;
-
-               val = old;
-       }
+       } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
        return true;
 }