hexagon: parenthesize registers in asm predicates
author	Nick Desaulniers <ndesaulniers@google.com>
Sat, 4 Jan 2020 20:59:59 +0000 (12:59 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Jan 2020 18:45:54 +0000 (19:45 +0100)
[ Upstream commit 780a0cfda9006a9a22d6473c2d4c527f5c68eb2e ]

Hexagon requires that register predicates in assembly be parenthesized:
"if (!P3)" rather than "if !P3".
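
The bare form was accepted by the assemblers previously used to build
Hexagon kernels, but LLVM's integrated assembler (the subject of the
ClangBuiltLinux issue linked below) rejects it. A minimal sketch of the
required syntax, modeled on the __xchg() hunk in cmpxchg.h below; the
helper name is hypothetical and not part of the commit:

	/* Hypothetical sketch, not from the patch: a load-locked /
	 * store-conditional retry loop written with the parenthesized
	 * predicate syntax that this patch converts the kernel to.
	 */
	static inline unsigned long example_xchg(volatile unsigned long *ptr,
						 unsigned long x)
	{
		unsigned long old;

		__asm__ __volatile__(
		"1:	%0 = memw_locked(%1);\n"	/* load-locked old value */
		"	memw_locked(%1,P0) = %2;\n"	/* conditional store sets P0 */
		"	if (!P0) jump 1b;\n"		/* store failed, so retry */
		: "=&r" (old)
		: "r" (ptr), "r" (x)
		: "memory", "p0");

		return old;
	}
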

Link: https://github.com/ClangBuiltLinux/linux/issues/754
Link: http://lkml.kernel.org/r/20191209222956.239798-3-ndesaulniers@google.com
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Suggested-by: Sid Manning <sidneym@codeaurora.org>
Acked-by: Brian Cain <bcain@codeaurora.org>
Cc: Lee Jones <lee.jones@linaro.org>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Tuowen Zhao <ztuowen@gmail.com>
Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexios Zavras <alexios.zavras@intel.com>
Cc: Allison Randal <allison@lohutok.net>
Cc: Will Deacon <will@kernel.org>
Cc: Richard Fontana <rfontana@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/hexagon/include/asm/atomic.h
arch/hexagon/include/asm/bitops.h
arch/hexagon/include/asm/cmpxchg.h
arch/hexagon/include/asm/futex.h
arch/hexagon/include/asm/spinlock.h
arch/hexagon/kernel/vm_entry.S

diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index fb3dfb2a667ee11626cf9c999e5b303fcc9b9c83..d4e283b4f335e28ad924b723791df6066a7c465a 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -105,7 +105,7 @@ static inline void atomic_##op(int i, atomic_t *v)                  \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -121,7 +121,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -138,7 +138,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                     \
                "1:     %0 = memw_locked(%2);\n"                        \
                "       %1 = "#op "(%0,%3);\n"                          \
                "       memw_locked(%2,P3)=%1;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -187,7 +187,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
                "       }"
                "       memw_locked(%2, p3) = %1;"
                "       {"
-               "               if !p3 jump 1b;"
+               "               if (!p3) jump 1b;"
                "       }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 2691a1857d203db2522ae178fe744225c71d2da9..634306cda00675b0b172e87ad6162f6bf23df722 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -52,7 +52,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -76,7 +76,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -102,7 +102,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -237,7 +237,7 @@ static inline int ffs(int x)
        int r;
 
        asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-               "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+               "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
                : "=&r" (r)
                : "r" (x)
                : "p0");
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index a6e34e2acbbaf6cc07f377db35faa7c744ba23e3..db258424059f4f5ab5257d928bd39e4071fe4a03 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
        __asm__ __volatile__ (
        "1:     %0 = memw_locked(%1);\n"    /*  load into retval */
        "       memw_locked(%1,P0) = %2;\n" /*  store into memory */
-       "       if !P0 jump 1b;\n"
+       "       if (!P0) jump 1b;\n"
        : "=&r" (retval)
        : "r" (ptr), "r" (x)
        : "memory", "p0"
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index c889f5993ecd35f1646e06fa1fab846860655ac6..e8e5e47afb377ad697d9b9de2ec0a9bd203c8df6 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -16,7 +16,7 @@
            /* For example: %1 = %4 */ \
            insn \
        "2: memw_locked(%3,p2) = %1;\n" \
-       "   if !p2 jump 1b;\n" \
+       "   if (!p2) jump 1b;\n" \
        "   %1 = #0;\n" \
        "3:\n" \
        ".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        "1: %1 = memw_locked(%3)\n"
        "   {\n"
        "      p2 = cmp.eq(%1,%4)\n"
-       "      if !p2.new jump:NT 3f\n"
+       "      if (!p2.new) jump:NT 3f\n"
        "   }\n"
        "2: memw_locked(%3,p2) = %5\n"
-       "   if !p2 jump 1b\n"
+       "   if (!p2) jump 1b\n"
        "3:\n"
        ".section .fixup,\"ax\"\n"
        "4: %0 = #%6\n"
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index 53a8d588588787cba2b22202c2ee675fba83e6df..007056263b8eb88fe2591f37efb5b9496113d084 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -44,9 +44,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -60,7 +60,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
                "1:     R6 = memw_locked(%0);\n"
                "       R6 = add(R6,#-1);\n"
                "       memw_locked(%0,P3) = R6\n"
-               "       if !P3 jump 1b;\n"
+               "       if (!P3) jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -75,7 +75,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       { %0 = P3 }\n"
                "1:\n"
@@ -102,9 +102,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0)\n"
                "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -118,7 +118,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1)\n"
                "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
@@ -141,9 +141,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1b; R6 = #1; }\n"
+               "       { if (!P3) jump 1b; R6 = #1; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -163,7 +163,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+               "       { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 67c6ccc14770320c21f334b16c13e1c480a99cd7..9f4a73ff720300dd3641e65df84ecb9d501a16fe 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -382,7 +382,7 @@ ret_from_fork:
                R26.L = #LO(do_work_pending);
                R0 = #VM_INT_DISABLE;
        }
-       if P0 jump check_work_pending
+       if (P0) jump check_work_pending
        {
                R0 = R25;
                callr R24