From: Jan Beulich
Date: Thu, 13 Mar 2008 09:08:51 +0000 (+0000)
Subject: x86: bitops asm constraint fixes
X-Git-Tag: MMI-PSA29.97-13-9~35624^2~274
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=709f744f18ebc3a810d29c8d5502bf20c3cecc70;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

x86: bitops asm constraint fixes

This (simplified) piece of code didn't behave as expected due to
incorrect constraints in some of the bitops functions, when
X86_FEATURE_xxx refers to a bit outside the first long:

int test(struct cpuinfo_x86 *c) {
        if (cpu_has(c, X86_FEATURE_xxx))
                clear_cpu_cap(c, X86_FEATURE_xxx);
        return cpu_has(c, X86_FEATURE_xxx);
}

I'd really like to understand, though, what the policy of (not) having
a "memory" clobber in these operations is - currently, this appears to
be totally inconsistent.

Also, many comments on the non-atomic functions say those may also be
re-ordered - this contradicts the use of "asm volatile" in there, which
again I'd like to understand.

Likewise, using 'int' for the 'nr' parameter and 'void *' for the
'addr' one conflicts with Documentation/atomic_ops.txt, especially
because bt{,c,r,s} indeed take the bit index as signed (which hence
would really need special precaution) and access the full 32 bits
(64 bits on x86-64, if 'unsigned long' were used properly here)
pointed at, so invalid uses like referencing a 'char' array cannot
currently be caught.

Finally, the code both with and without this patch relies heavily on
the -fno-strict-aliasing compiler switch, and I'm not certain this
really is a good idea.

In the light of all of this I'm sending this as RFC, as fixing the
above might warrant a much bigger patch...
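As a concrete sketch of the constraint problem (the function name below
is made up for illustration; the body is the pre-patch __clear_bit()
with ADDR expanded, assuming the gcc >= 4.1 "+m" branch):

/*
 * The asm claims to write only the first long at 'addr', so for
 * nr >= BITS_PER_LONG the compiler may keep using a copy of the
 * affected word that it already holds in a register - which is how
 * test() above can still see the bit as set after clearing it.
 */
static inline void old_clear_bit(int nr, volatile void *addr)
{
        asm volatile("btr %1,%0"
                     : "+m" (*(volatile long *) addr)  /* word 0 only */
                     : "Ir" (nr));     /* but btr writes word nr/32 */
}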
Signed-off-by: Jan Beulich
Signed-off-by: Ingo Molnar
---

diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1a23ce1a5697..7a76555b676a 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -24,9 +24,12 @@
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
 #define ADDR "=m" (*(volatile long *) addr)
+#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5])
 #else
 #define ADDR "+m" (*(volatile long *) addr)
+#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5])
 #endif
+#define BASE_ADDR "m" (*(volatile int *) addr)
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -79,9 +82,8 @@ static inline void __set_bit(int nr, volatile void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-        asm volatile(LOCK_PREFIX "btr %1,%0"
-                     : ADDR
-                     : "Ir" (nr));
+        asm volatile(LOCK_PREFIX "btr %1,%2"
+                     : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /*
@@ -100,7 +102,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
 
 static inline void __clear_bit(int nr, volatile void *addr)
 {
-        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+        asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /*
@@ -135,7 +137,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  */
 static inline void __change_bit(int nr, volatile void *addr)
 {
-        asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+        asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /**
@@ -149,8 +151,8 @@ static inline void __change_bit(int nr, volatile void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-        asm volatile(LOCK_PREFIX "btc %1,%0"
-                     : ADDR : "Ir" (nr));
+        asm volatile(LOCK_PREFIX "btc %1,%2"
+                     : BIT_ADDR : "Ir" (nr), BASE_ADDR);
 }
 
 /**
@@ -198,10 +200,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
         int oldbit;
 
-        asm("bts %2,%1\n\t"
-            "sbb %0,%0"
-            : "=r" (oldbit), ADDR
-            : "Ir" (nr));
+        asm volatile("bts %2,%3\n\t"
+                     "sbb %0,%0"
+                     : "=r" (oldbit), BIT_ADDR
+                     : "Ir" (nr), BASE_ADDR);
         return oldbit;
 }
 
@@ -238,10 +240,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
         int oldbit;
 
-        asm volatile("btr %2,%1\n\t"
+        asm volatile("btr %2,%3\n\t"
                      "sbb %0,%0"
-                     : "=r" (oldbit), ADDR
-                     : "Ir" (nr));
+                     : "=r" (oldbit), BIT_ADDR
+                     : "Ir" (nr), BASE_ADDR);
         return oldbit;
 }
 
@@ -250,10 +252,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
         int oldbit;
 
-        asm volatile("btc %2,%1\n\t"
+        asm volatile("btc %2,%3\n\t"
                      "sbb %0,%0"
-                     : "=r" (oldbit), ADDR
-                     : "Ir" (nr) : "memory");
+                     : "=r" (oldbit), BIT_ADDR
+                     : "Ir" (nr), BASE_ADDR);
         return oldbit;
 }
 
@@ -288,10 +290,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 {
         int oldbit;
 
-        asm volatile("bt %2,%1\n\t"
+        asm volatile("bt %2,%3\n\t"
                      "sbb %0,%0"
                      : "=r" (oldbit)
-                     : "m" (*(unsigned long *)addr), "Ir" (nr));
+                     : "m" (((volatile const int *)addr)[nr >> 5]),
+                       "Ir" (nr), BASE_ADDR);
         return oldbit;
 }
 
@@ -310,6 +313,8 @@ static int test_bit(int nr, const volatile unsigned long *addr);
                          constant_test_bit((nr),(addr)) :       \
                          variable_test_bit((nr),(addr)))
 
+#undef BASE_ADDR
+#undef BIT_ADDR
 #undef ADDR
 
 #ifdef CONFIG_X86_32
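
For completeness, a small stand-alone check of the patched constraint
form (demo_clear_bit and the values below are illustrative only and not
part of the patch; it assumes the gcc >= 4.1 "+m" branch of the macros):

#include <stdio.h>

/*
 * Same operand layout as the patched __clear_bit() once BIT_ADDR and
 * BASE_ADDR are expanded: %0 is the 32-bit word actually written,
 * %1 the bit index, %2 the base address the btr instruction uses.
 */
static inline void demo_clear_bit(int nr, volatile void *addr)
{
        asm volatile("btr %1,%2"
                     : "+m" (((volatile int *) addr)[nr >> 5])
                     : "Ir" (nr), "m" (*(volatile int *) addr));
}

int main(void)
{
        unsigned int bits[4] = { ~0u, ~0u, ~0u, ~0u };

        demo_clear_bit(37, bits);       /* clears bit 5 of bits[1] */
        printf("%08x %08x %08x %08x\n", bits[0], bits[1], bits[2], bits[3]);
        return 0;       /* prints: ffffffff ffffffdf ffffffff ffffffff */
}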