From 418ccbe37f70f5021c4cd1cdcb0ce7f98d05f2dd Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Fri, 19 Oct 2007 07:13:02 +0200
Subject: [PATCH] x86: lock bitops

I missed an obvious one!

x86 CPUs are defined not to reorder stores past earlier loads, so there is
no hardware memory barrier required to implement a release-consistent store
(all stores are, by definition).

So ditch the generic lock bitops, and implement optimised versions for x86,
which removes the mfence from __clear_bit_unlock (which is already a useful
primitive for SLUB).

Signed-off-by: Nick Piggin
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/asm-x86/bitops_32.h | 43 ++++++++++++++++++++++++++++++++++++++++++-
 include/asm-x86/bitops_64.h | 42 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 3268a341cf49..36ebb5b02b4f 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -80,6 +80,20 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
 		:"Ir" (nr));
 }
 
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	clear_bit(nr, addr);
+}
+
 static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
@@ -87,6 +101,25 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
 		:"+m" (ADDR)
 		:"Ir" (nr));
 }
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	__clear_bit(nr, addr);
+}
+
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
@@ -145,6 +178,15 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
 	return oldbit;
 }
 
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -406,7 +448,6 @@ static inline int fls(int x)
 }
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index dacaa5f1febc..b4d47940b959 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -72,6 +72,20 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 		:"dIr" (nr));
 }
 
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	clear_bit(nr, addr);
+}
+
 static __inline__ void __clear_bit(int nr, volatile void * addr)
 {
 	__asm__ __volatile__(
@@ -80,6 +94,24 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
 		:"dIr" (nr));
 }
 
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+	barrier();
+	__clear_bit(nr, addr);
+}
+
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
@@ -136,6 +168,15 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 	return oldbit;
 }
 
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -412,7 +453,6 @@ static __inline__ int fls(int x)
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
 
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
-- 
2.20.1
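
[Note below the signature, not part of the commit: a minimal sketch of how
the new primitives would pair up as a bit spinlock. The example_bit_lock /
example_bit_unlock names are hypothetical, assuming ordinary kernel context
where <linux/bitops.h> and cpu_relax() are available.]

	static inline void example_bit_lock(unsigned long *word)
	{
		/* acquire: the lock-prefixed bts inside
		 * test_and_set_bit_lock() is a full barrier on x86 */
		while (test_and_set_bit_lock(0, word))
			cpu_relax();
	}

	static inline void example_bit_unlock(unsigned long *word)
	{
		/* release: barrier() plus the clear suffice, because x86
		 * does not reorder stores past earlier loads; the
		 * non-atomic __clear_bit_unlock() would be safe here only
		 * if no other CPU can concurrently modify other bits in
		 * the same word */
		clear_bit_unlock(0, word);
	}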