From b89c3b165fbec605c60fd5a9e32d647e4c0befbb Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Sun, 26 Mar 2006 01:39:19 -0800
Subject: [PATCH] [PATCH] bitops: arm: use generic bitops

- remove __{,test_and_}{set,clear,change}_bit() and test_bit()

- if __LINUX_ARM_ARCH__ < 5

  - remove ffz()
  - remove __ffs()
  - remove generic_fls()
  - remove generic_ffs()

- remove generic_fls64()

- remove sched_find_first_bit()

- remove generic_hweight{32,16,8}()

Signed-off-by: Akinobu Mita
Cc: Russell King
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/Kconfig         |   4 ++
 include/asm-arm/bitops.h | 146 +++------------------------------------
 2 files changed, 14 insertions(+), 136 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0dd24ebdf6ac..bf2e72698d02 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -53,6 +53,10 @@ config RWSEM_GENERIC_SPINLOCK
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 
+config GENERIC_HWEIGHT
+	bool
+	default y
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index eaecd553e856..0ac54b1a8bad 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 	return res & mask;
 }
 
-/*
- * Now the non-atomic variants.  We let the compiler handle all
- * optimisations for these.  These are all _native_ endian.
- */
-static inline void __set_bit(int nr, volatile unsigned long *p)
-{
-	p[nr >> 5] |= (1UL << (nr & 31));
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *p)
-{
-	p[nr >> 5] &= ~(1UL << (nr & 31));
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *p)
-{
-	p[nr >> 5] ^= (1UL << (nr & 31));
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *p)
-{
-	unsigned long oldval, mask = 1UL << (nr & 31);
-
-	p += nr >> 5;
-
-	oldval = *p;
-	*p = oldval | mask;
-	return oldval & mask;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *p)
-{
-	unsigned long oldval, mask = 1UL << (nr & 31);
-
-	p += nr >> 5;
-
-	oldval = *p;
-	*p = oldval & ~mask;
-	return oldval & mask;
-}
-
-static inline int __test_and_change_bit(int nr, volatile unsigned long *p)
-{
-	unsigned long oldval, mask = 1UL << (nr & 31);
-
-	p += nr >> 5;
-
-	oldval = *p;
-	*p = oldval ^ mask;
-	return oldval & mask;
-}
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int __test_bit(int nr, const volatile unsigned long * p)
-{
-	return (p[nr >> 5] >> (nr & 31)) & 1UL;
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 /*
  * A note about Endian-ness.
@@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
 #define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
 #define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
-#define test_bit(nr,p)			__test_bit(nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
@@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
 #define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
 #define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
-#define test_bit(nr,p)			__test_bit(nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
@@ -292,55 +232,10 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 
 #if __LINUX_ARM_ARCH__ < 5
 
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
-	int k;
-
-	word = ~word;
-	k = 31;
-	if (word & 0x0000ffff) { k -= 16; word <<= 16; }
-	if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
-	if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
-	if (word & 0x30000000) { k -= 2;  word <<= 2;  }
-	if (word & 0x40000000) { k -= 1; }
-	return k;
-}
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-	int k;
-
-	k = 31;
-	if (word & 0x0000ffff) { k -= 16; word <<= 16; }
-	if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
-	if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
-	if (word & 0x30000000) { k -= 2;  word <<= 2;  }
-	if (word & 0x40000000) { k -= 1; }
-	return k;
-}
-
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
-#define fls64(x)   generic_fls64(x)
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-#define ffs(x) generic_ffs(x)
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
 
 #else
 
@@ -381,37 +276,16 @@ static inline int constant_fls(int x)
 #define fls(x) \
 	( __builtin_constant_p(x) ? constant_fls(x) : \
 	  ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
-#define fls64(x)   generic_fls64(x)
 #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
 #define __ffs(x) (ffs(x) - 1)
 #define ffz(x) __ffs( ~(x) )
 
 #endif
 
-/*
- * Find first bit set in a 168-bit bitmap, where the first
- * 128 bits are unlikely to be set.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-	unsigned long v;
-	unsigned int off;
-
-	for (off = 0; v = b[off], off < 4; off++) {
-		if (unlikely(v))
-			break;
-	}
-	return __ffs(v) + off * 32;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
+#include <asm-generic/bitops/fls64.h>
 
-#define hweight32(x)	generic_hweight32(x)
-#define hweight16(x)	generic_hweight16(x)
-#define hweight8(x)	generic_hweight8(x)
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
 
 /*
  * Ext2 is defined to use little-endian byte ordering.
@@ -426,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
 #define ext2_clear_bit_atomic(lock,nr,p)	\
 		test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_test_bit(nr,p)			\
-		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define ext2_find_first_zero_bit(p,sz)		\
 		_find_first_zero_bit_le(p,sz)
 #define ext2_find_next_zero_bit(p,sz,off)	\
@@ -439,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
 #define minix_set_bit(nr,p)			\
 		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_bit(nr,p)			\
-		__test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_and_set_bit(nr,p)		\
 		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
 #define minix_test_and_clear_bit(nr,p)	\
-- 
2.20.1
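Note (editor's addition, not part of the patch): the generic helpers this change switches ARM over to live in include/asm-generic/bitops/. As a rough illustration of what <asm-generic/bitops/non-atomic.h> supplies in place of the removed ARM-private routines, the standalone userspace sketch below mirrors the kernel API names but is not the kernel header itself; its bodies are simplified. One practical difference: the removed ARM versions hard-coded a 32-bit word (nr >> 5, nr & 31), while the generic versions index by BITS_PER_LONG.

/*
 * Illustrative sketch only -- an approximation of the generic
 * non-atomic bitops, written as ordinary userspace C so it can be
 * compiled and run on its own.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Set bit 'nr' in the bitmap at 'addr' (non-atomic). */
static inline void __set_bit(int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

/* Clear bit 'nr' in the bitmap at 'addr' (non-atomic). */
static inline void __clear_bit(int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] &= ~BIT_MASK(nr);
}

/* Set bit 'nr' and return its previous value (non-atomic). */
static inline int __test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = addr + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

/* Return the current value of bit 'nr'. */
static inline int test_bit(int nr, const unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	int hi = (int)BITS_PER_LONG + 1;	/* lands in map[1] */
	int first, second;

	__set_bit(5, map);
	printf("bit 5 set: %d\n", test_bit(5, map));

	first = __test_and_set_bit(hi, map);	/* 0: bit was clear */
	second = __test_and_set_bit(hi, map);	/* 1: bit already set */
	printf("test_and_set bit %d: %d then %d\n", hi, first, second);

	__clear_bit(5, map);
	printf("bit 5 after __clear_bit: %d\n", test_bit(5, map));
	return 0;
}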