[PATCH] remove powerpc bitops in favor of existing generic bitops
author    Jon Mason <jdmason@us.ibm.com>
          Fri, 19 May 2006 20:35:32 +0000 (15:35 -0500)
committer Paul Mackerras <paulus@samba.org>
          Wed, 24 May 2006 06:08:58 +0000 (16:08 +1000)
A big-endian-safe bitops implementation already exists in
lib/find_next_bit.c.  Its code is more than 90% identical to the
powerpc-specific version, which makes the powerpc version redundant.
This patch makes the changes needed to use the generic bitops on
powerpc and removes the powerpc-specific implementation.

Signed-off-by: Jon Mason <jdmason@us.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/Kconfig
arch/powerpc/lib/Makefile
arch/powerpc/lib/bitops.c [deleted file]
arch/ppc/Kconfig
include/asm-powerpc/bitops.h

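A note on the little-endian (ext2) helpers: the patch only renames the
backing function to generic_find_next_zero_le_bit(), which the generic
code supplies; the ext2_* wrappers in include/asm-powerpc/bitops.h keep
their names. A hypothetical filesystem-side caller, sketched under that
assumption:

    #include <asm/bitops.h>

    /* First clear bit in a little-endian on-disk bitmap,
     * or nbits if the bitmap is full. */
    static unsigned long first_free_le(void *bitmap,
                                       unsigned long nbits)
    {
            return ext2_find_first_zero_bit(bitmap, nbits);
    }
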
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6729c98b66f969fb16566bcc949039c776370a24..75ba0ec49e548313eb03a53937cd5e63b4637c88 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -45,6 +45,10 @@ config GENERIC_CALIBRATE_DELAY
        bool
        default y
 
+config GENERIC_FIND_NEXT_BIT
+       bool
+       default y
+
 config PPC
        bool
        default y
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 34f5c2e074c987ea3346ad1933c03e745328bb10..ae354d65b82ccfb66311c4a652905d8ef11012e3 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -7,7 +7,6 @@ obj-y                   := string.o strcase.o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o checksum_32.o
 endif
 
-obj-y                  += bitops.o
 obj-$(CONFIG_PPC64)    += checksum_64.o copypage_64.o copyuser_64.o \
                           memcpy_64.o usercopy_64.o mem_64.o string.o \
                           strcase.o
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
deleted file mode 100644
index f68ad71..0000000
--- a/arch/powerpc/lib/bitops.c
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <linux/types.h>
-#include <linux/module.h>
-#include <asm/byteorder.h>
-#include <asm/bitops.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-                           unsigned long offset)
-{
-       const unsigned long *p = addr + BITOP_WORD(offset);
-       unsigned long result = offset & ~(BITS_PER_LONG-1);
-       unsigned long tmp;
-
-       if (offset >= size)
-               return size;
-       size -= result;
-       offset %= BITS_PER_LONG;
-       if (offset) {
-               tmp = *(p++);
-               tmp &= (~0UL << offset);
-               if (size < BITS_PER_LONG)
-                       goto found_first;
-               if (tmp)
-                       goto found_middle;
-               size -= BITS_PER_LONG;
-               result += BITS_PER_LONG;
-       }
-       while (size & ~(BITS_PER_LONG-1)) {
-               if ((tmp = *(p++)))
-                       goto found_middle;
-               result += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-       }
-       if (!size)
-               return result;
-       tmp = *p;
-
-found_first:
-       tmp &= (~0UL >> (BITS_PER_LONG - size));
-       if (tmp == 0UL)         /* Are any bits set? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + __ffs(tmp);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-                                unsigned long offset)
-{
-       const unsigned long *p = addr + BITOP_WORD(offset);
-       unsigned long result = offset & ~(BITS_PER_LONG-1);
-       unsigned long tmp;
-
-       if (offset >= size)
-               return size;
-       size -= result;
-       offset %= BITS_PER_LONG;
-       if (offset) {
-               tmp = *(p++);
-               tmp |= ~0UL >> (BITS_PER_LONG - offset);
-               if (size < BITS_PER_LONG)
-                       goto found_first;
-               if (~tmp)
-                       goto found_middle;
-               size -= BITS_PER_LONG;
-               result += BITS_PER_LONG;
-       }
-       while (size & ~(BITS_PER_LONG-1)) {
-               if (~(tmp = *(p++)))
-                       goto found_middle;
-               result += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-       }
-       if (!size)
-               return result;
-       tmp = *p;
-
-found_first:
-       tmp |= ~0UL << size;
-       if (tmp == ~0UL)        /* Are any bits zero? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + ffz(tmp);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
-
-static inline unsigned int ext2_ilog2(unsigned int x)
-{
-       int lz;
-
-       asm("cntlzw %0,%1": "=r"(lz):"r"(x));
-       return 31 - lz;
-}
-
-static inline unsigned int ext2_ffz(unsigned int x)
-{
-       u32 rc;
-       if ((x = ~x) == 0)
-               return 32;
-       rc = ext2_ilog2(x & -x);
-       return rc;
-}
-
-unsigned long find_next_zero_le_bit(const unsigned long *addr,
-                                   unsigned long size, unsigned long offset)
-{
-       const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
-       unsigned int result = offset & ~31;
-       unsigned int tmp;
-
-       if (offset >= size)
-               return size;
-       size -= result;
-       offset &= 31;
-       if (offset) {
-               tmp = cpu_to_le32p(p++);
-               tmp |= ~0U >> (32 - offset);    /* bug or feature ? */
-               if (size < 32)
-                       goto found_first;
-               if (tmp != ~0)
-                       goto found_middle;
-               size -= 32;
-               result += 32;
-       }
-       while (size >= 32) {
-               if ((tmp = cpu_to_le32p(p++)) != ~0)
-                       goto found_middle;
-               result += 32;
-               size -= 32;
-       }
-       if (!size)
-               return result;
-       tmp = cpu_to_le32p(p);
-found_first:
-       tmp |= ~0 << size;
-       if (tmp == ~0)          /* Are any bits zero? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + ext2_ffz(tmp);
-}
-EXPORT_SYMBOL(find_next_zero_le_bit);
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index e9a8f5d1dfcdf9f4e8b7c8ac157e5d8e6d9abaf3..b55de4f42aec1a8b4db42d2379c43cbf7f89b4c5 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -40,6 +40,10 @@ config GENERIC_NVRAM
        bool
        default y
 
+config GENERIC_FIND_NEXT_BIT
+       bool
+       default y
+
 config SCHED_NO_NO_OMIT_FRAME_POINTER
        bool
        default y
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index d1c2a4405660dbecbcaa859c4abbc7b0fea2bba3..76e2f08c3c83016915a7831e3402a6baa638284d 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -288,8 +288,8 @@ static __inline__ int test_le_bit(unsigned long nr,
 #define __test_and_clear_le_bit(nr, addr) \
        __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
 
-#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
-unsigned long find_next_zero_le_bit(const unsigned long *addr,
+#define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
                                    unsigned long size, unsigned long offset);
 
 /* Bitmap functions for the ext2 filesystem */
@@ -309,7 +309,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr,
 #define ext2_find_first_zero_bit(addr, size) \
        find_first_zero_le_bit((unsigned long*)addr, size)
 #define ext2_find_next_zero_bit(addr, size, off) \
-       find_next_zero_le_bit((unsigned long*)addr, size, off)
+       generic_find_next_zero_le_bit((unsigned long*)addr, size, off)
 
 /* Bitmap functions for the minix filesystem.  */