extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
-extern int __memcmp(const void *, const void *, __kernel_size_t);
extern int __strncmp(const char *, const char *, __kernel_size_t);
extern int __ashrdi3(int, int);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memcmp);
EXPORT_SYMBOL(__strncmp);
EXPORT_SYMBOL(__memmove);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
-extern int __memcmp(const void *, const void *, __kernel_size_t);
extern __kernel_size_t strlen(const char *);
extern void sys_sigsuspend(void);
extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memcmp);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(csum_partial);
lib-y += strlen.o
lib-y += checksum_$(BITS).o
lib-$(CONFIG_SPARC32) += blockops.o
-lib-y += memscan_$(BITS).o memcmp_$(BITS).o strncmp_$(BITS).o
+lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
lib-y += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
--- /dev/null
+/* Sparc optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
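+/*
+ * int memcmp(const void *s1, const void *s2, size_t n)
+ *
+ * %o0 = s1, %o1 = s2, %o2 = n.  Returns 0 when the regions match,
+ * otherwise the signed difference of the first mismatching byte pair.
+ * BRANCH32() from <asm/asm.h> emits a plain branch on sparc32 and a
+ * predicted %icc branch on sparc64, which is what lets this single
+ * file replace memcmp_32.S and memcmp_64.S.
+ */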
+ .text
+ENTRY(memcmp)
+ cmp %o2, 0			! any bytes left to compare?
+1: BRANCH32(be, pn, 2f)	! count exhausted: return 0
+ nop
+ ldub [%o0], %g7		! next byte of each buffer
+ ldub [%o1], %g3
+ sub %o2, 1, %o2		! decrement count
+ add %o0, 1, %o0		! advance both pointers
+ add %o1, 1, %o1
+ subcc %g7, %g3, %g3		! byte difference, sets condition codes
+ BRANCH32(be, pt, 1b)		! bytes equal: keep looping
+ cmp %o2, 0			! delay slot: recheck count for the branch at 1:
+ retl
+ mov %g3, %o0			! mismatch: return s1[i] - s2[i]
+2: retl
+ mov 0, %o0			! regions are equal: return 0
+ENDPROC(memcmp)
+++ /dev/null
- .text
- .align 4
- .global __memcmp, memcmp
-__memcmp:
-memcmp:
- cmp %o2, 0
- ble L3
- mov 0, %g3
-L5:
- ldub [%o0], %g2
- ldub [%o1], %g3
- sub %g2, %g3, %g2
- mov %g2, %g3
- sll %g2, 24, %g2
-
- cmp %g2, 0
- bne L3
- add %o0, 1, %o0
-
- add %o2, -1, %o2
-
- cmp %o2, 0
- bg L5
- add %o1, 1, %o1
-L3:
- sll %g3, 24, %o0
- sra %o0, 24, %o0
-
- retl
- nop
+++ /dev/null
-/*
- * Sparc64 optimized memcmp code.
- *
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-
- .text
- .align 32
- .globl __memcmp, memcmp
-__memcmp:
-memcmp:
- cmp %o2, 0 ! IEU1 Group
-loop: be,pn %icc, ret_0 ! CTI
- nop ! IEU0
- ldub [%o0], %g7 ! LSU Group
- ldub [%o1], %g3 ! LSU Group
- sub %o2, 1, %o2 ! IEU0
- add %o0, 1, %o0 ! IEU1
- add %o1, 1, %o1 ! IEU0 Group
- subcc %g7, %g3, %g3 ! IEU1 Group
- be,pt %icc, loop ! CTI
- cmp %o2, 0 ! IEU1 Group
-
-ret_n0: retl
- mov %g3, %o0
-ret_0: retl
- mov 0, %o0