m32r: get rid of zeroing
author    Al Viro <viro@zeniv.linux.org.uk>  Sun, 19 Mar 2017 20:11:13 +0000 (16:11 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>  Tue, 28 Mar 2017 22:23:38 +0000 (18:23 -0400)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
arch/m32r/include/asm/uaccess.h
arch/m32r/lib/usercopy.c
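
What changes semantically: __copy_user_zeroing() both copied and cleared
the destination when the copy faulted (and __generic_copy_from_user()
memset() the whole buffer when access_ok() failed); after this patch the
raw primitive only reports how many bytes were left uncopied, and the
checked wrapper zeroes exactly that tail. A minimal userspace model of
the new convention (mock_copy_user() is an invented stand-in for the asm
__copy_user(), with an explicit fault point):

#include <assert.h>
#include <string.h>

/* Invented stand-in for __copy_user(): "faults" after limit bytes and
 * returns the count left uncopied (0 on full success). */
static unsigned long mock_copy_user(void *to, const void *from,
				    unsigned long n, unsigned long limit)
{
	unsigned long done = n < limit ? n : limit;

	memcpy(to, from, done);
	return n - done;
}

/* Mirrors the reworked __generic_copy_from_user(): zero only the
 * uncopied tail, then hand the residue back to the caller. */
static unsigned long mock_copy_from_user(void *to, const void *from,
					 unsigned long n, unsigned long limit)
{
	unsigned long ret = mock_copy_user(to, from, n, limit);

	if (ret)
		memset((char *)to + n - ret, 0, ret);
	return ret;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];

	/* Fault after 5 of 8 bytes: 3 reported uncopied, dst[5..7] zeroed. */
	assert(mock_copy_from_user(dst, src, 8, 5) == 3);
	assert(dst[4] == 'e' && dst[5] == 0 && dst[7] == 0);
	return 0;
}

On full success ret is 0 and nothing is cleared; on a failed access_ok()
the tail is the whole buffer, so the guarantee that no uninitialized
kernel memory leaks to the caller is preserved.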

diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index e47dcc05a0f2bac9e984b1878bd767c7466c34d7..d5c5e68fa2fba60cddfb9eea04e62d6f63e4f42d 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -460,77 +460,13 @@ do {                                                                      \
                : "r14", "memory");                                     \
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)                            \
-do {                                                                   \
-       unsigned long __dst, __src, __c;                                \
-       __asm__ __volatile__ (                                          \
-               "       mv      r14, %0\n"                              \
-               "       or      r14, %1\n"                              \
-               "       beq     %0, %1, 9f\n"                           \
-               "       beqz    %2, 9f\n"                               \
-               "       and3    r14, r14, #3\n"                         \
-               "       bnez    r14, 2f\n"                              \
-               "       and3    %2, %2, #3\n"                           \
-               "       beqz    %3, 2f\n"                               \
-               "       addi    %0, #-4         ; word_copy \n"         \
-               "       .fillinsn\n"                                    \
-               "0:     ld      r14, @%1+\n"                            \
-               "       addi    %3, #-1\n"                              \
-               "       .fillinsn\n"                                    \
-               "1:     st      r14, @+%0\n"                            \
-               "       bnez    %3, 0b\n"                               \
-               "       beqz    %2, 9f\n"                               \
-               "       addi    %0, #4\n"                               \
-               "       .fillinsn\n"                                    \
-               "2:     ldb     r14, @%1        ; byte_copy \n"         \
-               "       .fillinsn\n"                                    \
-               "3:     stb     r14, @%0\n"                             \
-               "       addi    %1, #1\n"                               \
-               "       addi    %2, #-1\n"                              \
-               "       addi    %0, #1\n"                               \
-               "       bnez    %2, 2b\n"                               \
-               "       .fillinsn\n"                                    \
-               "9:\n"                                                  \
-               ".section .fixup,\"ax\"\n"                              \
-               "       .balign 4\n"                                    \
-               "5:     addi    %3, #1\n"                               \
-               "       addi    %1, #-4\n"                              \
-               "       .fillinsn\n"                                    \
-               "6:     slli    %3, #2\n"                               \
-               "       add     %2, %3\n"                               \
-               "       addi    %0, #4\n"                               \
-               "       .fillinsn\n"                                    \
-               "7:     ldi     r14, #0         ; store zero \n"        \
-               "       .fillinsn\n"                                    \
-               "8:     addi    %2, #-1\n"                              \
-               "       stb     r14, @%0        ; ACE? \n"              \
-               "       addi    %0, #1\n"                               \
-               "       bnez    %2, 8b\n"                               \
-               "       seth    r14, #high(9b)\n"                       \
-               "       or3     r14, r14, #low(9b)\n"                   \
-               "       jmp     r14\n"                                  \
-               ".previous\n"                                           \
-               ".section __ex_table,\"a\"\n"                           \
-               "       .balign 4\n"                                    \
-               "       .long 0b,6b\n"                                  \
-               "       .long 1b,5b\n"                                  \
-               "       .long 2b,7b\n"                                  \
-               "       .long 3b,7b\n"                                  \
-               ".previous\n"                                           \
-               : "=&r" (__dst), "=&r" (__src), "=&r" (size),           \
-                 "=&r" (__c)                                           \
-               : "0" (to), "1" (from), "2" (size), "3" (size / 4)      \
-               : "r14", "memory");                                     \
-} while (0)
-
-
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
  */
 static inline unsigned long __generic_copy_from_user_nocheck(void *to,
        const void __user *from, unsigned long n)
 {
-       __copy_user_zeroing(to, from, n);
+       __copy_user(to, from, n);
        return n;
 }
 
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index fd03f2731f2005591e224ef7b4bbc5d3073d05a7..6aacf5ba0a58302e6824d3d5b1886ee701e09556 100644
--- a/arch/m32r/lib/usercopy.c
+++ b/arch/m32r/lib/usercopy.c
@@ -23,12 +23,13 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 unsigned long
 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       unsigned long ret = n;
        prefetchw(to);
        if (access_ok(VERIFY_READ, from, n))
-               __copy_user_zeroing(to,from,n);
-       else
-               memset(to, 0, n);
-       return n;
+       ret = __generic_copy_from_user_nocheck(to, from, n);
+       if (unlikely(ret))
+               memset(to + n - ret, 0, ret);
+       return ret;
 }
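
A note on the uaccess.h side: __generic_copy_from_user_nocheck() can
simply "return n" because __copy_user() is a do { ... } while (0)
statement macro whose size argument doubles as an in/out asm operand
(the "=&r" (size) output), so the residue is written straight back into
the caller's variable. A hypothetical SHRINKING_COPY() macro sketching
that by-name convention:

#include <stdio.h>
#include <string.h>

/* SHRINKING_COPY() is hypothetical; like __copy_user() it takes its
 * size argument by name and leaves the residue behind in the caller's
 * variable instead of returning it. */
#define SHRINKING_COPY(to, from, size)					\
do {									\
	unsigned long __done = (size) > 4 ? 4 : (size);			\
	memcpy((to), (from), __done);	/* pretend a fault stops us */	\
	(size) -= __done;						\
} while (0)

int main(void)
{
	char dst[16];
	unsigned long n = 10;

	SHRINKING_COPY(dst, "abcdefghij", n);
	printf("bytes left: %lu\n", n);		/* prints 6 */
	return 0;
}

Being a statement, such a macro cannot sit on the right-hand side of an
assignment; a caller that wants the residue in a separate variable goes
through a wrapper like __generic_copy_from_user_nocheck() instead, as
the usercopy.c hunk above does.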