amd64: get rid of zeroing
author		Al Viro <viro@zeniv.linux.org.uk>
		Sat, 25 Mar 2017 22:36:22 +0000 (18:36 -0400)
committer	Al Viro <viro@zeniv.linux.org.uk>
		Tue, 28 Mar 2017 22:24:04 +0000 (18:24 -0400)
Stop zeroing the tail of the destination in the fault paths of the raw
copy primitives; report how much was left uncopied instead, and let
_copy_from_user() clear just those bytes when needed.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/usercopy.c
arch/x86/lib/usercopy_64.c

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0522d88a7f90b5c6bb5694d5654f87fb0e819a56..26410afcb8b04b4f1b448c50a09e7d1261462257 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -379,6 +379,18 @@ do {                                                                       \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)       \
+       asm volatile("\n"                                               \
+                    "1:        mov"itype" %2,%"rtype"1\n"              \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : "=r" (err), ltype(x)                             \
+                    : "m" (__m(addr)), "i" (errret), "0" (err))
+
 /*
  * This doesn't do __uaccess_begin/end - the exception handling
  * around it must do that.
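
The new macro is identical to __get_user_asm except for the fixup path: on a fault, __get_user_asm also clears the destination register before jumping back, so the caller sees zero, while the _nozero variant stores only the error code and leaves the destination untouched. A minimal userspace model of the two contracts (the function names and the `fault` flag are illustrative stand-ins, not kernel API):

#include <stdio.h>

/* `fault` simulates a page fault on the user-space read. */
static int get_user_zeroing(unsigned long *dst, unsigned long src, int fault)
{
	if (fault) {
		*dst = 0;	/* old fixup: destination zeroed on fault */
		return -14;	/* -EFAULT */
	}
	*dst = src;
	return 0;
}

static int get_user_nozero(unsigned long *dst, unsigned long src, int fault)
{
	if (fault)
		return -14;	/* new fixup: destination left as-is */
	*dst = src;
	return 0;
}

int main(void)
{
	unsigned long a = 0x1111, b = 0x1111;

	get_user_zeroing(&a, 0xdead, 1);
	get_user_nozero(&b, 0xdead, 1);
	printf("zeroing: %#lx, nozero: %#lx\n", a, b);	/* 0, 0x1111 */
	return 0;
}
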
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8ddadd93639e92e8b6c57e182a438a5773b0d823..142f0f1230beee5f3868442863f9c714c429d844 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -59,44 +59,44 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
        switch (size) {
        case 1:
                __uaccess_begin();
-               __get_user_asm(*(u8 *)dst, (u8 __user *)src,
+               __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin();
-               __get_user_asm(*(u16 *)dst, (u16 __user *)src,
+               __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin();
-               __get_user_asm(*(u32 *)dst, (u32 __user *)src,
+               __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin();
-               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+               __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin();
-               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+               __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (likely(!ret))
-                       __get_user_asm(*(u16 *)(8 + (char *)dst),
+                       __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
                                       (u16 __user *)(8 + (char __user *)src),
                                       ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin();
-               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+               __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (likely(!ret))
-                       __get_user_asm(*(u64 *)(8 + (char *)dst),
+                       __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
                                       (u64 __user *)(8 + (char __user *)src),
                                       ret, "q", "", "=r", 8);
                __uaccess_end();
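
Note the convention the later hunks rely on: each __get_user_asm_nozero() above passes the number of bytes still outstanding as its errret argument (1, 2, 4, 8, plus 10/2 and 16/8 for the chained cases), so on a fault the function returns how many bytes were not copied. A worked trace of the 10-byte case, with the fault point assumed for illustration: the 8-byte read succeeds, the 2-byte read at offset 8 faults, ret is set to 2, and dst[8..9] are left holding whatever was there before instead of being zeroed. Clearing such a tail is now the caller's job.
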
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index c074799bddae178257b0bfe78766faccac75f737..a851f3d199c22b9285155266e14d91e894492832 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -76,10 +76,11 @@ EXPORT_SYMBOL(_copy_to_user);
  */
 unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
 {
+       unsigned long res = n;
        if (access_ok(VERIFY_READ, from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
+               res = __copy_from_user_inatomic(to, from, n);
+       if (unlikely(res))
+               memset(to + n - res, 0, res);
+       return res;
 }
 EXPORT_SYMBOL(_copy_from_user);
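
The memset() expression clears exactly the bytes the copy never wrote: if res of the n requested bytes remain, they are the last res bytes, starting at to + n - res. A self-contained userspace check of that arithmetic (copy_partial() and the fault offset are assumptions for the demo, standing in for a copy that faults partway):

#include <stdio.h>
#include <string.h>

/* Stand-in for a copy that faults after `ok` bytes; returns bytes NOT copied. */
static unsigned long copy_partial(void *to, const void *from,
				  unsigned long n, unsigned long ok)
{
	memcpy(to, from, ok < n ? ok : n);
	return ok < n ? n - ok : 0;
}

int main(void)
{
	char src[16] = "ABCDEFGHIJKLMNOP";
	char dst[16];
	unsigned long n = sizeof(dst);

	memset(dst, 0x55, sizeof(dst));			/* stale contents */
	unsigned long res = copy_partial(dst, src, n, 10); /* "fault" at byte 10 */
	if (res)
		memset(dst + n - res, 0, res);		/* same expression as above */

	/* dst now holds "ABCDEFGHIJ" followed by six zero bytes */
	for (unsigned long i = 0; i < n; i++)
		printf("%02x ", (unsigned char)dst[i]);
	printf("\n");
	return 0;
}
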
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 69873589c0bae771b4b94ff7d6504402b1bb28d2..6c8b6a6c1b381dbb5de0b9f3826724d03f538aa3 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -80,9 +80,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
                        break;
        }
        clac();
-
-       /* If the destination is a kernel buffer, we always clear the end */
-       if (!__addr_ok(to))
-               memset(to, 0, len);
        return len;
 }
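
With _copy_from_user() now zeroing the uncopied tail itself, and only when something actually remains, the tail handler no longer needs the __addr_ok() heuristic to guess whether the destination is a kernel buffer; it simply reports how many bytes are left.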