metag/usercopy: Zero rest of buffer from copy_from_user
author James Hogan <james.hogan@imgtec.com>
Fri, 31 Mar 2017 10:14:02 +0000 (11:14 +0100)
committer Willy Tarreau <w@1wt.eu>
Tue, 20 Jun 2017 12:04:43 +0000 (14:04 +0200)
commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.

Currently we try to zero the destination for a failed read from userland
in fixup code in the usercopy.c macros. The rest of the destination
buffer is then zeroed from __copy_user_zeroing(), which is used for both
copy_from_user() and __copy_from_user().

Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
before the fixup code entry labels, and __copy_from_user() shouldn't even
be zeroing the rest of the buffer.
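
For illustration, the contract the two interfaces are meant to provide
(a minimal sketch; example_read() and uptr are hypothetical, not part
of this patch):

	#include <linux/uaccess.h>

	static int example_read(const char __user *uptr)
	{
		char buf[64];
		unsigned long left;

		/* copy_from_user() must zero whatever it failed to copy,
		 * so buf never exposes stale kernel memory. */
		left = copy_from_user(buf, uptr, sizeof(buf));
		if (left)
			return -EFAULT;	/* buf[64 - left .. 63] read as 0 */

		/* __copy_from_user() only reports the uncopied byte count
		 * and leaves the tail of buf untouched. */
		left = __copy_from_user(buf, uptr, sizeof(buf));
		return left ? -EFAULT : 0;
	}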

Move the zeroing out into copy_from_user() and rename
__copy_user_zeroing() to raw_copy_from_user() since it no longer does
any zeroing. This also conveniently matches the name needed for
RAW_COPY_USER support in a later patch.
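
The resulting wrapper, condensed from the uaccess.h hunk below, then
clears exactly the bytes that raw_copy_from_user() reports as uncopied:

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		/* (n - res) bytes were copied; zero the remaining res bytes */
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;
	}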

Fixes: 373cd784d0fc ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Willy Tarreau <w@1wt.eu>
arch/metag/include/asm/uaccess.h
arch/metag/lib/usercopy.c

arch/metag/include/asm/uaccess.h
index 7841f2290385fbe0b9ed7a8a1363ea3670197f02..9d523375f68ab00960593b5e61b70d98066f3c4c 100644
@@ -192,20 +192,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-                                                     const void __user *from,
-                                                     unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+                                       unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       unsigned long res = n;
        if (likely(access_ok(VERIFY_READ, from, n)))
-               return __copy_user_zeroing(to, from, n);
-       memset(to, 0, n);
-       return n;
+               res = raw_copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
arch/metag/lib/usercopy.c
index 94095200a4ff891156945b8542579a11d788f010..2792fc621088bcd1c3d7bfe58b8560ec32e6f880 100644
@@ -29,7 +29,6 @@
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
-               "       MOV D1Ar1,#0\n"                          \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
@@ -661,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
-               "3:     ADD  %2,%2,#1\n"        \
-               "       SETB [%0++],D1Ar1\n",   \
+               "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "       .long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
-               "3:     ADD  %2,%2,#2\n"                \
-               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -680,32 +677,26 @@ EXPORT_SYMBOL(__copy_user);
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
-               "5:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "       .long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
-               "3:     ADD  %2,%2,#4\n"                \
-               "       SETD [%0++],D1Ar1\n" FIXUP,     \
+               "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
-
 #define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
-               "       MOV D1Ar1,#0\n"                 \
-               "       MOV D0Ar2,#0\n"                 \
                "3:     ADD  %2,%2,#8\n"                \
                "3:     ADD  %2,%2,#8\n"                \
-               "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
@@ -765,11 +756,12 @@ EXPORT_SYMBOL(__copy_user);
                "SUB    %1, %1, D0Ar2\n")
 
 
                "SUB    %1, %1, D0Ar2\n")
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
-   inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
-                                 unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+                                unsigned long n)
 {
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
@@ -782,7 +774,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                __asm_copy_from_user_1(dst, src, retn);
                n--;
                if (retn)
-                       goto copy_exception_bytes;
+                       return retn + n;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
@@ -790,14 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
                if (retn)
-                       goto copy_exception_bytes;
+                       return retn + n;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
@@ -805,7 +797,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 
@@ -821,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 
@@ -837,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 #endif
@@ -847,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                n -= 4;
 
                if (retn)
-                       goto copy_exception_bytes;
+                       return retn + n;
        }
 
        /* If we get here, there were no memory read faults.  */
@@ -873,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;
-
- copy_exception_bytes:
-       /* We already have "retn" bytes cleared, and need to clear the
-          remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-          memset is preferred here, since this isn't speed-critical code and
-          we'd rather have this a leaf-function than calling memset.  */
-       {
-               char *endp;
-               for (endp = dst + n; dst < endp; dst++)
-                       *dst = 0;
-       }
-
-       return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \