cris: get rid of zeroing
author Al Viro <viro@zeniv.linux.org.uk>
Sun, 19 Mar 2017 19:25:35 +0000 (15:25 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Tue, 28 Mar 2017 22:23:30 +0000 (18:23 -0400)
... the rest of it

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
arch/cris/arch-v10/lib/usercopy.c
arch/cris/arch-v32/lib/usercopy.c
arch/cris/include/arch-v10/arch/uaccess.h
arch/cris/include/arch-v32/arch/uaccess.h
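
With the series this patch belongs to, the raw copy-from-user primitives stop zeroing
the uncopied tail of the destination on a fault; they only report how many bytes were
left. Any zeroing that is still wanted is meant to happen in the caller (in the tree
this series targets, the generic copy_from_user() wrapper), rather than in every
per-architecture exception fixup. A minimal sketch of that caller-side pattern, using
hypothetical names rather than anything from this patch:

#include <string.h>

/* Hypothetical stand-in for the raw primitive: copies up to n bytes and
 * returns how many bytes it could NOT copy, without touching the rest of
 * the destination.  A fault is simulated here by a NULL source so the
 * sketch stays self-contained. */
static unsigned long raw_copy_from_user_sketch(void *to, const void *from,
					       unsigned long n)
{
	if (!from)
		return n;	/* "faulted" before copying anything */
	memcpy(to, from, n);
	return 0;
}

/* Sketch of the wrapper that now owns the zeroing: one memset of the
 * uncopied tail at the call site, instead of clear.* instructions in the
 * architecture's fixup code. */
unsigned long copy_from_user_sketch(void *to, const void *from,
				    unsigned long n)
{
	unsigned long left = raw_copy_from_user_sketch(to, from, n);

	if (left)
		memset((char *)to + (n - left), 0, left);
	return left;
}

With the zeroing handled there, the per-architecture fixups changed below can shrink
to pure bookkeeping.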

diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index 1ba7cc000dfc80266a7beb53ecb6c19386f989eb..80b72199bdc4c5a5a6275ee0fd467584b1026213 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -217,19 +217,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     {
       __asm_copy_from_user_1 (dst, src, retn);
       n--;
+      if (retn)
+         goto exception;
     }
 
     if (((unsigned long) src & 2) && n >= 2)
     {
       __asm_copy_from_user_2 (dst, src, retn);
       n -= 2;
+      if (retn)
+         goto exception;
     }
-
-    /* We only need one check after the unalignment-adjustments, because
-       if both adjustments were done, either both or neither reference
-       had an exception.  */
-    if (retn != 0)
-      goto copy_exception_bytes;
   }
 
   /* Decide which copying method to use. */
@@ -328,7 +326,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     n -= 4;
 
     if (retn)
-      goto copy_exception_bytes;
+      goto exception;
   }
 
   /* If we get here, there were no memory read faults.  */
@@ -356,17 +354,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
      bytes.  */
   return retn;
 
-copy_exception_bytes:
-  /* We already have "retn" bytes cleared, and need to clear the
-     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-     memset is preferred here, since this isn't speed-critical code and
-     we'd rather have this a leaf-function than calling memset.  */
-  {
-    char *endp;
-    for (endp = dst + n; dst < endp; dst++)
-      *dst = 0;
-  }
-
+exception:
   return retn + n;
 }
 EXPORT_SYMBOL(__copy_user_zeroing);
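
With the clearing loop gone, the error path of __copy_user_zeroing() reduces to
returning retn + n: the bytes the faulting access failed to copy plus the bytes that
were never attempted. A simplified, self-contained C rendering of that control flow;
the real function is driven by the __asm_copy_from_user_* inline-asm macros, and
copy_chunk() below is a made-up stand-in that simulates a fault on a NULL source:

#include <stddef.h>

/* Made-up stand-in for the __asm_copy_from_user_N macros: copy one chunk,
 * or, on a (simulated) fault, add the bytes not copied to *retn. */
static void copy_chunk(char **dst, const char **src, size_t len, size_t *retn)
{
	if (!*src) {			/* simulated fault */
		*retn += len;
		return;
	}
	for (size_t i = 0; i < len; i++)
		*(*dst)++ = *(*src)++;
}

/* Skeleton of the post-patch function: no zeroing of the destination tail,
 * just stop at the first fault and report how much did not make it. */
size_t copy_user_sketch(char *dst, const char *src, size_t n)
{
	size_t retn = 0;

	if (((unsigned long)src & 1) && n >= 1) {	/* head alignment */
		copy_chunk(&dst, &src, 1, &retn);
		n -= 1;
		if (retn)
			goto exception;
	}
	if (((unsigned long)src & 2) && n >= 2) {
		copy_chunk(&dst, &src, 2, &retn);
		n -= 2;
		if (retn)
			goto exception;
	}
	while (n >= 4) {				/* main word loop */
		copy_chunk(&dst, &src, 4, &retn);
		n -= 4;
		if (retn)
			goto exception;
	}
	while (n) {					/* trailing bytes */
		copy_chunk(&dst, &src, 1, &retn);
		n -= 1;
		if (retn)
			goto exception;
	}
	return retn;

exception:
	return retn + n;	/* was: zero n bytes at dst, then return */
}

The per-step retn checks replace the old single check after both alignment fixups, so
the function now stops at the first recorded fault.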
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 05e58dab800d58abb8c2407a21e1beeb37a66f77..25f421f98858a331b9dc45094bdb44d138858e23 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -184,19 +184,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     {
       __asm_copy_from_user_1 (dst, src, retn);
       n--;
+      if (retn != 0)
+        goto exception;
     }
 
     if (((unsigned long) src & 2) && n >= 2)
     {
       __asm_copy_from_user_2 (dst, src, retn);
       n -= 2;
+      if (retn != 0)
+        goto exception;
     }
 
-    /* We only need one check after the unalignment-adjustments, because
-       if both adjustments were done, either both or neither reference
-       had an exception.  */
-    if (retn != 0)
-      goto copy_exception_bytes;
   }
 
   /* Movem is dirt cheap.  The overheap is low enough to always use the
@@ -279,7 +278,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     n -= 4;
 
     if (retn)
-      goto copy_exception_bytes;
+      goto exception;
   }
 
   /* If we get here, there were no memory read faults.  */
@@ -307,17 +306,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
      bytes.  */
   return retn;
 
-copy_exception_bytes:
-  /* We already have "retn" bytes cleared, and need to clear the
-     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-     memset is preferred here, since this isn't speed-critical code and
-     we'd rather have this a leaf-function than calling memset.  */
-  {
-    char *endp;
-    for (endp = dst + n; dst < endp; dst++)
-      *dst = 0;
-  }
-
+exception:
   return retn + n;
 }
 EXPORT_SYMBOL(__copy_user_zeroing);
diff --git a/arch/cris/include/arch-v10/arch/uaccess.h b/arch/cris/include/arch-v10/arch/uaccess.h
index f68d3d19df721633bb37bb3539e3432dbd61033d..5477c98c2281d1850ed853b35733fe3a8b787dc0 100644
--- a/arch/cris/include/arch-v10/arch/uaccess.h
+++ b/arch/cris/include/arch-v10/arch/uaccess.h
@@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
        __asm_copy_user_cont(to, from, ret,     \
                "       move.b [%1+],$r9\n"     \
                "2:     move.b $r9,[%0+]\n",    \
-               "3:     addq 1,%2\n"            \
-               "       clear.b [%0+]\n",       \
+               "3:     addq 1,%2\n",           \
                "       .dword 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       move.w [%1+],$r9\n"             \
                "2:     move.w $r9,[%0+]\n" COPY,       \
-               "3:     addq 2,%2\n"                    \
-               "       clear.w [%0+]\n" FIXUP,         \
+               "3:     addq 2,%2\n" FIXUP,             \
                "       .dword 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       move.b [%1+],$r9\n"             \
                "4:     move.b $r9,[%0+]\n",            \
-               "5:     addq 1,%2\n"                    \
-               "       clear.b [%0+]\n",               \
+               "5:     addq 1,%2\n",                   \
                "       .dword 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       move.d [%1+],$r9\n"             \
                "2:     move.d $r9,[%0+]\n" COPY,       \
-               "3:     addq 4,%2\n"                    \
-               "       clear.d [%0+]\n" FIXUP,         \
+               "3:     addq 4,%2\n" FIXUP,             \
                "       .dword 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
diff --git a/arch/cris/include/arch-v32/arch/uaccess.h b/arch/cris/include/arch-v32/arch/uaccess.h
index 7a0032a4b9fbcf36ece697f4514d034424fc8222..dc2ce090f624bf7fda006a7fef32b1736bdd5b9d 100644
--- a/arch/cris/include/arch-v32/arch/uaccess.h
+++ b/arch/cris/include/arch-v32/arch/uaccess.h
@@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
                "2:     move.b [%1+],$acr\n"    \
                "       move.b $acr,[%0+]\n",   \
                "3:     addq 1,%2\n"            \
-               "       jump 1b\n"              \
-               "       clear.b [%0+]\n",       \
+               "       jump 1b\n",             \
                "       .dword 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
                "       move.w $acr,[%0+]\n",           \
                        FIXUP                           \
                "3:     addq 2,%2\n"                    \
-               "       jump 1b\n"                      \
-               "       clear.w [%0+]\n",               \
+               "       jump 1b\n",                     \
                        TENTRY                          \
                "       .dword 2b,3b\n")
 
@@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "4:     move.b [%1+],$acr\n"            \
                "       move.b $acr,[%0+]\n",           \
-               "5:     addq 1,%2\n"                    \
-               "       clear.b [%0+]\n",               \
+               "5:     addq 1,%2\n",                   \
                "       .dword 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
                "       move.d $acr,[%0+]\n",           \
                        FIXUP                           \
                "3:     addq 4,%2\n"                    \
-               "       jump 1b\n"                      \
-               "       clear.d [%0+]\n",               \
+               "       jump 1b\n",                     \
                        TENTRY                          \
                "       .dword 2b,3b\n")