{
__asm_copy_from_user_1 (dst, src, retn);
n--;
+ if (retn)
+ goto exception;
}
if (((unsigned long) src & 2) && n >= 2)
{
__asm_copy_from_user_2 (dst, src, retn);
n -= 2;
+ if (retn)
+ goto exception;
}
-
- /* We only need one check after the unalignment-adjustments, because
- if both adjustments were done, either both or neither reference
- had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
}
/* Decide which copying method to use. */
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ goto exception;
}
/* If we get here, there were no memory read faults. */
/* If we get here, retn correctly reflects the number of failing
   bytes. */
return retn;
-copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
+exception:
return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
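
The behavioural change in this hunk is that a fault now bails out through the
exception label and reports retn + n, the number of bytes that were not
copied, instead of zero-filling the destination tail in-line. A minimal
user-space model of that contract, for illustration only (copy_user_model and
fault_at are illustrative stand-ins, not kernel interfaces):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical fault-injection point standing in for a page fault. */
static size_t fault_at = (size_t)-1;

/* Model of the patched __copy_user_zeroing: stop at the fault and
   return how many bytes were NOT copied; the tail keeps its old
   contents instead of being cleared. */
static size_t copy_user_model(char *dst, const char *src, size_t n)
{
	size_t copied = 0;

	while (n) {
		if (copied >= fault_at)
			return n;	/* retn + n: bytes left uncopied */
		*dst++ = *src++;
		copied++;
		n--;
	}
	return 0;
}

int main(void)
{
	char buf[9] = "XXXXXXXX";

	fault_at = 3;	/* simulate a fault after three bytes */
	size_t left = copy_user_model(buf, "abcdefgh", 8);
	printf("uncopied: %zu, buf: %s\n", left, buf);	/* 5, abcXXXXX */
	return 0;
}
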
{
__asm_copy_from_user_1 (dst, src, retn);
n--;
+ if (retn)
+ goto exception;
}
if (((unsigned long) src & 2) && n >= 2)
{
__asm_copy_from_user_2 (dst, src, retn);
n -= 2;
+ if (retn)
+ goto exception;
}
- /* We only need one check after the unalignment-adjustments, because
- if both adjustments were done, either both or neither reference
- had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
}
/* Movem is dirt cheap. The overhead is low enough to always use the
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ goto exception;
}
/* If we get here, there were no memory read faults. */
/* If we get here, retn correctly reflects the number of failing
   bytes. */
return retn;
-copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
+exception:
return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
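
Dropping the copy_exception_bytes memset (and, below, the clear.b/clear.w/
clear.d fixups) does not remove the zeroing guarantee; it relocates it. A
generic-layer wrapper can clear exactly the uncopied tail once, outside the
arch fast path. A sketch of that pattern, assuming a raw copier with the
return convention above (raw_copy, fail_tail, and copy_from_user_model are
illustrative names, not the kernel's API):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

static size_t fail_tail;	/* hypothetical fault-injection knob */

/* Stand-in for the arch routine above: copies until a simulated fault
   and returns the number of bytes it could NOT copy (0 = success). */
static size_t raw_copy(void *dst, const void *src, size_t n)
{
	size_t ok = n > fail_tail ? n - fail_tail : 0;

	memcpy(dst, src, ok);
	return n - ok;
}

/* Generic-layer sketch: zero only the uncopied tail, so callers never
   see stale data while the arch copier stays a simple leaf routine. */
static size_t copy_from_user_model(void *dst, const void *src, size_t n)
{
	size_t left = raw_copy(dst, src, n);

	if (left)
		memset((char *)dst + (n - left), 0, left);
	return left;
}

int main(void)
{
	char buf[8];

	fail_tail = 5;	/* simulate faulting on the last five bytes */
	size_t left = copy_from_user_model(buf, "abcdefgh", 8);
	printf("left=%zu first=%c%c%c last=%d\n",
	       left, buf[0], buf[1], buf[2], buf[7]);	/* left=5 abc 0 */
	return 0;
}
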
__asm_copy_user_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"2: move.b $r9,[%0+]\n", \
- "3: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "3: addq 1,%2\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"2: move.w $r9,[%0+]\n" COPY, \
- "3: addq 2,%2\n" \
- " clear.w [%0+]\n" FIXUP, \
+ "3: addq 2,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
__asm_copy_from_user_2x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"2: move.d $r9,[%0+]\n" COPY, \
- "3: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ "3: addq 4,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
"2: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
"3: addq 1,%2\n" \
- " jump 1b\n" \
- " clear.b [%0+]\n", \
+ " jump 1b\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
" move.w $acr,[%0+]\n", \
FIXUP \
"3: addq 2,%2\n" \
- " jump 1b\n" \
- " clear.w [%0+]\n", \
+ " jump 1b\n", \
TENTRY \
" .dword 2b,3b\n")
__asm_copy_from_user_2x_cont(to, from, ret, \
"4: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
" move.d $acr,[%0+]\n", \
FIXUP \
"3: addq 4,%2\n" \
- " jump 1b\n" \
- " clear.d [%0+]\n", \
+ " jump 1b\n", \
TENTRY \
" .dword 2b,3b\n")
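
The ".dword 2b,3b" lines that survive the patch are CRIS exception-table
entries: each pairs the address of a copy instruction that may fault (labels
2/4) with its fixup code (labels 3/5), which after this change only bumps the
uncopied-byte counter via "addq N,%2". Conceptually, the trap handler resolves
a fault with a lookup like the following (a model of the mechanism, not the
CRIS implementation; all names are illustrative):

#include <stdint.h>
#include <stddef.h>

/* One ".dword 2b,3b" pair: faulting instruction -> fixup code. */
struct exception_entry {
	uintptr_t insn;		/* address of the move that faulted */
	uintptr_t fixup;	/* address of the "addq N,%2" recovery */
};

/* On a user-access fault, search the table; a hit means "resume at the
   fixup", a miss means the fault is a genuine kernel bug. */
static const struct exception_entry *
search_extable(const struct exception_entry *tbl, size_t n, uintptr_t pc)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].insn == pc)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const struct exception_entry table[] = {
		{ 0x1000, 0x2000 },	/* e.g. label 2 -> label 3 */
		{ 0x1004, 0x2008 },	/* e.g. label 4 -> label 5 */
	};
	const struct exception_entry *e = search_extable(table, 2, 0x1004);

	return e ? 0 : 1;	/* found: resume execution at e->fixup */
}
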