x86: put movsl_mask into uaccess.h.
author Glauber Costa <gcosta@redhat.com>
Wed, 25 Jun 2008 17:53:41 +0000 (14:53 -0300)
committer Ingo Molnar <mingo@elte.hu>
Wed, 9 Jul 2008 07:14:28 +0000 (09:14 +0200)
x86_64 does not need movsl_mask, but it won't have X86_INTEL_USERCOPY
defined either, so the #ifdef-guarded declaration simply compiles out of
the 64-bit build.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
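
For context, movsl_mask.mask is consumed by the 32-bit user-copy fast path
to decide whether a movsl-based bulk copy is worth attempting. The sketch
below is modeled on the alignment test in arch/x86/lib/usercopy_32.c; the
helper name and the size threshold here are illustrative, not copied from
the tree:

/*
 * Illustrative sketch: if source and destination differ in the low bits
 * selected by movsl_mask.mask, they cannot both reach the CPU's preferred
 * alignment during the copy, so large copies fall back to a slower path.
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
static inline int movsl_alignment_ok(unsigned long a1, unsigned long a2,
				     unsigned long n)
{
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
	return 1;
}
#else
static inline int movsl_alignment_ok(unsigned long a1, unsigned long a2,
				     unsigned long n)
{
	return 1;	/* no Intel-specific movsl penalty to consider */
}
#endif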
include/asm-x86/uaccess.h
include/asm-x86/uaccess_32.h

index 7c7b46af63688276139dae418d531b0d3841b3b8..0c4ab788d0171ad07131a8271a266f9e22eabe32 100644
@@ -432,6 +432,15 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+       int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
index d3b5bf88ea8638094083e33d678c16a0fb6f1696..3467749c6beb7253f3bc3919739f8a1ff9f26478 100644
 #include <asm/asm.h>
 #include <asm/page.h>
 
-/*
- * movsl can be slow when source and dest are not both 8-byte aligned
- */
-#ifdef CONFIG_X86_INTEL_USERCOPY
-extern struct movsl_mask {
-       int mask;
-} ____cacheline_aligned_in_smp movsl_mask;
-#endif
-
 unsigned long __must_check __copy_to_user_ll
                (void __user *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll
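
The struct moved above is ____cacheline_aligned_in_smp because the mask is
written once during CPU setup and then read on every large user copy, so
keeping it on its own cache line avoids false sharing with unrelated hot
data. Below is a hedged sketch of the kind of setup code that fills it in;
the real logic lives in the Intel CPU initialization path, and the family
numbers and the value 7 (meaning "8-byte alignment preferred") are shown
only as an example:

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Hypothetical helper: pick an alignment mask for movsl based on CPU
 * family.  A mask of 7 means movsl is only fast when source and
 * destination share the same alignment modulo 8.
 */
static void example_init_movsl_mask(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 6:		/* P6-family parts prefer 8-byte alignment */
	case 15:	/* NetBurst likewise */
		movsl_mask.mask = 7;
		break;
	default:
		break;	/* leave mask at 0: no alignment restriction */
	}
}
#endif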