From ae2e15eb3b6c2a011bee615470bf52d2beb99a4b Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa <gcosta@redhat.com>
Date: Wed, 30 Jan 2008 13:31:40 +0100
Subject: [PATCH] x86: unify prefetch operations

This patch moves the prefetch[w]? functions to processor.h

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/asm-x86/processor.h    | 30 ++++++++++++++++++++++++++++++
 include/asm-x86/processor_32.h | 25 -------------------------
 include/asm-x86/processor_64.h |  8 --------
 3 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index c6b749a018a..bfac9739f57 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH	ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH	"prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth to care about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However we don't do prefetches for pre XP Athlons currently
+   That should be fixed. */
+static inline void prefetch(const void *x)
+{
+	alternative_input(BASE_PREFETCH,
+			  "prefetchnta (%1)",
+			  X86_FEATURE_XMM,
+			  "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+	alternative_input(BASE_PREFETCH,
+			  "prefetchw (%1)",
+			  X86_FEATURE_3DNOW,
+			  "r" (x));
+}
+
 #define spin_lock_prefetch(x)	prefetchw(x)
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 84a4c5e47d5..61a9cae2364 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define ASM_NOP_MAX 8
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
-static inline void prefetch(const void *x)
-{
-	alternative_input(ASM_NOP4,
-			  "prefetchnta (%1)",
-			  X86_FEATURE_XMM,
-			  "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
-static inline void prefetchw(const void *x)
-{
-	alternative_input(ASM_NOP4,
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
-}
-
 #endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 45e382989b3..08b965124b9 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
 #define ASM_NOP_MAX 8
 
-static inline void prefetchw(void *x)
-{
-	alternative_input("prefetcht0 (%1)",
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
-}
-
 #endif /* __ASM_X86_64_PROCESSOR_H */
-- 
2.20.1
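
P.S.: for reviewers, a minimal usage sketch of the now-unified helpers (the
struct my_node/sum_list names are hypothetical, not part of this patch).
prefetch() is patched to prefetchnta on SSE-capable CPUs and otherwise
expands to BASE_PREFETCH (nops on 32-bit, prefetcht0 on 64-bit); prefetchw()
asks for the line in an exclusive state on 3dnow!-capable CPUs:

	#include <linux/prefetch.h>	/* wraps the asm-x86 definitions */

	struct my_node {		/* hypothetical example type */
		struct my_node *next;
		long payload;
	};

	static long sum_list(const struct my_node *head)
	{
		const struct my_node *p;
		long sum = 0;

		for (p = head; p; p = p->next) {
			/* Hint the next node into cache while we work on
			 * this one. Prefetching a NULL pointer is harmless:
			 * the instruction is only a hint and never faults. */
			prefetch(p->next);
			sum += p->payload;
		}
		return sum;
	}

Callers that are about to take a lock can use spin_lock_prefetch(&lock),
which maps to prefetchw() above and can save one cache-coherency state
transition when the write follows.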