From 6619a8fb594486363783cc4a8372e4d4ee4b913e Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Wed, 17 Oct 2007 18:04:37 +0200
Subject: [PATCH] x86: Create clflush() inline, remove hardcoded wbinvd

Create an inline function for clflush(), with the proper arguments,
and use it instead of hard-coding the instruction.  This also removes
one instance of hard-coded wbinvd, based on a patch by Glauber de
Oliveira Costa.

[ tglx: arch/x86 adaptation ]

Cc: Andi Kleen
Cc: Glauber de Oliveira Costa
Signed-off-by: H. Peter Anvin
Signed-off-by: Andi Kleen
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 arch/x86/kernel/tce_64.c        |  4 ++--
 arch/x86/mm/pageattr_32.c       |  4 ++--
 arch/x86/mm/pageattr_64.c       |  2 +-
 drivers/char/agp/efficeon-agp.c | 11 ++++++-----
 include/asm-x86/system_32.h     |  4 ++++
 include/asm-x86/system_64.h     |  5 +++++
 6 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index e3f2569b2c4..9e540fee700 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr)
 {
 	/* a single tce can't cross a cache line */
 	if (cpu_has_clflush)
-		asm volatile("clflush (%0)" :: "r" (tceaddr));
+		clflush(tceaddr);
 	else
-		asm volatile("wbinvd":::"memory");
+		wbinvd();
 }
 
 void tce_build(struct iommu_table *tbl, unsigned long index,
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 4241a74d16c..260073c0760 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 
 static void cache_flush_page(struct page *p)
 {
-	unsigned long adr = (unsigned long)page_address(p);
+	void *adr = page_address(p);
 	int i;
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		asm volatile("clflush (%0)" :: "r" (adr + i));
+		clflush(adr+i);
 }
 
 static void flush_kernel_map(void *arg)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 93d795d7c2a..8a4f65bf956 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
 {
 	int i;
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		asm volatile("clflush (%0)" :: "r" (adr + i));
+		clflush(adr+i);
 }
 
 static void flush_kernel_map(void *arg)
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index d78cd09186a..cac0009cebc 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -221,7 +221,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
 		SetPageReserved(virt_to_page((char *)page));
 
 		for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
-			asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));
+			clflush((char *)page+offset);
 
 		efficeon_private.l1_table[index] = page;
 
@@ -268,15 +268,16 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
 		*page = insert;
 
 		/* clflush is slow, so don't clflush until we have to */
-		if ( last_page &&
-		     ((unsigned long)page^(unsigned long)last_page) & clflush_mask )
-			asm volatile("clflush %0" : : "m" (*last_page));
+		if (last_page &&
+		    (((unsigned long)page^(unsigned long)last_page) &
+		     clflush_mask))
+			clflush(last_page);
 
 		last_page = page;
 	}
 
 	if ( last_page )
-		asm volatile("clflush %0" : : "m" (*last_page));
+		clflush(last_page);
 
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 1d6fb3afa53..db6283eb5e4 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -161,6 +161,10 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index fb4bcf99e66..ec4c29bcfcb 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -137,6 +137,11 @@ static inline void write_cr8(unsigned long val)
 
 #endif /* __KERNEL__ */
 
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 #ifdef CONFIG_SMP
-- 
2.20.1
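
A minimal user-space sketch of the same pattern follows, for anyone who
wants to experiment with it outside the kernel tree. CLFLUSH_SIZE and
cache_flush_buffer() are names invented for this illustration; the kernel
steps by boot_cpu_data.x86_clflush_size rather than a fixed constant and
keeps the __force sparse annotation that is dropped here. The sketch
assumes a GCC/Clang toolchain on an x86 CPU with CLFLUSH support.

/*
 * Illustrative sketch only (not part of the patch above): the clflush()
 * wrapper pattern, used to flush a buffer one cache line at a time.
 */
#include <stddef.h>

#define CLFLUSH_SIZE 64        /* assumed cache-line size for this example */

static inline void clflush(volatile void *p)
{
        /*
         * "+m" makes the addressed byte an input/output memory operand,
         * so the compiler keeps stores to that line ordered against the
         * flush without needing a blanket "memory" clobber.
         */
        asm volatile("clflush %0" : "+m" (*(volatile char *)p));
}

static void cache_flush_buffer(void *adr, size_t len)
{
        size_t i;

        /* step through the buffer one cache line at a time, mirroring
         * the cache_flush_page() loops touched by the patch */
        for (i = 0; i < len; i += CLFLUSH_SIZE)
                clflush((char *)adr + i);
}

A caller would flush, say, a freshly written descriptor with
cache_flush_buffer(desc, sizeof(*desc)). On real hardware the line size
should be taken from CPUID leaf 1 (or from the kernel's cached value)
rather than hard-coded as it is in this illustration.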