{
/* a single tce can't cross a cache line */
if (cpu_has_clflush)
- asm volatile("clflush (%0)" :: "r" (tceaddr));
+ clflush(tceaddr);
else
- asm volatile("wbinvd":::"memory");
+ wbinvd();
}
void tce_build(struct iommu_table *tbl, unsigned long index,
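[Note: in the TCE flush path above, the open-coded asm is replaced with clflush(), which this patch defines in the <asm/system.h> hunks further down, and with the kernel's existing wbinvd() macro.]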
static void cache_flush_page(struct page *p)
{
- unsigned long adr = (unsigned long)page_address(p);
+ void *adr = page_address(p);
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- asm volatile("clflush (%0)" :: "r" (adr + i));
+ clflush(adr+i);
}
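[Note: changing adr from unsigned long to void * lets cache_flush_page() hand the pointer straight to clflush(), whose parameter is a volatile void *; the identical per-cache-line loop in flush_kernel_map() below gets the same conversion.]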
static void flush_kernel_map(void *arg)
{
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- asm volatile("clflush (%0)" :: "r" (adr + i));
+ clflush(adr+i);
}
SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
- asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));
+ clflush((char *)page+offset);
efficeon_private.l1_table[index] = page;
*page = insert;
/* clflush is slow, so don't clflush until we have to */
- if ( last_page &&
- ((unsigned long)page^(unsigned long)last_page) & clflush_mask )
- asm volatile("clflush %0" : : "m" (*last_page));
+ if (last_page &&
+ (((unsigned long)page^(unsigned long)last_page) &
+ clflush_mask))
+ clflush(last_page);
last_page = page;
}
if ( last_page )
- asm volatile("clflush %0" : : "m" (*last_page));
+ clflush(last_page);
agp_bridge->driver->tlb_flush(mem);
return 0;
asm volatile("wbinvd": : :"memory");
}
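[Note: the efficeon hunk above batches flushes: clflush() is issued only when the table pointer crosses into a new cache line (the clflush_mask test against last_page), with one final clflush() after the loop to cover the last dirty line.]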
+static inline void clflush(volatile void *__p)
+{
+ asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif /* __KERNEL__ */
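[Note: the helper itself is a one-line wrapper around the CLFLUSH instruction, added to what appear to be both the 32-bit and 64-bit <asm/system.h> headers; the "+m" constraint marks *__p as both read and written, so the compiler cannot reorder or elide accesses to that location across the flush.]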
+static inline void clflush(volatile void *__p)
+{
+ asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
#define nop() __asm__ __volatile__ ("nop")
#ifdef CONFIG_SMP
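[Usage illustration, not part of the patch: the sketch below reproduces the per-cache-line flush pattern from cache_flush_page() as a standalone userspace program. clflush_size(), buf, and the trailing mfence are illustrative choices; the CPUID computation mirrors how the kernel derives boot_cpu_data.x86_clflush_size, and the code assumes a CPU supporting CLFLUSH (CPUID.1:EDX bit 19).]

/* Illustrative userspace demo of the flush pattern the patch standardizes. */
#include <stdio.h>
#include <cpuid.h>

/* Same helper the patch adds to <asm/system.h>, minus the kernel-only
 * __force annotation. */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}

/* CPUID leaf 1, EBX[15:8] reports the CLFLUSH line size in quadwords;
 * this is how the kernel fills in boot_cpu_data.x86_clflush_size. */
static unsigned int clflush_size(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 64;			/* conservative fallback */
	return ((ebx >> 8) & 0xff) * 8;
}

int main(void)
{
	static char buf[4096];
	unsigned int step = clflush_size();
	unsigned int i;

	/* Flush the buffer one cache line at a time, as in cache_flush_page(). */
	for (i = 0; i < sizeof(buf); i += step)
		clflush(buf + i);
	asm volatile("mfence" ::: "memory");	/* CLFLUSH is ordered by MFENCE */
	printf("flushed %zu bytes in %u-byte lines\n", sizeof(buf), step);
	return 0;
}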