@@ ... @@
  *    primary cache.
  *  o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-	int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();

 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, wait);
+	smp_call_function(func, info, 1);
 #endif
 	func(info);
 	preempt_enable();
 }
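
For reference, here is how the helper reads once the patch is applied. The explanatory comments are ours, not part of the patch; they restate what the code and the header comment above already say:

/*
 * Post-patch r4k_on_each_cpu(): callers no longer pass a wait flag;
 * smp_call_function() is always told to wait (third argument 1).
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();	/* pin this task to the current CPU */

	/*
	 * On CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC kernels the
	 * virtual processors share the primary caches, so flushing
	 * locally covers every CPU and the cross-call is compiled out.
	 */
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);	/* 1 == wait for remote CPUs */
#endif
	func(info);		/* run the flush on the local CPU too */
	preempt_enable();
}

Per the header comment, interrupts stay enabled around the local func(info) call, unlike the generic on_each_cpu(), which disables interrupts around the local invocation.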
@@ ... @@
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 }
@@ ... @@ static inline int has_valid_asid(const struct mm_struct *mm)
 	int exec = vma->vm_flags & VM_EXEC;

 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
@@ ... @@ static inline void local_r4k_flush_cache_mm(void * args)
 	if (!cpu_has_dc_aliases)
 		return;

-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 }
@@ ... @@ struct flush_cache_page_args {
 	args.addr = addr;
 	args.pfn = pfn;

-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 }
@@ ... @@ static inline void local_r4k_flush_data_cache_page(void * addr)
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
-		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-			1);
+		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 }
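
(Aside, ours rather than the patch's: the in_atomic() test above exists because the else branch ends up in smp_call_function(), which must not be invoked from atomic context; an atomic caller therefore gets a local-only flush.)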
@@ ... @@ struct flush_icache_range_args {
 	args.start = start;
 	args.end = end;

-	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 	instruction_hazard();
 }
@@ ... @@
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 }
@@ ... @@
 static void r4k_flush_icache_all(void)