BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(vaddr);
return (void*) vaddr;
}
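
For context, here is a reconstructed sketch of the enclosing kmap_atomic_prot() (not part of the hunk above; details may differ slightly from the tree this applies to). The fixmap slot index includes smp_processor_id(), so the mapping is private to the executing CPU and only the local TLB can ever hold a stale translation, which is what makes the cheaper local flush sufficient:

/* Reconstructed context, not part of the patch */
static inline void *kmap_atomic_prot(struct page *page, enum km_type type,
				     pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* raises the preempt count, so we cannot migrate off this CPU */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();	/* per-CPU slot */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	__set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, prot));
	local_flush_tlb_page(vaddr);	/* only this CPU ever maps vaddr */

	return (void *)vaddr;
}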
* this pte without first remapping it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(vaddr);
#endif
pagefault_enable();
}
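
As a usage illustration (hypothetical caller, not part of the patch), the kmap_atomic()/kunmap_atomic() pairing that these two hunks make cheaper looks like this:

/* Hypothetical example: copy one highmem page through two atomic
 * kmaps; every mapping, access and flush stays on the local CPU. */
static void copy_highmem_page(struct page *dst, struct page *src)
{
	char *vto = kmap_atomic(dst, KM_USER0);
	char *vfrom = kmap_atomic(src, KM_USER1);

	memcpy(vto, vfrom, PAGE_SIZE);

	kunmap_atomic(vfrom, KM_USER1);
	kunmap_atomic(vto, KM_USER0);
}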
*
* - flush_tlb_mm(mm) flushes the specified mm context's TLBs
* - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_page(vmaddr) flushes one page on the local processor
* - flush_tlb_page_nohash(vma, vmaddr) flushes one page if the TLB is SW loaded
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
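
To summarize the contract the new entry adds (a hedged gloss with placeholder arguments, not text from the patch):

/*
 * flush_tlb_page(vma, addr)    - every CPU that may cache the
 *                                translation gets invalidated
 * local_flush_tlb_page(addr)   - this CPU only; the caller must
 *                                guarantee no other CPU ever mapped
 *                                addr (e.g. per-CPU fixmap slots
 *                                such as kmap_atomic's)
 */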
_tlbil_pid(mm->context.id);
}
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+ _tlbil_va(vmaddr, 0);
+}
+
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
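	The hunk ends mid-function; on this software-loaded-TLB branch, flush_tlb_page() plausibly completes as below (a reconstruction from the same-era tree, not part of the diff). It makes explicit that local_flush_tlb_page(vaddr) behaves like flush_tlb_page(NULL, vaddr): the vma, when present, supplies the PID, and NULL means the kernel context, PID 0, which is exactly what the _tlbil_va(vmaddr, 0) above encodes.

/* Reconstructed continuation, not part of the hunk */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}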
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+ flush_tlb_page(NULL, vmaddr);
+}
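
On the 32-bit hash-MMU CPUs there is no cheaper CPU-local invalidate, so the new inline above simply reuses the out-of-line flush_tlb_page() with a NULL vma. That is safe because, in this era's arch/powerpc/mm/tlb_32.c (reconstructed sketch below, not part of the diff), kernel addresses take the &init_mm path and never dereference the vma:

/* Reconstructed sketch of the hash32 flush_tlb_page() */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {		/* no hash table: plain tlbie */
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}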
#else
/*
{
}
+static inline void local_flush_tlb_page(unsigned long vmaddr)
+{
+}
+
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
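On the 64-bit hash MMU these flushes remain empty inlines, and local_flush_tlb_page() joins them as a no-op: on that MMU the translation is torn down when the Linux PTE itself is changed (the hash-PTE invalidation is batched off the PTE update path, via hpte_need_flush() in this era's tree), so there is nothing left for a separate flush_tlb_* call to do.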