powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Wed, 13 Jul 2016 09:36:43 +0000 (15:06 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 1 Aug 2016 01:15:13 +0000 (11:15 +1000)
Some archs like ppc64 need to do special things when flushing the TLB for
hugepages. Add a new helper to flush the hugetlb TLB range. This helps us
avoid flushing the entire TLB mapping for the PID.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/mm/hugetlbpage-radix.c
mm/hugetlb.c

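The change follows the usual mm pattern of a generic fallback that an architecture can override at compile time: core code maps flush_hugetlb_tlb_range() to flush_tlb_range() unless the arch header defines __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and supplies its own helper, which is exactly what the book3s64 header does below. What follows is a minimal, stand-alone user-space sketch of that override pattern, with the "arch header" and "generic mm code" collapsed into one file; the names mirror the kernel's, the vm_area_struct argument is dropped to keep it self-contained, and the addresses are made up.

#include <stdio.h>

/* "arch header": an architecture with special hugetlb flushing needs
 * provides its own helper and announces it with the __HAVE_ARCH_ macro. */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static void flush_hugetlb_tlb_range(unsigned long start, unsigned long end)
{
        printf("arch hugetlb flush: %#lx-%#lx\n", start, end);
}

/* plain range flush, used as the generic fallback */
static void flush_tlb_range(unsigned long start, unsigned long end)
{
        printf("generic range flush: %#lx-%#lx\n", start, end);
}

/* "generic mm code": use the arch helper if one was declared, else fall back */
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
#define flush_hugetlb_tlb_range(start, end)     flush_tlb_range(start, end)
#endif

int main(void)
{
        /* made-up hugetlb range, for illustration only */
        flush_hugetlb_tlb_range(0x10000000UL, 0x11000000UL);
        /* the generic path, shown for comparison */
        flush_tlb_range(0x10000000UL, 0x11000000UL);
        return 0;
}

The same #ifndef guard is what the mm/hugetlb.c hunk at the bottom of this commit adds.
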
index 10eb0d1e31404c2435484d6b1bef491eed5a2646..65037762b1205d4aedf10dac62ba9388f15c5edc 100644 (file)
@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
        return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+                                          unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                         unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
index 0790c4e92a64520c55d472aa7d77195df8e746b7..146b269de8fad78d799154cc4d59edb698f7dc3d 100644 (file)
@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
        return hash__flush_tlb_range(vma, start, end);
 }
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+                                          unsigned long start,
+                                          unsigned long end)
+{
+       if (radix_enabled())
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+       return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
index 1eca0deaf89b0c9683c53aca4eed1117d3d88506..35254a6784561b6f5f70399822cb163f8e59b14d 100644 (file)
@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                                  unsigned long end)
+{
+       int psize;
+       struct hstate *hstate = hstate_file(vma->vm_file);
+
+       psize = hstate_get_psize(hstate);
+       radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
  * A variant of hugetlb_get_unmapped_area doing topdown search
  * FIXME!! should we do as x86 does or non hugetlb area does ?
index f904246a8fd5ae29d0d727f9eb1ccad6b9272cca..af2d88253bfcb0a9377dc2a80b21d30c0c3ba92c 100644 (file)
@@ -3938,6 +3938,14 @@ same_page:
        return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)        flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * once we release i_mmap_rwsem, another task can do the final put_page
         * and that page table be reused and filled with junk.
         */
-       flush_tlb_range(vma, start, end);
+       flush_hugetlb_tlb_range(vma, start, end);
        mmu_notifier_invalidate_range(mm, start, end);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        mmu_notifier_invalidate_range_end(mm, start, end);
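
The last hunk is where the new hook pays off: hugetlb_change_protection() used to call flush_tlb_range(), which on radix has no page-size information for the hugetlb VMA and, as the commit message notes, ends up flushing the entire TLB mapping for the PID. With flush_hugetlb_tlb_range() the flush is sized by the hstate's page size via radix__flush_tlb_range_psize(), so only the changed range is invalidated. The sketch below only illustrates the difference in work; the 2MB huge page size is an assumption for the arithmetic, and this is user-space code, not the kernel's implementation.

#include <stdio.h>

/* Illustration only: compare the number of targeted invalidations a
 * page-size-aware range flush needs against the old whole-PID flush.
 * The 2MB huge page size is assumed, not taken from the patch. */
#define HPAGE_SIZE      (2UL << 20)

static unsigned long range_flush_ops(unsigned long start, unsigned long end,
                                     unsigned long page_size)
{
        unsigned long addr, ops = 0;

        /* one invalidation per huge page covered by the range */
        for (addr = start; addr < end; addr += page_size)
                ops++;
        return ops;
}

int main(void)
{
        unsigned long start = 0x80000000UL;
        unsigned long end = start + 256 * HPAGE_SIZE;   /* a 512MB mprotect() */

        printf("new: %lu invalidations at the huge page size\n",
               range_flush_ops(start, end, HPAGE_SIZE));
        printf("old: one flush of every translation the PID owns\n");
        return 0;
}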