powerpc/mm/radix: Flush page walk cache when freeing page table
author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
		Wed, 8 Jun 2016 14:25:51 +0000 (19:55 +0530)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Fri, 10 Jun 2016 06:14:52 +0000 (16:14 +1000)
Even though tlb_flush() does a flush that invalidates everything,
including the page walk cache, we can end up doing an RCU page table
free before tlb_flush() is called. That means we can have page walk
cache entries even after we free the page table pages, which can
result in a wrong page table walk.

Avoid this by doing a PWC (page walk cache) flush on every page table
free. We can't batch the PWC flush, because the RCU callback in which
we free the page table pages doesn't have access to the mmu_gather.
Thus we have to do a PWC flush for every page table page freed.
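
To make the ordering concrete, here is an illustrative sketch (not
code from this patch) of the window being closed:

  /*
   * free_pgtables() path:               RCU:
   *
   *   clear the pud/pmd entry
   *   pgtable_free_tlb()
   *     -> tlb_remove_table()           ...grace period...
   *                                     page table page freed
   *   (the PWC can still hold entries derived from the freed
   *    table, so the hardware can walk through stale entries)
   *   ...
   *   tlb_flush()   <-- PWC invalidated only here, too late
   */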

Note: I also removed the dummy tlb_flush_pgtable() stubs used by
32-bit hash.

Fixes: 1a472c9dba6b ("powerpc/mm/radix: Add tlbflush routines")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/include/asm/book3s/pgalloc.h
arch/powerpc/mm/tlb-radix.c

diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index a2350194fc7640bf96a3ec42f12085efc63a5711..8e21bb492dca038358f6ca7a10d69a783cd6364c 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 488279edb1f045e00cb8bf2261367d319ad862ea..26eb2cb80c4e0fc6ef972f8635531445f112536c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -110,6 +110,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+       /*
+        * By now all the pud entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
@@ -127,6 +132,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+       /*
+        * By now all the pmd entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -198,7 +208,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
+       /*
+        * By now all the pte entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfee2fec57551bf2154aabbac3422a4..3fa94fcac6284e20e51566367b58c027ef2e3716 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                    unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                              unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)                radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)        radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)    radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)        radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c49356c459334468c488b786b9be8..96e5769b18b00fe8fbcd7d788c3c94457d15a98a 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)      local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+       /*
+        * Flush the page table walk cache on freeing a page table. We already
+        * have marked the upper/higher level page table entry none by now.
+        * So it is safe to flush PWC here.
+        */
+       if (!radix_enabled())
+               return;
 
+       radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
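
For illustration, a rough sketch of the resulting call path on radix
when a pmd page table is freed (based on the hunks above; the pud and
pte level helpers follow the same pattern):

  __pmd_free_tlb(tlb, pmd, addr)
    -> flush_tlb_pgtable(tlb, addr)          /* no-op unless radix */
         -> radix__flush_tlb_pwc(tlb, addr)
              -> _tlbie_pid(pid, RIC_FLUSH_PWC)  /* or _tlbiel_pid()
                                                    if mm is core-local */
    -> pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
         -> tlb_remove_table(tlb, pmd)       /* RCU-deferred free */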
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
index 54f591e9572eeb4ea6c3ad99d4f397034e6acbe9..c0a69ae922567b5fc819069e7e7d150992417866 100644
--- a/arch/powerpc/include/asm/book3s/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-                                    unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 71083621884ebeb49e731cb931ba446d53f02fa6..ab2f60e812e2826badd47b0f03fd0eee12ed0051 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -128,6 +128,21 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            unsigned long ap, int nid)
 {
@@ -183,6 +198,32 @@ no_context:
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_pid(pid, RIC_FLUSH_PWC);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                       unsigned long ap, int nid)
 {
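
For reference, the RIC (Radix Invalidation Control) values passed to
_tlbie_pid()/_tlbiel_pid() above select what a tlbie/tlbiel
invalidates; they are defined in tlbflush-radix.h, following the ISA
3.0 radix encoding:

  #define RIC_FLUSH_TLB 0         /* flush TLB entries only */
  #define RIC_FLUSH_PWC 1         /* flush the page walk cache only */
  #define RIC_FLUSH_ALL 2         /* flush TLB entries and the PWC */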