sparc64: Fix huge PMD invalidation.
authorDavid S. Miller <davem@davemloft.net>
Thu, 24 Apr 2014 20:58:02 +0000 (13:58 -0700)
committerDavid S. Miller <davem@davemloft.net>
Sun, 4 May 2014 05:31:52 +0000 (22:31 -0700)
On sparc64 "present" and "valid" are seperate PTE bits, this allows us to
naturally distinguish between the user explicitly asking for PROT_NONE
with mprotect() and other situations.
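As an illustrative sketch only (not part of this patch; the helper names
are made up, while the bit and variable names come from pgtable_64.h),
the two notions are tested roughly like this:

	/* "present" is the software-visible bit checked from C code. */
	static inline bool pte_present_sketch(pte_t pte)
	{
		unsigned long mask = (tlb_type == hypervisor) ?
				     _PAGE_PRESENT_4V : _PAGE_PRESENT_4U;

		return (pte_val(pte) & mask) != 0;
	}

	/* "valid" (bit 63) is what the TSB/TLB miss handlers honor. */
	static inline bool pte_valid_sketch(pte_t pte)
	{
		return (pte_val(pte) & _PAGE_VALID) != 0;
	}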

However we weren't handling this properly in the huge PMD paths.

The page table walker in the TSB miss path checks only for
_PAGE_PMD_HUGE.  So the generic pmdp_invalidate() would clear
_PAGE_PRESENT, but the TLB miss paths would still load the entry into
the TLB as a valid huge PMD.
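
For reference, the generic implementation in mm/pgtable-generic.c,
combined with the sparc64 pmd_mknotpresent() removed below, amounted to
roughly the following (a hedged reconstruction, not code from this patch):

	void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp)
	{
		/* pmd_mknotpresent() clears only _PAGE_PRESENT_4U/_PAGE_PRESENT_4V,
		 * leaving _PAGE_VALID and _PAGE_PMD_HUGE set, so the TSB miss
		 * path can still load the stale huge mapping into the TLB.
		 */
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}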

Fix this by clearing the valid bit in pmdp_invalidate(), and by also
checking the valid bit in USER_PGTABLE_CHECK_PMD_HUGE using "brgez":
since _PAGE_VALID is bit 63 (the sign bit) in both the sun4u and sun4v
pte layouts, a non-negative PMD value means the valid bit is clear and
the branch sends the miss to FAIL_LABEL.

Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/mm/tlb.c

index 0f9e94537eee78d9d41fffe82e32fc0098811ed2..e3db4b8e689b2e2f175ef3afdc8db069e6859764 100644 (file)
@@ -719,20 +719,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
        return __pmd(pte_val(pte));
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-       unsigned long mask;
-
-       if (tlb_type == hypervisor)
-               mask = _PAGE_PRESENT_4V;
-       else
-               mask = _PAGE_PRESENT_4U;
-
-       pmd_val(pmd) &= ~mask;
-
-       return pmd;
-}
-
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
@@ -893,6 +879,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                 pmd_t *pmd);
 
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                           pmd_t *pmdp);
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);
index 2230f80d9fe326dc48576e83a721fcf106efeaf0..90916f955cac80153c59dc74a768c3fdef3fae56 100644 (file)
@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
         sethi          %hi(4 * 1024 * 1024), REG2;     \
-       andn            REG1, REG2, REG1;               \
+       brgez,pn        REG1, FAIL_LABEL;               \
+        andn           REG1, REG2, REG1;               \
        and             VADDR, REG2, REG2;              \
        brlz,pt         REG1, PTE_LABEL;                \
         or             REG1, REG2, REG1;               \
index c07667fcd66eb7c9a88b0226452d63ea298ccc70..b89aba217e3b5105c3fae5737cca2f279db4dc95 100644 (file)
@@ -193,6 +193,17 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        }
 }
 
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                    pmd_t *pmdp)
+{
+       pmd_t entry = *pmdp;
+
+       pmd_val(entry) &= ~_PAGE_VALID;
+
+       set_pmd_at(vma->vm_mm, address, pmdp, entry);
+       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
 {