powerpc/mm/radix: Add radix callback for pmd accessors
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Fri, 29 Apr 2016 13:25:56 +0000 (23:25 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 1 May 2016 08:32:57 +0000 (18:32 +1000)
This only adds 64K Linux page support for now. With a 64K hash Linux
config, a THP huge page needs to be distinguished from a hugetlb huge
page, because for THP we have to track hash PTE slot information for
each subpage. That is not needed for hugetlb huge pages, because we
don't do MPSS with hugetlb.

Radix doesn't have any such restrictions.
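
For illustration only, a side-by-side sketch drawn from the hunks below
(not part of the change itself): on hash, a THP PMD must carry both
_PAGE_PTE and H_PAGE_THP_HUGE so it can be told apart from a hugetlb PMD,
whereas on radix _PAGE_PTE alone is enough, since no per-subpage slot
tracking is required:

	/* hash: THP needs H_PAGE_THP_HUGE on top of _PAGE_PTE */
	static inline int hash__pmd_trans_huge(pmd_t pmd)
	{
		return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
			  (_PAGE_PTE | H_PAGE_THP_HUGE));
	}

	/* radix: _PAGE_PTE alone marks a huge (THP) PMD */
	static inline int radix__pmd_trans_huge(pmd_t pmd)
	{
		return !!(pmd_val(pmd) & _PAGE_PTE);
	}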

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/hash-64k.h
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
arch/powerpc/include/asm/book3s/64/radix.h

index d4e1f1f5322fc4decfa4cce103f44b8b81655ec6..c00e39fa3eba380bd5b10399a908ecb73b970786 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -97,8 +97,8 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
 
 extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                           unsigned long pfn, unsigned long size, pgprot_t);
-static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
-                              unsigned long pfn, pgprot_t prot)
+static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+                                unsigned long pfn, pgprot_t prot)
 {
        if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
                WARN(1, "remap_4k_pfn called with wrong pfn value\n");
@@ -182,14 +182,13 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
  * that for explicit huge pages.
  *
  */
-static inline int pmd_trans_huge(pmd_t pmd)
+static inline int hash__pmd_trans_huge(pmd_t pmd)
 {
        return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
                  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
-#define __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
        return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
 }
index ceadc2fd408ff759d3fea01eafbae492fe0f4334..27b5e34abe24ce93827134068408068bab628143 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -89,6 +89,29 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
        pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
 }
 
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+       if (radix_enabled())
+               return radix__pmd_trans_huge(pmd);
+       return hash__pmd_trans_huge(pmd);
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+       if (radix_enabled())
+               return radix__pmd_same(pmd_a, pmd_b);
+       return hash__pmd_same(pmd_a, pmd_b);
+}
 #endif /*  CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+                              unsigned long pfn, pgprot_t prot)
+{
+       if (radix_enabled())
+               BUG();
+       return hash__remap_4k_pfn(vma, addr, pfn, prot);
+
+}
 #endif /* __ASSEMBLY__ */
 #endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
index 4b581a0ebc0a71fcb2f62367481870099022de1a..db7e678e2bb6425a46636c8a45d0220ca3ad6906 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -121,5 +121,14 @@ static inline int radix__pgd_bad(pgd_t pgd)
        return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+static inline int radix__pmd_trans_huge(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif