powerpc/mm: Introduce _PAGE_LARGE software pte bits
author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
		Mon, 28 Nov 2016 06:17:00 +0000 (11:47 +0530)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Mon, 28 Nov 2016 11:37:41 +0000 (22:37 +1100)
This patch adds a new software-defined pte bit. We use one of the reserved
fields of the ISA 3.0 pte definition, since we will only be using this on
POWER9 DD1 code paths. We can look at removing this code later.

The software bit will be used to differentiate between 64K/4K and 2M
ptes. This helps us find the page size mapped by a pte, so that we can
do an efficient TLB flush.

We don't support 1G hugetlb pages yet, so we add a VM_WARN_ON() (active
only with CONFIG_DEBUG_VM) to catch wrong usage.
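
As a rough illustration of the intent (not part of this patch, and the
helper name below is made up), a TLB flush path could recover the mapping
size from the pte bits along these lines:

/*
 * Hypothetical helper, for illustration only: derive the page size of a
 * mapping from the pte, using the new software bit on POWER9 DD1.
 */
static inline int radix_pte_psize_sketch(pte_t pte)
{
        /* On DD1, a 2M (huge) mapping is tagged with _PAGE_LARGE. */
        if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
            (pte_val(pte) & _PAGE_LARGE))
                return MMU_PAGE_2M;

        /* Otherwise fall back to the base page size (64K or 4K). */
        return mmu_virtual_psize;
}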

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/hugetlb.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index d9c283f95e051d2cf261c8ca5ae6356352449e9d..c62f14d0bec191e75bd1710020485324173b60ac 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -30,4 +30,24 @@ static inline int hstate_get_psize(struct hstate *hstate)
                return mmu_virtual_psize;
        }
 }
+
+#define arch_make_huge_pte arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                                      struct page *page, int writable)
+{
+       unsigned long page_shift;
+
+       if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+               return entry;
+
+       page_shift = huge_page_shift(hstate_vma(vma));
+       /*
+        * We don't support 1G hugetlb pages yet.
+        */
+       VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
+       if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+               return __pte(pte_val(entry) | _PAGE_LARGE);
+       else
+               return entry;
+}
 #endif
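
For context only: arch_make_huge_pte() is a hook that the generic hugetlb
code invokes while building a huge pte, so providing this override is
enough to get _PAGE_LARGE set on DD1. A condensed, paraphrased sketch of
such a caller (the real mm/hugetlb.c code differs in detail) looks roughly
like:

/*
 * Simplified sketch of a generic caller, not part of this patch: the
 * architecture gets a chance to adjust the pte about to be installed
 * for a huge page.
 */
static pte_t make_huge_pte_sketch(struct vm_area_struct *vma,
                                  struct page *page, int writable)
{
        pte_t entry = mk_huge_pte(page, vma->vm_page_prot);

        if (writable)
                entry = huge_pte_mkwrite(huge_pte_mkdirty(entry));

        /* On POWER9 DD1 this is where _PAGE_LARGE gets set for 2M pages. */
        return arch_make_huge_pte(entry, vma, page, writable);
}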
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 9fd77f8794a0deeebf14ad922ab9869a8d428ebb..ac66d05a7c01ce62f348e73294a2b351bafb5580 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
 #define _RPAGE_SW1             0x00800
 #define _RPAGE_SW2             0x00400
 #define _RPAGE_SW3             0x00200
+#define _RPAGE_RSV1            0x1000000000000000UL
+#define _RPAGE_RSV2            0x0800000000000000UL
+#define _RPAGE_RSV3            0x0400000000000000UL
+#define _RPAGE_RSV4            0x0200000000000000UL
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SOFT_DIRTY       _RPAGE_SW3 /* software: software dirty tracking */
 #else
 #endif
 #define _PAGE_SPECIAL          _RPAGE_SW2 /* software: special page */
 
+/*
+ * For P9 DD1 only, we need to track whether the pte's huge.
+ */
+#define _PAGE_LARGE    _RPAGE_RSV1
+
 
 #define _PAGE_PTE              (1ul << 62)     /* distinguishes PTEs from pointers */
 #define _PAGE_PRESENT          (1ul << 63)     /* pte contains a translation */
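
As a side note (not part of the patch): the values above place _RPAGE_RSV1
at bit 60, so _PAGE_LARGE stays clear of _PAGE_PTE (bit 62) and
_PAGE_PRESENT (bit 63). A build-time check along these hypothetical lines
would confirm that:

/* Illustrative only: verify where _PAGE_LARGE lands in the pte. */
static inline void check_page_large_bit_sketch(void)
{
        BUILD_BUG_ON(_PAGE_LARGE != (1ul << 60));
        BUILD_BUG_ON(_PAGE_LARGE & (_PAGE_PTE | _PAGE_PRESENT));
}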
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 2a46dea8e1b18c46299c9cc9f3eef4ed7d4ef35b..d2c5c064e26614f2491072e65ba61a0eb38e219c 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -243,6 +243,8 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
 
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
 {
+       if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+               return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
        return __pmd(pmd_val(pmd) | _PAGE_PTE);
 }
 static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
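
To make the radix.h change concrete: on POWER9 DD1 a radix transparent
huge pmd now carries both _PAGE_PTE and _PAGE_LARGE. A purely illustrative
check of that state (the helper name is invented here, not part of the
patch) could look like:

/* Illustrative only: a DD1 THP pmd is marked as both a pte and large. */
static inline bool radix_pmd_is_2m_sketch(pmd_t pmd)
{
        return cpu_has_feature(CPU_FTR_POWER9_DD1) &&
               (pmd_val(pmd) & _PAGE_PTE) &&
               (pmd_val(pmd) & _PAGE_LARGE);
}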