When we switched to big endian page tables, we never updated the hugepd
format so that it could work for both big endian and little endian
configs. This patch series updates the hugepd format so that it is
treated as a __be64 value in big endian page table configs.
This patch also switches hugepd_t.pd from signed long to unsigned long.
I did update the FSL hugepd_ok check to test the top bit instead
of checking > 0.
Fixes: 5dc1ef858c12 ("powerpc/mm: Use big endian Linux page tables for book3s 64")
Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
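
To illustrate the endianness issue being fixed, here is a standalone userspace sketch (not part of the patch): the struct, mk_hugepd(), SHIFT_MASK value and main() below are stand-ins playing the roles of the kernel's hugepd_t, __hugepd() and hpd_val(), under the assumption that the stored word is big endian while the CPU may be little endian.

#include <stdio.h>
#include <stdint.h>
#include <endian.h>	/* htobe64()/be64toh(), glibc */

#define SHIFT_MASK 0x3fULL	/* low bits encode the page shift (illustrative value) */

/* Mirrors the fixed layout: the stored word is big endian in the page table. */
typedef struct { uint64_t pdbe; } hugepd_t;

static hugepd_t mk_hugepd(uint64_t val)	/* plays the role of __hugepd() */
{
	hugepd_t hpd = { htobe64(val) };
	return hpd;
}

static uint64_t hpd_val(hugepd_t hpd)	/* plays the role of hpd_val() */
{
	return be64toh(hpd.pdbe);
}

int main(void)
{
	hugepd_t hpd = mk_hugepd(0x10000ULL | 24);	/* fake address bits | shift 24 */

	/* Old-style raw read: on a little endian CPU the bytes are swapped,
	 * so the shift field reads back as garbage (0 here). */
	printf("raw read : shift = %llu\n",
	       (unsigned long long)(hpd.pdbe & SHIFT_MASK));

	/* The accessor converts from big endian first, so the shift reads back as 24. */
	printf("hpd_val(): shift = %llu\n",
	       (unsigned long long)(hpd_val(hpd) & SHIFT_MASK));
	return 0;
}

The switch from signed long to unsigned long is also what forces the FSL hugepd_ok() change: the old "> 0" test only worked because the sign bit doubled as the top bit, so the fixed code tests PD_HUGE explicitly.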
#ifdef CONFIG_HUGETLB_PAGE
static inline int hash__hugepd_ok(hugepd_t hpd)
{
+ unsigned long hpdval = hpd_val(hpd);
/*
* If it is not a pte and has the hugepd shift mask
* set, then it is a hugepd directory pointer
*/
- if (!(hpd.pd & _PAGE_PTE) &&
- ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+ if (!(hpdval & _PAGE_PTE) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
}
* We have only four bits to encode the MMU page size
*/
BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return __va(hpd.pd & HUGEPD_ADDR_MASK);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
- return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
- return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+ return (pte_t *)__va(hpd_val(hpd) &
+ ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
- return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+ return (pte_t *)((hpd_val(hpd) &
+ ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
- return hpd.pd & HUGEPD_SHIFT_MASK;
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & 0x4) != 0);
+ return ((hpd_val(hpd) & 0x4) != 0);
#else
- return (hpd.pd > 0);
+ /* We clear the top bit to indicate hugepd */
+ return ((hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
#include <asm/pgtable-types.h>
#endif
-typedef struct { signed long pd; } hugepd_t;
#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep) (0)
#define pgd_huge(pgd) (0)
#endif /* CONFIG_HUGETLB_PAGE */
-#define __hugepd(x) ((hugepd_t) { (x) })
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
return pmd_raw(old) == prev;
}
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return be64_to_cpu(x.pdbe);
+}
+
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
}
#endif
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return x.pd;
+}
+
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
+ unsigned long hpdval;
+
+ hpdval = hpd_val(hpd);
/*
* We should not find this format in page directory, warn otherwise.
*/
- is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+ is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
static unsigned nr_gpages;
#endif
-#define hugepd_none(hpd) ((hpd).pd == 0)
+#define hugepd_none(hpd) (hpd_val(hpd) == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
for (i = 0; i < num_hugepd; i++, hpdp++) {
if (unlikely(!hugepd_none(*hpdp)))
break;
- else
+ else {
#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) |
- (shift_to_mmu_psize(pshift) << 2);
+ *hpdp = __hugepd(__pa(new) |
+ (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
- hpdp->pd = __pa(new) |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
- _PMD_PAGE_512K) |
- _PMD_PRESENT;
+ *hpdp = __hugepd(__pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) | _PMD_PRESENT);
#else
/* We use the old format for PPC_FSL_BOOK3E */
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
+ }
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
kmem_cache_free(cachep, new);
}
spin_unlock(&mm->page_table_lock);
return;
for (i = 0; i < num_hugepd; i++, hpdp++)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
if (shift >= pdshift)
hugepd_free(tlb, hugepte);