From: Paul Burton
Date: Tue, 19 Apr 2016 08:25:05 +0000 (+0100)
Subject: MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=7b2cb64f91f25a7293b10054e20d1c0734ffab6f;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)

There are 2 distinct cases in which a kernel for a MIPS32 CPU
(CONFIG_CPU_MIPS32=y) may use 64 bit physical addresses
(CONFIG_PHYS_ADDR_T_64BIT=y):

  - 36 bit physical addressing as used by RMI Alchemy & Netlogic
    XLP/XLR CPUs.

  - MIPS32r5 eXtended Physical Addressing (XPA).

These 2 cases are distinct in that they require different behaviour
from the kernel - the EntryLo registers have different formats. Until
Linux v4.1 we only supported the first case, with code conditional
upon the 2 aforementioned Kconfig variables being set. Commit
c5b367835cfc ("MIPS: Add support for XPA.") added support for the
second case, but did so by modifying the code that existed for the
first case rather than treating the 2 cases as distinct. Since the
EntryLo registers have different formats this breaks the 36 bit
Alchemy/XLP/XLR case. Fix this by splitting the 2 cases, with XPA
cases now being conditional upon CONFIG_XPA and the non-XPA case
matching the code as it existed prior to commit c5b367835cfc
("MIPS: Add support for XPA.").

Signed-off-by: Paul Burton
Reported-by: Manuel Lauss
Tested-by: Manuel Lauss
Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Cc: James Hogan
Cc: David Daney
Cc: Huacai Chen
Cc: Maciej W. Rozycki
Cc: Paul Gortmaker
Cc: Aneesh Kumar K.V
Cc: Peter Zijlstra (Intel)
Cc: David Hildenbrand
Cc: Andrew Morton
Cc: Ingo Molnar
Cc: Alex Smith
Cc: Kirill A. Shutemov
Cc: stable@vger.kernel.org # v4.1+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13119/
Signed-off-by: Ralf Baechle
---
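The "different formats" at the heart of the bug are easiest to see in the
36 bit pte encoding this patch restores: the PFN sits at bit 6 of pte_high
with the hardware flags below it, and pte_high is then written to EntryLo
unmodified. The following is a minimal userspace sketch (not part of the
patch; the PFN and flag values are arbitrary stand-ins) mirroring the
pfn_pte()/pte_pfn() pair in the pgtable-32.h hunk below:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pfn = 0x123456;	/* a 36 bit physical address >> 12 */
	uint32_t prot = 0x07;		/* global | valid | dirty */

	/* pfn_pte(): pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f) */
	uint32_t pte_high = (uint32_t)(pfn << 6) | (prot & 0x3f);

	/* pte_pfn(): the PFN comes straight back out */
	assert((pte_high >> 6) == pfn);

	/* in this mode pte_high is written to EntryLo unmodified */
	printf("pte_high/EntryLo = %#x\n", pte_high);
	return 0;
}

Under XPA, by contrast, the upper physical address bits live in pte_low
(see _PFNX_MASK below) and reach the hardware via mthc0, so one shared
code path cannot serve both encodings.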
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 181bd8e7db7d..d21f3da7bdb6 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 #define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -118,6 +118,20 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 	return pte;
 }
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pte;
+
+	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
+	pte.pte_low = pgprot_val(prot);
+
+	return pte;
+}
+
 #else
 
 #ifdef CONFIG_CPU_VR41XX
@@ -166,7 +180,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #else
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
 #define __swp_type(x)			(((x).val >> 4) & 0x1f)
@@ -175,6 +189,15 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x)			(((x).val >> 2) & 0x1f)
+#define __swp_offset(x)			((x).val >> 7)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
+
 #else
 /*
  * Constraints:
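With the hardware flags in bits 5:0 of this layout, the non-XPA swap
macros above must keep _PAGE_GLOBAL (bit 0) and _PAGE_VALID (bit 1)
clear so a swapped-out pte can never look like a valid TLB entry. A
stand-alone userspace sketch of the encoding (macro bodies copied from
the hunk above, renamed locally):

#include <assert.h>
#include <stdio.h>

#define SWP_TYPE(v)		(((v) >> 2) & 0x1f)
#define SWP_OFFSET(v)		((v) >> 7)
#define SWP_ENTRY(type, off)	(((type) << 2) | ((off) << 7))

int main(void)
{
	unsigned long entry = SWP_ENTRY(9ul, 0x1234ul);

	assert(SWP_TYPE(entry) == 9);
	assert(SWP_OFFSET(entry) == 0x1234);
	/* bits 0 and 1 (_PAGE_GLOBAL, _PAGE_VALID) stay clear */
	assert((entry & 0x3) == 0);
	printf("swap entry %#lx round-trips\n", entry);
	return 0;
}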
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 5bc663d330d2..58e8bf815404 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,11 +32,11 @@
  * unpredictable things. The code (when it is written) to deal with
  * this problem will be in the update_mmu_cache() code for the r4k.
  */
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /*
- * Page table bit offsets used for 64 bit physical addressing on MIPS32,
- * for example with Alchemy, Netlogic XLP/XLR or XPA.
+ * Page table bit offsets used for 64 bit physical addressing on
+ * MIPS32r5 with XPA.
  */
 enum pgtable_bits {
 	/* Used by TLB hardware (placed in EntryLo*) */
@@ -59,6 +59,27 @@ enum pgtable_bits {
  */
 #define	_PFNX_MASK		0xffffff
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * Page table bit offsets used for 36 bit physical addressing on MIPS32,
+ * for example with Alchemy or Netlogic XLP/XLR.
+ */
+enum pgtable_bits {
+	/* Used by TLB hardware (placed in EntryLo*) */
+	_PAGE_GLOBAL_SHIFT,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_SHIFT,
+
+	/* Used only by software (masked out before writing EntryLo*) */
+	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
+	_PAGE_NO_READ_SHIFT,
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
+};
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /* Page table bits used for r3k systems */
@@ -116,7 +137,7 @@ enum pgtable_bits {
 #endif
 
 /* Used by TLB hardware (placed in EntryLo*) */
-#if (defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32))
+#if defined(CONFIG_XPA)
 # define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
 #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 # define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
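The enum above captures the whole point of the split: in the 36 bit
layout only bits 5:0 (global, valid, dirty, plus a 3 bit cache
attribute) are hardware bits, and everything from _PAGE_PRESENT up is
software state that must be masked off before an EntryLo write. A
compilable sketch of the positions the enum produces (the numeric
comments are derived here, not in the original):

#include <stdio.h>

enum pgtable_bits {
	_PAGE_GLOBAL_SHIFT,			/* 0 */
	_PAGE_VALID_SHIFT,			/* 1 */
	_PAGE_DIRTY_SHIFT,			/* 2 */
	_CACHE_SHIFT,				/* 3, three bits wide */
	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,	/* 6 */
	_PAGE_NO_READ_SHIFT,			/* 7 */
	_PAGE_WRITE_SHIFT,			/* 8 */
	_PAGE_ACCESSED_SHIFT,			/* 9 */
	_PAGE_MODIFIED_SHIFT,			/* 10 */
};

int main(void)
{
	unsigned int cache_mask = 7u << _CACHE_SHIFT;
	unsigned int hw_bits = (1u << _PAGE_PRESENT_SHIFT) - 1;

	printf("hardware bits: %#x (cache mask %#x)\n", hw_bits, cache_mask);
	printf("software bits start at bit %d\n", _PAGE_PRESENT_SHIFT);
	return 0;
}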
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index f6de4f49b1b2..e07a105cafc2 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -133,7 +133,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#ifdef CONFIG_XPA
+# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#else
+# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#endif
+
 #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
 #define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
 
@@ -143,14 +148,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 
+#ifdef CONFIG_XPA
 	if (pte.pte_high & _PAGE_GLOBAL) {
+#else
+	if (pte.pte_low & _PAGE_GLOBAL) {
+#endif
 		pte_t *buddy = ptep_buddy(ptep);
 		/*
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
-		if (pte_none(*buddy))
+		if (pte_none(*buddy)) {
+			if (!config_enabled(CONFIG_XPA))
+				buddy->pte_low |= _PAGE_GLOBAL;
 			buddy->pte_high |= _PAGE_GLOBAL;
+		}
 	}
 }
 
@@ -160,8 +172,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 	htw_stop();
 	/* Preserve global status for the pair */
-	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
-		null.pte_high = _PAGE_GLOBAL;
+	if (config_enabled(CONFIG_XPA)) {
+		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+			null.pte_high = _PAGE_GLOBAL;
+	} else {
+		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+			null.pte_low = null.pte_high = _PAGE_GLOBAL;
+	}
 
 	set_pte_at(mm, addr, ptep, null);
 	htw_start();
@@ -302,6 +319,8 @@ static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_WRITE;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -309,6 +328,8 @@ static inline pte_t pte_wrprotect(pte_t pte)
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_MODIFIED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -316,6 +337,8 @@ static inline pte_t pte_mkclean(pte_t pte)
 static inline pte_t pte_mkold(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_ACCESSED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_READ;
 	pte.pte_high &= ~_PAGE_SILENT_READ;
 	return pte;
 }
@@ -323,24 +346,33 @@ static inline pte_t pte_mkwrite(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
-	if (pte.pte_low & _PAGE_MODIFIED)
+	if (pte.pte_low & _PAGE_MODIFIED) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
 	pte.pte_low |= _PAGE_MODIFIED;
-	if (pte.pte_low & _PAGE_WRITE)
+	if (pte.pte_low & _PAGE_WRITE) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
-	if (!(pte.pte_low & _PAGE_NO_READ))
+	if (!(pte.pte_low & _PAGE_NO_READ)) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_READ;
 		pte.pte_high |= _PAGE_SILENT_READ;
+	}
 	return pte;
 }
 #else
@@ -438,7 +470,7 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
@@ -447,6 +479,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
 	return pte;
 }
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte.pte_low  &= _PAGE_CHG_MASK;
+	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+	pte.pte_low  |= pgprot_val(newprot);
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+	return pte;
+}
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6c6a843b0d17..134c988bc61f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
 	pte = mk_pte(page, prot);
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 	entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+	entrylo = pte.pte_high;
 #else
 	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
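The pgtable.h changes all follow one pattern: outside XPA the hardware
"silent" read/write bits live in pte_low as well as pte_high, so every
helper must update both words. A userspace sketch of the restored
pte_mkdirty() behaviour (the struct and flag values are local stand-ins
for illustration, not the kernel's definitions):

#include <assert.h>
#include <stdint.h>

struct pte { uint32_t pte_low, pte_high; };

#define PAGE_WRITE		(1u << 8)
#define PAGE_MODIFIED		(1u << 10)
#define PAGE_SILENT_WRITE	(1u << 2)	/* same bit as dirty */

static struct pte pte_mkdirty(struct pte pte)
{
	pte.pte_low |= PAGE_MODIFIED;
	if (pte.pte_low & PAGE_WRITE) {
		pte.pte_low  |= PAGE_SILENT_WRITE;	/* the !XPA mirror */
		pte.pte_high |= PAGE_SILENT_WRITE;
	}
	return pte;
}

int main(void)
{
	struct pte pte = { .pte_low = PAGE_WRITE, .pte_high = 0 };

	pte = pte_mkdirty(pte);
	assert(pte.pte_low & PAGE_SILENT_WRITE);
	assert(pte.pte_high & PAGE_SILENT_WRITE);	/* what reaches EntryLo */
	return 0;
}

Keeping the two words in sync matters because, as the init.c hunk above
shows, the non-XPA path feeds pte_high straight into EntryLo while the
software-visible checks read pte_low.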
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 67966f30c522..db4adf9cc65c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1011,25 +1011,21 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-	/*
-	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
-	 * Kernel is a special case. Only a few CPUs use it.
-	 */
-	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA)) {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
 		const int scratch = 1; /* Our extra working register */
 
 		uasm_i_addu(p, scratch, 0, ptep);
-#endif
+
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
+
 		uasm_i_lw(p, tmp, 0, scratch);
 		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
 		uasm_i_lui(p, scratch, 0xff);
@@ -1038,7 +1034,22 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		uasm_i_and(p, ptep, scratch, ptep);
 		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
 		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+		return;
+	}
+
+	/*
+	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
+	 * Kernel is a special case. Only a few CPUs use it.
+	 */
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+		int pte_off_even = sizeof(pte_t) / 2;
+		int pte_off_odd = pte_off_even + sizeof(pte_t);
+
+		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
 		return;
 	}
 
@@ -1637,7 +1648,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
 
-	if (!cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
 		const int scratch = 1; /* Our extra working register */
 
 		uasm_i_lui(p, scratch, (mode >> 16));
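For the TLB refill fast path, the non-XPA sequence restored above simply
loads the pte_high word of the even and odd pte of a pair and moves each
to EntryLo0/EntryLo1, with no rotation. The offset arithmetic behind
pte_off_even/pte_off_odd can be checked in a stand-alone sketch
(assuming the MIPS32 pte_t layout of two 32 bit words with pte_low
first; the flag values are arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t pte_low, pte_high; } pte_t;

int main(void)
{
	pte_t pair[2] = {
		{ .pte_low = 0x440, .pte_high = 0xaaaa0007 },	/* even pte */
		{ .pte_low = 0x440, .pte_high = 0xbbbb0007 },	/* odd pte */
	};
	int pte_off_even = sizeof(pte_t) / 2;			/* 4 */
	int pte_off_odd  = pte_off_even + sizeof(pte_t);	/* 12 */
	uint32_t entrylo0, entrylo1;

	/* the two uasm_i_lw()s, expressed as byte-offset loads */
	memcpy(&entrylo0, (uint8_t *)pair + pte_off_even, 4);
	memcpy(&entrylo1, (uint8_t *)pair + pte_off_odd, 4);

	assert(entrylo0 == pair[0].pte_high);
	assert(entrylo1 == pair[1].pte_high);
	printf("EntryLo0=%#x EntryLo1=%#x\n", entrylo0, entrylo1);
	return 0;
}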