From 206a2a73a62d37c8b8f6ddd3180c202b2e7298ab Mon Sep 17 00:00:00 2001
From: Steve Capper
Date: Tue, 6 May 2014 14:02:27 +0100
Subject: [PATCH] arm64: mm: Create gigabyte kernel logical mappings where possible

We have the capability to map 1GB level 1 blocks when using a 4K
granule.

This patch adjusts the create_mapping logic such that, when mapping
physical memory on boot, we attempt to use a 1GB block if both the VA
and PA start and end are 1GB aligned. This both reduces the levels of
lookup required to resolve a kernel logical address and reduces TLB
pressure on cores that support 1GB TLB entries.

Signed-off-by: Steve Capper
Tested-by: Jungseok Lee
[catalin.marinas@arm.com: s/prot_sect_kernel/PROT_SECT_NORMAL_EXEC/]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/pgtable-hwdef.h |  2 ++
 arch/arm64/include/asm/pgtable.h       |  7 +++++++
 arch/arm64/mm/mmu.c                    | 28 +++++++++++++++++++++++++-
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5fc8a66c3924..955e8c5f0afb 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -29,6 +29,8 @@
  */
 
 #define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
+#define PUD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
+#define PUD_TYPE_SECT		(_AT(pgdval_t, 1) << 0)
 
 /*
  * Level 2 descriptor (PMD).
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 752348dbb4f3..3de4ef8bfd82 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -259,6 +259,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
 
@@ -292,6 +293,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define pud_sect(pud)		(0)
+#else
+#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
+				 PUD_TYPE_SECT)
+#endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2c0e1dda8163..3a729de96f15 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -195,7 +195,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if ((PAGE_SHIFT == 12) &&
+		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+			pud_t old_pud = *pud;
+			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+			/*
+			 * If we have an old value for a pud, it will
+			 * be pointing to a pmd table that we no longer
+			 * need (from swapper_pg_dir).
+			 *
+			 * Look up the old pmd table and free it.
+			 */
+			if (!pud_none(old_pud)) {
+				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+				memblock_free(table, PAGE_SIZE);
+				flush_tlb_all();
+			}
+		} else {
+			alloc_init_pmd(pud, addr, next, phys);
+		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -338,6 +361,9 @@ int kern_addr_valid(unsigned long addr)
 	if (pud_none(*pud))
 		return 0;
 
+	if (pud_sect(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return 0;
-- 
2.20.1
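
Note (not part of the patch): the test that gates the new 1GB path in alloc_init_pud()
can be illustrated in isolation. Below is a minimal user-space sketch of that alignment
check; the SKETCH_* macros, the can_use_1gb_block() helper and the sample addresses are
assumptions for a 4K granule where one level 1 (PUD) entry covers 1GB, not values taken
from the kernel headers.

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed values for a 4K granule where one PUD entry covers 1GB. */
	#define SKETCH_PAGE_SHIFT	12
	#define SKETCH_PUD_SHIFT	30
	#define SKETCH_PUD_SIZE		(1ULL << SKETCH_PUD_SHIFT)
	#define SKETCH_PUD_MASK		(~(SKETCH_PUD_SIZE - 1))

	/*
	 * Mirrors the patch's condition: a 1GB block is only put down when
	 * the virtual start, the end of the current PUD range and the
	 * physical start are all 1GB aligned.
	 */
	static int can_use_1gb_block(uint64_t addr, uint64_t next, uint64_t phys)
	{
		return (SKETCH_PAGE_SHIFT == 12) &&
		       (((addr | next | phys) & ~SKETCH_PUD_MASK) == 0);
	}

	int main(void)
	{
		/* Hypothetical sample mappings, not taken from the patch. */
		printf("%d\n", can_use_1gb_block(0xffffffc000000000ULL,
						 0xffffffc040000000ULL,
						 0x0000000080000000ULL)); /* 1: all 1GB aligned */
		printf("%d\n", can_use_1gb_block(0xffffffc000200000ULL,
						 0xffffffc040000000ULL,
						 0x0000000080200000ULL)); /* 0: only 2MB aligned */
		return 0;
	}

The end of the current PUD range ("next") is part of the test because pud_addr_end()
clamps it to the end of the region being mapped, so a region whose end is not 1GB
aligned falls back to alloc_init_pmd() for its final entry.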