ARM: LPAE: accommodate >32-bit addresses for page table base
author    Cyril Chemparathy <cyril@ti.com>
          Sat, 21 Jul 2012 19:55:04 +0000 (15:55 -0400)
committer Will Deacon <will.deacon@arm.com>
          Thu, 30 May 2013 15:02:15 +0000 (16:02 +0100)
This patch redefines the early boot-time use of the r4 register: on LPAE
systems the page table base is now passed in r4 shifted right by
ARCH_PGD_SHIFT, stealing that many low-order bits of the register.  This
allows for up to 38-bit physical addresses.
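
As an illustrative sketch of the encoding (not code from this patch; it
assumes a 64-bit phys_addr_t and L1_CACHE_SHIFT == 6, so ARCH_PGD_SHIFT == 6):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_PGD_SHIFT  6                               /* assumed L1_CACHE_SHIFT */
    #define ARCH_PGD_MASK   ((1 << ARCH_PGD_SHIFT) - 1)

    /* Pack a cache-line-aligned physical pgd base into a 32-bit register
     * value, the same transformation get_arch_pgd() below performs when
     * handing page tables to a secondary CPU. */
    static uint32_t arch_pgd_pack(uint64_t pgdir)
    {
            assert((pgdir & ARCH_PGD_MASK) == 0);           /* alignment, cf. the BUG_ON */
            assert((pgdir >> (32 + ARCH_PGD_SHIFT)) == 0);  /* must fit in 38 bits */
            return (uint32_t)(pgdir >> ARCH_PGD_SHIFT);
    }

    int main(void)
    {
            uint64_t pgdir = 0x800002000ULL;                /* a 36-bit example address */

            printf("r4 = %#" PRIx32 "\n", arch_pgd_pack(pgdir));    /* r4 = 0x20000080 */
            return 0;
    }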

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Subash Patel <subash.rp@samsung.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/include/asm/memory.h
arch/arm/kernel/head.S
arch/arm/kernel/smp.c
arch/arm/mm/proc-v7-3level.S

diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 57870ab313c52cd103b327363b0191e76efad9f6..e506088a767808aa81773a08724a17208310d779 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,8 @@
 #include <linux/types.h>
 #include <linux/sizes.h>
 
+#include <asm/cache.h>
+
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
 #define page_to_phys(page)     (__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)     (pfn_to_page(__phys_to_pfn(phys)))
 
+/*
+ * Minimum guaranteed alignment in pgd_alloc().  The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension.  This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE, but
+ * gives us about 38 bits.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT         L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT         0
+#endif
+#define ARCH_PGD_MASK          ((1 << ARCH_PGD_SHIFT) - 1)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213def562dec9e30cad88c827d6239c..45e8935cae4e44198ca4d197a66f219b7e7b8105 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -156,7 +156,7 @@ ENDPROC(stext)
  *
  * Returns:
  *  r0, r3, r5-r7 corrupted
- *  r4 = physical page table address
+ *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
  */
 __create_page_tables:
        pgtbl   r4, r8                          @ page table address
@@ -331,6 +331,7 @@ __create_page_tables:
 #endif
 #ifdef CONFIG_ARM_LPAE
        sub     r4, r4, #0x1000         @ point to the PGD table
+       mov     r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
        mov     pc, lr
 ENDPROC(__create_page_tables)
@@ -408,7 +409,7 @@ __secondary_data:
  *  r0  = cp#15 control register
  *  r1  = machine ID
  *  r2  = atags or dtb pointer
- *  r4  = page table pointer
+ *  r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
  *  r9  = processor ID
  *  r13 = *virtual* address to jump to upon completion
  */
@@ -427,10 +428,7 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
 #endif
-#ifdef CONFIG_ARM_LPAE
-       mov     r5, #0
-       mcrr    p15, 0, r4, r5, c2              @ load TTBR0
-#else
+#ifndef CONFIG_ARM_LPAE
        mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 550d63cef68e4be6ada87a3c7942ccd767061c6d..217b755aadd455f2901a7143fc9848a8ceb0feec 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -78,6 +78,13 @@ void __init smp_set_ops(struct smp_operations *ops)
                smp_ops = *ops;
 };
 
+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+       phys_addr_t pgdir = virt_to_phys(pgd);
+       BUG_ON(pgdir & ARCH_PGD_MASK);
+       return pgdir >> ARCH_PGD_SHIFT;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
@@ -87,8 +94,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-       secondary_data.pgdir = virt_to_phys(idmap_pgd);
-       secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+       secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+       secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 58ab7477bb615810cb36c1cef8ef073f62a219e3..5ffe1956c6d95c5ba227986347caddec5d0dacda 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -114,6 +114,7 @@ ENDPROC(cpu_v7_set_pte_ext)
         */
        .macro  v7_ttb_setup, zero, ttbr0, ttbr1, tmp
        ldr     \tmp, =swapper_pg_dir           @ swapper_pg_dir virtual address
+       mov     \tmp, \tmp, lsr #ARCH_PGD_SHIFT
        cmp     \ttbr1, \tmp                    @ PHYS_OFFSET > PAGE_OFFSET?
        mrc     p15, 0, \tmp, c2, c0, 2         @ TTB control register
        orr     \tmp, \tmp, #TTB_EAE
@@ -128,8 +129,13 @@ ENDPROC(cpu_v7_set_pte_ext)
         */
        orrls   \tmp, \tmp, #TTBR1_SIZE                         @ TTBCR.T1SZ
        mcr     p15, 0, \tmp, c2, c0, 2                         @ TTBCR
+       mov     \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
+       mov     \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT             @ lower bits
        addls   \ttbr1, \ttbr1, #TTBR1_OFFSET
-       mcrr    p15, 1, \ttbr1, \zero, c2                       @ load TTBR1
+       mcrr    p15, 1, \ttbr1, \tmp, c2                        @ load TTBR1
+       mov     \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)        @ upper bits
+       mov     \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT             @ lower bits
+       mcrr    p15, 0, \ttbr0, \tmp, c2                        @ load TTBR0
        .endm
 
        __CPUINIT
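
For reference, a C sketch of what the v7_ttb_setup shifts above reconstruct:
the 32-bit shifted page table base is split back into the 64-bit value that
mcrr programs into TTBRx (illustrative only, not from this patch; assumes
ARCH_PGD_SHIFT == 6):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_PGD_SHIFT  6                       /* assumed L1_CACHE_SHIFT on LPAE */

    /* Mirror of the v7_ttb_setup shifts: \tmp receives the high word and the
     * left-shifted register the low word of the 64-bit TTBR value. */
    static uint64_t ttbr_from_arch_pgd(uint32_t reg)
    {
            uint32_t upper = reg >> (32 - ARCH_PGD_SHIFT);  /* upper bits (\tmp)   */
            uint32_t lower = reg << ARCH_PGD_SHIFT;         /* lower bits (\ttbrN) */

            return ((uint64_t)upper << 32) | lower;
    }

    int main(void)
    {
            uint32_t reg = (uint32_t)(0x800002000ULL >> ARCH_PGD_SHIFT);

            printf("TTBR = %#" PRIx64 "\n", ttbr_from_arch_pgd(reg));   /* 0x800002000 */
            return 0;
    }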