From 20f704b69af63bffbc8e70bcf21990318a8912f5 Mon Sep 17 00:00:00 2001
From: Richard Kuo
Date: Fri, 30 Nov 2012 14:53:56 -0600
Subject: [PATCH] Hexagon: fix initial page table setup prior to jump to VA

Use the exact number of pages needed to be mapped pre-VA-jump,
then map 896MB afterwards, which the arch mem init will fix up.

Signed-off-by: Richard Kuo
---
 arch/hexagon/kernel/head.S | 85 ++++++++++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 12 deletions(-)

diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
index 477320c455f7..8e88d246ca69 100644
--- a/arch/hexagon/kernel/head.S
+++ b/arch/hexagon/kernel/head.S
@@ -25,6 +25,9 @@
 #include
 #include
 #include
+#include
+
+#define SEGTABLE_ENTRIES #0x0e0
 
 __INIT
 ENTRY(stext)
@@ -63,27 +66,73 @@ ENTRY(stext)
 	| __HEXAGON_C_WB_L2 << 6 \
 	| __HVM_PDE_S_4MB)
 
-	r1 = pc
-	r2.H = #0xffc0
-	r2.L = #0x0000
-	r1 = and(r1,r2)		/* round PC to 4MB boundary */
+	/*
+	 * Get number of VA=PA entries; only really needed for jump
+	 * to hyperspace; gets blown away immediately after
+	 */
+
+	{
+		r1.l = #LO(_end);
+		r2.l = #LO(stext);
+		r3 = #1;
+	}
+	{
+		r1.h = #HI(_end);
+		r2.h = #HI(stext);
+		r3 = asl(r3, #22);
+	}
+	{
+		r1 = sub(r1, r2);
+		r3 = add(r3, #-1);
+	}			/* r1 = _end - stext */
+	r1 = add(r1, r3);	/* + (4M-1) */
+	r26 = lsr(r1, #22);	/* / 4M = # of entries */
+
+	r1 = r25;
+	r2.h = #0xffc0;
+	r2.l = #0x0000;		/* round back down to 4MB boundary */
+	r1 = and(r1,r2);
 	r2 = lsr(r1, #22)	/* 4MB page number */
 	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes) */
 	r0 = add(r0,r2)		/* r0 = address of correct PTE */
 	r2 = #PTE_BITS
 	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry */
 	r2.h = #0x0040
-	r2.l = #0x0000		/* 4MB */
-	memw(r0 ++ #4) = r1
-	r1 = add(r1, r2)
+	r2.l = #0x0000		/* 4MB increments */
+	loop0(1f,r26);
+1:	memw(r0 ++ #4) = r1
+	{ r1 = add(r1, r2); } :endloop0
 
-	r0 = r24
+	/* Also need to overwrite the initial 0xc0000000 entries */
+	/* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
+	R1.H = #HI(PAGE_OFFSET >> (22 - 2))
+	R1.L = #LO(PAGE_OFFSET >> (22 - 2))
+
+	r0 = add(r1, r24);	/* advance to 0xc0000000 entry */
+	r1 = r25;
+	r2.h = #0xffc0;
+	r2.l = #0x0000;		/* round back down to 4MB boundary */
+	r1 = and(r1,r2);	/* for huge page */
+	r2 = #PTE_BITS
+	r1 = add(r1,r2);
+	r2.h = #0x0040
+	r2.l = #0x0000		/* 4MB increments */
+
+	loop0(1f,SEGTABLE_ENTRIES);
+1:
+	memw(r0 ++ #4) = r1;
+	{ r1 = add(r1,r2); } :endloop0
+
+	r0 = r24;
 
 	/*
 	 * The subroutine wrapper around the virtual instruction touches
 	 * no memory, so we should be able to use it even here.
+	 * Note that in this version, R1 and R2 get "clobbered"; see
+	 * vm_ops.S
 	 */
+	r1 = #VM_TRANS_TYPE_TABLE
 	call __vmnewmap;
 
 	/* Jump into virtual address range. */
@@ -97,17 +146,29 @@ ENTRY(stext)
 __head_s_vaddr_target:
 	/*
	 * Tear down VA=PA translation now that we are running
-	 * in the desgnated kernel segments.
+	 * in kernel virtual space.
	 */
 	r0 = #__HVM_PDE_S_INVALID
-	r1 = r24
-	loop0(1f,#0x100)
+
+	r1.h = #0xffc0;
+	r1.l = #0x0000;
+	r2 = r25;		/* phys_offset */
+	r2 = and(r1,r2);
+
+	r1.l = #lo(swapper_pg_dir)
+	r1.h = #hi(swapper_pg_dir)
+	r2 = lsr(r2, #22)	/* 4MB page number */
+	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes) */
+	r1 = add(r1,r2);
+	loop0(1f,r26)
+
 1:
 	{
 		memw(R1 ++ #4) = R0
 	}:endloop0
 
 	r0 = r24
+	r1 = #VM_TRANS_TYPE_TABLE
 	call __vmnewmap
 
 	/* Go ahead and install the trap0 return so angel calls work */
@@ -150,7 +211,7 @@ __head_s_vaddr_target:
 	r2 = sub(r2,r0);
 	call memset;
 
-	/* Set PHYS_OFFSET; should still be in R25 */
+	/* Set PHYS_OFFSET; should be in R25 */
 #ifdef CONFIG_HEXAGON_PHYS_OFFSET
 	r0.l = #LO(__phys_offset);
 	r0.h = #HI(__phys_offset);
-- 
2.20.1
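
For reference, the sizing arithmetic the patch performs can be summarized in a short C sketch (illustration only, not part of the patch): the pre-VA-jump identity map needs (_end - stext), rounded up to a 4MB multiple and divided by 4MB, entries (the r26 trip count), while the kernel segment is then covered by SEGTABLE_ENTRIES = 0x0e0 = 224 entries of 4MB each, i.e. the 896MB the commit message refers to. The stext/_end values below are hypothetical and only demonstrate the rounding.

/*
 * Illustration only -- not part of the patch. Mirrors the page-count
 * arithmetic in head.S, assuming 4MB (1 << 22) super-page mappings;
 * the stext/_end addresses are hypothetical.
 */
#include <stdio.h>

#define SEG_SHIFT        22                    /* 4MB mappings */
#define SEG_SIZE         (1UL << SEG_SHIFT)
#define SEGTABLE_ENTRIES 0x0e0                 /* 224 entries, as in the patch */

int main(void)
{
	/* Hypothetical physical load addresses of the kernel image */
	unsigned long stext = 0x00008000UL;
	unsigned long end   = 0x00c2a000UL;    /* _end */

	/* Same rounding head.S computes into r26: (_end - stext + 4MB - 1) / 4MB */
	unsigned long va_eq_pa_entries = (end - stext + SEG_SIZE - 1) >> SEG_SHIFT;

	printf("VA=PA entries needed before the jump: %lu\n", va_eq_pa_entries);
	printf("Kernel mapping after the jump: %d entries x 4MB = %luMB\n",
	       SEGTABLE_ENTRIES, (SEGTABLE_ENTRIES * SEG_SIZE) >> 20);
	return 0;
}

For this example image the sketch reports 4 identity-map entries plus the fixed 224-entry (896MB) kernel mapping, which correspond to the loop0(1f,r26) and loop0(1f,SEGTABLE_ENTRIES) trip counts in the patch.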