From: James Morse <james.morse@arm.com>
Date: Fri, 29 Apr 2016 17:27:03 +0000 (+0100)
Subject: arm64: kvm: Fix kvm teardown for systems using the extended idmap
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=c612505f860c6d4fac03924879982adcd042e239;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

arm64: kvm: Fix kvm teardown for systems using the extended idmap

If memory is located above 1<<VA_BITS, kvm uses the extended idmap and
runs with merged page tables, which don't have the trampoline page
mapped. This means the teardown code can't reach __kvm_hyp_reset()
through TRAMPOLINE_VA. The idmap itself is still mapped, but it lies
above HYP_PAGE_OFFSET, so its address would be masked by kern_hyp_va on
the way through kvm_call_hyp, and it can't be called into directly.

Add __extended_idmap_trampoline in the regular HYP text: it inserts
__kvm_hyp_reset()'s page offset into phys_idmap_start and branches
there, doing the trampoline dance in reverse from inside the idmap.
Teach kvm_hyp_reset_entry() to return this helper's address when the
extended idmap is in use.

Without this, tearing down kvm on such a system panics:

[] dump_backtrace+0x0/0x240
[    2.571818] [] show_stack+0x14/0x20
[    2.576858] [] dump_stack+0x94/0xb8
[    2.581899] [] panic+0x10c/0x250
[    2.586677] [] panic+0x0/0x250
[    2.591281] SMP: stopping secondary CPUs
[    3.649692] SMP: failed to stop secondary CPUs 0-2,4-7
[    3.654818] Kernel Offset: disabled
[    3.658293] Memory Limit: none
[    3.661337] ---[ end Kernel panic - not syncing: HYP panic:
[    3.661337] PS:200003c9 PC:0000007ffffff820 ESR:86000005
[    3.661337] FAR:0000007ffffff820 HPFAR:00000000003ffff0 PAR:0000000000000000
[    3.661337] VCPU: (null)
[    3.661337]

Reported-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b6f194d9f6b2..88a34670ddef 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,7 +46,8 @@ int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
 
-phys_addr_t kvm_hyp_reset_entry(void);
+unsigned long kvm_hyp_reset_entry(void);
+void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
 struct kvm_arch {
 	/* The VMID generation used for the virt. memory system */
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 44ec4cb23ae7..a873a6d8be90 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -140,6 +140,11 @@ merged:
 ENDPROC(__kvm_hyp_init)
 
 	/*
+	 * Reset kvm back to the hyp stub. This is the trampoline dance in
+	 * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
+	 * calls this code directly in the idmap. In this case switching to the
+	 * boot tables is a no-op.
+	 *
 	 * x0: HYP boot pgd
 	 * x1: HYP phys_idmap_start
 	 */
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ce9e5e5f28cf..70254a65bd5b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -164,3 +164,22 @@ alternative_endif
 
 	eret
 ENDPROC(__fpsimd_guest_restore)
+
+/*
+ * When using the extended idmap, we don't have a trampoline page we can use
+ * while we switch page tables during __kvm_hyp_reset. Accessing the idmap
+ * directly would be ideal, but if we're using the extended idmap then the
+ * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
+ * kvm_call_hyp using kern_hyp_va.
+ *
+ * x0: HYP boot pgd
+ * x1: HYP phys_idmap_start
+ */
+ENTRY(__extended_idmap_trampoline)
+	mov	x4, x1
+	adr_l	x3, __kvm_hyp_reset
+
+	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
+	bfi	x4, x3, #0, #PAGE_SHIFT
+	br	x4
+ENDPROC(__extended_idmap_trampoline)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 4062e6dd4cc1..b1ad730e1567 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -135,12 +135,28 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 extern char __hyp_idmap_text_start[];
 
-phys_addr_t kvm_hyp_reset_entry(void)
+unsigned long kvm_hyp_reset_entry(void)
 {
-	unsigned long offset;
-
-	offset = (unsigned long)__kvm_hyp_reset
-		 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
-
-	return TRAMPOLINE_VA + offset;
+	if (!__kvm_cpu_uses_extended_idmap()) {
+		unsigned long offset;
+
+		/*
+		 * Find the address of __kvm_hyp_reset() in the trampoline page.
+		 * This is present in the running page tables, and the boot page
+		 * tables, so we call the code here to start the trampoline
+		 * dance in reverse.
+		 */
+		offset = (unsigned long)__kvm_hyp_reset
+			 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+		return TRAMPOLINE_VA + offset;
+	} else {
+		/*
+		 * KVM is running with merged page tables, which don't have the
+		 * trampoline page mapped. We know the idmap is still mapped,
+		 * but can't be called into directly. Use
+		 * __extended_idmap_trampoline to do the call.
+		 */
+		return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline);
+	}
 }
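
For reference, the address splice that __extended_idmap_trampoline performs with
"bfi x4, x3, #0, #PAGE_SHIFT" can be modelled in C as below. This is only an
illustrative sketch: the page size and both example addresses are invented for
the demonstration, not taken from this patch or from a real system.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumption: 4K pages */

/*
 * Model of "bfi x4, x3, #0, #PAGE_SHIFT": keep the page-granular bits of
 * phys_idmap_start (x4) and splice in the in-page offset of __kvm_hyp_reset
 * (x3), giving the address of __kvm_hyp_reset as seen through the idmap.
 */
static uint64_t idmap_reset_target(uint64_t phys_idmap_start,
				   uint64_t kvm_hyp_reset_addr)
{
	const uint64_t off_mask = (1ULL << EXAMPLE_PAGE_SHIFT) - 1;

	return (phys_idmap_start & ~off_mask) | (kvm_hyp_reset_addr & off_mask);
}

int main(void)
{
	/* Hypothetical values: idmap above 1<<39, reset code at page offset 0x820. */
	uint64_t phys_idmap_start = 0x0000008000a00000ULL;
	uint64_t kvm_hyp_reset_va = 0xffffff8008094820ULL;

	printf("branch target in idmap: 0x%016" PRIx64 "\n",
	       idmap_reset_target(phys_idmap_start, kvm_hyp_reset_va));
	return 0;
}

The real trampoline does this splice in the normal HYP text, which is reachable
through kern_hyp_va, and then branches to the result; that branch only works
because the extended idmap keeps the idmap text mapped at its physical address.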