From 27d6c865211662721e6cf305706e4a3da35f12b4 Mon Sep 17 00:00:00 2001
From: Nadav Har'El
Date: Wed, 25 May 2011 23:06:59 +0300
Subject: [PATCH] KVM: nVMX: Implement VMCLEAR

This patch implements the VMCLEAR instruction.

Signed-off-by: Nadav Har'El
Signed-off-by: Marcelo Tosatti
---
 arch/x86/kvm/vmx.c | 65 +++++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/x86.c |  1 +
 2 files changed, 65 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8432448fe356..c7600009dacb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -166,6 +166,9 @@ struct __packed vmcs12 {
 	u32 revision_id;
 	u32 abort;
 
+	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
+	u32 padding[7]; /* room for future expansion */
+
 	u64 io_bitmap_a;
 	u64 io_bitmap_b;
 	u64 msr_bitmap;
@@ -4814,6 +4817,66 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
 }
 
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	gva_t gva;
+	gpa_t vmptr;
+	struct vmcs12 *vmcs12;
+	struct page *page;
+	struct x86_exception e;
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+		return 1;
+
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+				sizeof(vmptr), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+
+	if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	if (vmptr == vmx->nested.current_vmptr) {
+		kunmap(vmx->nested.current_vmcs12_page);
+		nested_release_page(vmx->nested.current_vmcs12_page);
+		vmx->nested.current_vmptr = -1ull;
+		vmx->nested.current_vmcs12 = NULL;
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		/*
+		 * For accurate processor emulation, VMCLEAR beyond available
+		 * physical memory should do nothing at all. However, it is
+		 * possible that a nested vmx bug, not a guest hypervisor bug,
+		 * resulted in this case, so let's shut down before doing any
+		 * more damage:
+		 */
+		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+		return 1;
+	}
+	vmcs12 = kmap(page);
+	vmcs12->launch_state = 0;
+	kunmap(page);
+	nested_release_page(page);
+
+	nested_free_vmcs02(vmx, vmptr);
+
+	skip_emulated_instruction(vcpu);
+	nested_vmx_succeed(vcpu);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -4835,7 +4898,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_INVD]		      = handle_invd,
 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
 	[EXIT_REASON_VMCALL]		      = handle_vmcall,
-	[EXIT_REASON_VMCLEAR]		      = handle_vmx_insn,
+	[EXIT_REASON_VMCLEAR]		      = handle_vmclear,
 	[EXIT_REASON_VMLAUNCH]		      = handle_vmx_insn,
 	[EXIT_REASON_VMPTRLD]		      = handle_vmx_insn,
 	[EXIT_REASON_VMPTRST]		      = handle_vmx_insn,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 27d12a3b1890..c1b5a1817e43 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -347,6 +347,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
+EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-- 
2.20.1
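
For reference, this is the guest-side path the handler above emulates: an L1 hypervisor in VMX root operation executes VMCLEAR on the physical address of a 4KiB-aligned VMCS region, the CPU (under L0's control) takes an EXIT_REASON_VMCLEAR exit, and handle_vmclear() clears the launch state recorded in the corresponding vmcs12. The sketch below is illustrative only and not part of the patch; l1_vmclear() and vmcs_pa are made-up names, and it assumes VMXON has already been executed and that vmcs_pa holds the physical address of a page-aligned region whose first word was initialized to the VMCS revision id.

/*
 * Illustrative sketch (not part of the patch): issue VMCLEAR from L1.
 * Returns 0 on VMsucceed, -1 on VMfail (e.g. an unaligned vmptr, which
 * handle_vmclear() reports as VMXERR_VMCLEAR_INVALID_ADDRESS).
 */
static inline int l1_vmclear(u64 vmcs_pa)
{
	u8 error;

	/*
	 * VMCLEAR takes the 64-bit physical address as an m64 operand;
	 * setna captures both VMfailInvalid (CF=1) and VMfailValid (ZF=1).
	 */
	asm volatile("vmclear %1; setna %0"
		     : "=qm"(error)
		     : "m"(vmcs_pa)
		     : "cc", "memory");
	return error ? -1 : 0;
}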