 void kvm_exit_arch(void);
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
-int kvm_mmu_init(struct kvm_vcpu *vcpu);
+int kvm_mmu_create(struct kvm_vcpu *vcpu);
+int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 	if (r < 0)
 		goto out_free_vcpus;
-	kvm_arch_ops->vcpu_load(vcpu);
+	r = kvm_mmu_create(vcpu);
+	if (r < 0)
+		goto out_free_vcpus;
-	r = kvm_arch_ops->vcpu_setup(vcpu);
+	kvm_arch_ops->vcpu_load(vcpu);
+	r = kvm_mmu_setup(vcpu);
 	if (r >= 0)
-		r = kvm_mmu_init(vcpu);
-
+		r = kvm_arch_ops->vcpu_setup(vcpu);
 	vcpu_put(vcpu);
 	if (r < 0)
 		return -ENOMEM;
 }
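
Read as a whole, the hunk above leaves the vcpu creation path looking roughly like the sketch below. This is only a reconstruction from the added and context lines; the enclosing function, the out_free_vcpus label and whatever cleanup sits behind it are assumed rather than shown:

	r = kvm_mmu_create(vcpu);	/* allocation only; called before the vcpu is loaded */
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);	/* mmu context init; runs between vcpu_load and vcpu_put */
	if (r >= 0)
		r = kvm_arch_ops->vcpu_setup(vcpu);
	vcpu_put(vcpu);

	if (r < 0)
		return -ENOMEM;

The ordering change is the point of the split: the allocation half can fail before kvm_arch_ops->vcpu_load() is ever called, while the context-initialization half stays inside the load/put window.
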
-int kvm_mmu_init(struct kvm_vcpu *vcpu)
+int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
-	int r;
-
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 	ASSERT(list_empty(&vcpu->free_pages));
-	r = alloc_mmu_pages(vcpu);
-	if (r)
-		goto out;
-
-	r = init_kvm_mmu(vcpu);
-	if (r)
-		goto out_free_pages;
+	return alloc_mmu_pages(vcpu);
+}
-	return 0;
+int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+{
+	ASSERT(vcpu);
+	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+	ASSERT(!list_empty(&vcpu->free_pages));
-out_free_pages:
-	free_mmu_pages(vcpu);
-out:
-	return r;
+	return init_kvm_mmu(vcpu);
 }
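
One behavioral detail of this hunk: the old kvm_mmu_init() freed its own pages when init_kvm_mmu() failed, whereas kvm_mmu_setup() just returns the error, so releasing the pages is left to the caller's cleanup path. A minimal sketch of a caller under that assumption follows; example_vcpu_mmu_init is a hypothetical helper, and it is assumed (not shown in this excerpt) that kvm_mmu_destroy(), declared above, releases what kvm_mmu_create() allocated:

static int example_vcpu_mmu_init(struct kvm_vcpu *vcpu)
{
	int r;

	r = kvm_mmu_create(vcpu);	/* alloc_mmu_pages() under the hood */
	if (r < 0)
		return r;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);	/* init_kvm_mmu() under the hood */
	vcpu_put(vcpu);

	if (r < 0)
		kvm_mmu_destroy(vcpu);	/* assumed to undo kvm_mmu_create() */
	return r;
}

In the creation path shown earlier, that cleanup responsibility is assumed to fall to the code behind the out_free_vcpus label.
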
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)