@@ ... @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        if (r)
                return r;

-       ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

        gfn = gva >> PAGE_SHIFT;
@@ ... @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        int write = error_code & PFERR_WRITE_MASK;
        bool map_writable;

-       ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

        if (unlikely(error_code & PFERR_RSVD_MASK)) {
@@ ... @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
        bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
        struct kvm_mmu *context = &vcpu->arch.mmu;

-       ASSERT(vcpu);
        ASSERT(!VALID_PAGE(context->root_hpa));

        if (!is_paging(vcpu))
@@ ... @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
        struct kvm_mmu *context = &vcpu->arch.mmu;

-       ASSERT(vcpu);
        ASSERT(!VALID_PAGE(context->root_hpa));

        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
@@ ... @@
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
-       ASSERT(vcpu);
-
        kvm_mmu_unload(vcpu);
        init_kvm_mmu(vcpu);
}
@@ ... @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        struct page *page;
        int i;

-       ASSERT(vcpu);
-
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
@@ ... @@
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
-       ASSERT(vcpu);
-
        vcpu->arch.walk_mmu = &vcpu->arch.mmu;
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        vcpu->arch.mmu.translate_gpa = translate_gpa;
@@ ... @@
void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
-       ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        init_kvm_mmu(vcpu);
@@ ... @@
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
-       ASSERT(vcpu);
-
        kvm_mmu_unload(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
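
Every removal above is an ASSERT(vcpu) null check: in each function, vcpu is passed to a callee or dereferenced right next to the check (kvm_read_cr4_bits(vcpu, ...), &vcpu->arch.mmu, vcpu->arch.mmu.root_hpa), so a NULL vcpu would oops before the assertion could fire. The ASSERT(VALID_PAGE(...)) and ASSERT(!VALID_PAGE(...)) checks on root_hpa, which can legitimately fail, are kept. For reference, ASSERT() in mmu.c only prints a warning, and only when MMU_DEBUG is defined; a sketch of the definition from that era (from memory, not verbatim):

#ifdef MMU_DEBUG
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#else
#define ASSERT(x) do { } while (0)
#endif

With MMU_DEBUG unset (the normal case), the macro expands to an empty statement, so ASSERT(vcpu) was a no-op in production builds as well.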