#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
-#define PFERR_NESTED_MASK (1U << 31)
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_propagate_fault(struct kvm_vcpu *vcpu)
{
-	u32 nested, error;
-
-	error = vcpu->arch.fault.error_code;
-	nested = error & PFERR_NESTED_MASK;
-	error = error & ~PFERR_NESTED_MASK;
-
-	vcpu->arch.fault.error_code = error;
-
-	if (mmu_is_nested(vcpu) && !nested)
+	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu);
+
+	vcpu->arch.fault.nested = false;
}
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
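The hunk above tests and then clears a nested flag sitting next to the existing error_code in vcpu->arch.fault; the hunk that adds the field itself is not part of this excerpt. A minimal sketch of the bookkeeping the new code assumes, showing only the two members referenced here (the struct placement and any surrounding fields are an assumption, not taken from this patch):

/* Sketch only: layout assumed; just the members this patch references. */
struct kvm_vcpu_arch {
	/* ... */
	struct {
		u32 error_code;	/* x86 #PF error code built from PFERR_* bits */
		bool nested;	/* set when a nested-gpa translation faulted */
		/* other fault bookkeeping elided */
	} fault;
	/* ... */
};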
	access |= PFERR_USER_MASK;
	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+		vcpu->arch.fault.nested = true;
	return t_gpa;
}
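Taken together, the two hunks separate fault routing from the error code: the translation path above flags the fault as nested when the gpa fails to translate, and kvm_propagate_fault() picks the MMU to inject through based on that flag and then clears it. With a dedicated bool, the error code no longer has to be masked to strip the bit-31 bookkeeping before it is handed back to the guest. A rough sketch of how a caller might drive this; the helper below is illustrative only and not code from this patch:

/* Sketch only: an illustrative caller, not part of this patch. */
static gpa_t demo_read_l2_gpa(struct kvm_vcpu *vcpu, gva_t gva, u32 access)
{
	u32 error;
	gpa_t t_gpa;

	/* A walk through the nested MMU may reach the translation path in
	 * the last hunk and set fault.nested if the gpa does not map. */
	t_gpa = vcpu->arch.nested_mmu.gva_to_gpa(vcpu, gva, access, &error);

	if (t_gpa == UNMAPPED_GVA)
		/* Routes the injection via fault.nested, then resets it. */
		kvm_propagate_fault(vcpu);

	return t_gpa;
}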