#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT (1ull << 11) /* (43 - 32) */
-#define VMX_EPT_DEFAULT_GAW 3
-#define VMX_EPT_MAX_GAW 0x4
#define VMX_EPT_MT_EPTE_SHIFT 3
-#define VMX_EPT_GAW_EPTP_SHIFT 3
-#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
-#define VMX_EPT_DEFAULT_MT 0x6ull
+#define VMX_EPTP_PWL_MASK 0x38ull
+#define VMX_EPTP_PWL_4 0x18ull
+#define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
+#define VMX_EPTP_MT_MASK 0x7ull
+#define VMX_EPTP_MT_WB 0x6ull
+#define VMX_EPTP_MT_UC 0x0ull
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
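Taken together, the new VMX_EPTP_* names spell out the low bits of the EPT pointer: bits 2:0 select the memory type, bits 5:3 hold the page-walk length minus one, and bit 6 enables accessed/dirty tracking. A minimal user-space sketch of how the fields compose (plain C, not kernel code; the macro values are copied from the header above):

#include <stdio.h>
#include <stdint.h>

#define VMX_EPTP_PWL_4		0x18ull		/* (4 - 1) << 3 */
#define VMX_EPTP_AD_ENABLE_BIT	(1ull << 6)
#define VMX_EPTP_MT_WB		0x6ull

int main(void)
{
	/* write-back, 4-level walk, A/D enabled: 0x6 | 0x18 | 0x40 = 0x5e */
	uint64_t eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT;

	printf("eptp control bits: %#llx\n", (unsigned long long)eptp);
	return 0;
}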
static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
{
- u64 eptp;
+ u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
- /* TODO write the value reading from MSR */
- eptp = VMX_EPT_DEFAULT_MT |
- VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
if (enable_ept_ad_bits &&
(!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
- eptp |= VMX_EPT_AD_ENABLE_BIT;
+ eptp |= VMX_EPTP_AD_ENABLE_BIT;
eptp |= (root_hpa & PAGE_MASK);
return eptp;
}
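After the cleanup, construct_eptp() is a plain OR of the fixed control bits, the optional A/D bit, and the page-aligned root. A hedged user-space rendering of the same computation (example_eptp and the hard-coded 4 KiB mask are illustrative, not kernel API):

#include <stdint.h>

#define VMX_EPTP_PWL_4		0x18ull
#define VMX_EPTP_AD_ENABLE_BIT	(1ull << 6)
#define VMX_EPTP_MT_WB		0x6ull

/* Mirrors construct_eptp(): control bits first, then the root page address. */
static uint64_t example_eptp(uint64_t root_hpa, int ad_enabled)
{
	uint64_t eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;

	if (ad_enabled)
		eptp |= VMX_EPTP_AD_ENABLE_BIT;
	return eptp | (root_hpa & ~0xfffull);	/* stands in for PAGE_MASK */
}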
static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 mask = address & 0x7;
int maxphyaddr = cpuid_maxphyaddr(vcpu);
/* Check for memory type validity */
- switch (mask) {
- case 0:
+ switch (address & VMX_EPTP_MT_MASK) {
+ case VMX_EPTP_MT_UC:
if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
return false;
break;
- case 6:
+ case VMX_EPTP_MT_WB:
if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
return false;
break;
default:
return false;
}
- /* Bits 5:3 must be 3 */
- if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
+ /* only a 4-level page-walk length is valid */
+ if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
return false;
/* Reserved bits should not be set */
if (address >> maxphyaddr || ((address >> 7) & 0x1f))
return false;
/* AD, if set, should be supported */
- if ((address & VMX_EPT_AD_ENABLE_BIT)) {
+ if (address & VMX_EPTP_AD_ENABLE_BIT) {
if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
return false;
}

return true;
}
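With the three checks above, only a few low-bit patterns survive validation. Two illustrative values, assuming L1 advertises the write-back capability bit (the variable names are mine, not from the patch):

#include <stdint.h>

#define VMX_EPTP_PWL_4	0x18ull
#define VMX_EPTP_MT_WB	0x6ull

static const uint64_t eptp_ok  = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4; /* 0x1e: accepted */
static const uint64_t eptp_bad = VMX_EPTP_MT_WB | (2ull << 3);    /* 3-level walk: rejected */

The next fragment is from the nested EPTP-switching path, which reads a candidate pointer out of L1's EPTP list before validating it the same way.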
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
&address, index * 8, 8))
return 1;
- accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);
+ accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
/*
* If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else
*/
static int get_ept_level(void)
{
- return VMX_EPT_DEFAULT_GAW + 1;
+ return 4;
}
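Because the page-walk length field stores (levels - 1), VMX_EPTP_PWL_4 and the 4 returned by get_ept_level() must stay in sync. A hypothetical compile-time check one could place inside the function (BUILD_BUG_ON is the standard kernel helper; this line is not part of the patch):

#include <linux/build_bug.h>

/* e.g. at the top of get_ept_level() */
BUILD_BUG_ON(VMX_EPTP_PWL_4 != (4 - 1) << 3);	/* 0x18 */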
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
- return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+ return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}
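nested_ept_ad_enabled() is just a single-bit probe on L1's EPTP; the other two fields decode the same way. A small illustrative sketch (plain C, helper names are mine):

#include <stdint.h>

#define VMX_EPTP_PWL_MASK	0x38ull
#define VMX_EPTP_AD_ENABLE_BIT	(1ull << 6)
#define VMX_EPTP_MT_MASK	0x7ull

static uint64_t eptp_memtype(uint64_t eptp)
{
	return eptp & VMX_EPTP_MT_MASK;			/* 0x6 means write-back */
}

static unsigned int eptp_walk_levels(uint64_t eptp)
{
	return ((eptp & VMX_EPTP_PWL_MASK) >> 3) + 1;	/* field stores levels - 1 */
}

static int eptp_ad_enabled(uint64_t eptp)
{
	return !!(eptp & VMX_EPTP_AD_ENABLE_BIT);
}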
/* Callbacks for nested_ept_init_mmu_context: */