struct kvm_segment *var, int seg);
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
- int (*is_long_mode)(struct kvm_vcpu *vcpu);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}
+static inline int is_long_mode(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+ return vcpu->shadow_efer & EFER_LME;
+#else
+ return 0;
+#endif
+}
+
static inline int is_pae(struct kvm_vcpu *vcpu)
{
return vcpu->cr4 & CR4_PAE_MASK;
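For reference, a self-contained sketch of the new check, outside the kernel tree. EFER.LME (bit 8, Long Mode Enable) is what the new helper tests in the cached shadow_efer; note that the SVM helper it replaces (below) tested EFER.LMA (Long Mode Active) instead, which differs while LME is set but paging is still off. Names suffixed _sketch are illustrative, not kernel symbols.

	/* EFER bit position per the x86 architecture */
	#define EFER_LME_SKETCH (1ULL << 8)	/* Long Mode Enable */

	struct vcpu_sketch {
		unsigned long long shadow_efer;	/* cached guest EFER */
	};

	static inline int is_long_mode_sketch(struct vcpu_sketch *vcpu)
	{
		return (vcpu->shadow_efer & EFER_LME_SKETCH) != 0;
	}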
return;
}
- if (kvm_arch_ops->is_long_mode(vcpu)) {
+ if (is_long_mode(vcpu)) {
if (!(cr4 & CR4_PAE_MASK)) {
printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
"in long mode\n");
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
- if (kvm_arch_ops->is_long_mode(vcpu)) {
+ if (is_long_mode(vcpu)) {
if (cr3 & CR3_L_MODE_RESEVED_BITS) {
printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
inject_gp(vcpu);
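A hedged sketch of the long-mode CR3 reserved-bit test above. The kernel's actual mask is CR3_L_MODE_RESEVED_BITS; the mask below is an assumption for illustration only: bits 63:40 (beyond a 40-bit physical address space) plus the low flag bits other than PWT and PCD.

	#define CR3_PWT_SKETCH	(1ULL << 3)
	#define CR3_PCD_SKETCH	(1ULL << 4)
	#define CR3_RESERVED_SKETCH \
		(0xffffff0000000000ULL | (0xfffULL & ~(CR3_PWT_SKETCH | CR3_PCD_SKETCH)))

	/* Returns nonzero when a long-mode CR3 load must raise #GP. */
	static int cr3_load_faults(unsigned long long new_cr3)
	{
		return (new_cr3 & CR3_RESERVED_SKETCH) != 0;
	}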
if (!is_paging(vcpu))
return nonpaging_init_context(vcpu);
- else if (kvm_arch_ops->is_long_mode(vcpu))
+ else if (is_long_mode(vcpu))
return paging64_init_context(vcpu);
else if (is_pae(vcpu))
return paging32E_init_context(vcpu);
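The selection ladder above, restated as a sketch; the enum and function names are illustrative. The order matters: long mode implies PAE, so the 4-level test must precede the PAE test.

	enum paging_mode { MODE_NONPAGING, MODE_PAGING64, MODE_PAGING32E, MODE_PAGING32 };

	static enum paging_mode select_mmu_mode(int paging, int long_mode, int pae)
	{
		if (!paging)
			return MODE_NONPAGING;	/* paging disabled */
		else if (long_mode)
			return MODE_PAGING64;	/* 4-level IA-32e paging */
		else if (pae)
			return MODE_PAGING32E;	/* 3-level PAE paging */
		return MODE_PAGING32;		/* classic 2-level paging */
	}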
hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
- ASSERT((!kvm_arch_ops->is_long_mode(vcpu) && is_pae(vcpu)) ||
+ ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
walker->table = (pt_element_t *)((unsigned long)walker->table |
(walker->table[index] & PT_PAGE_SIZE_MASK) &&
(PTTYPE == 64 || is_pse(vcpu))))
return &walker->table[index];
- if (walker->level != 3 || kvm_arch_ops->is_long_mode(vcpu))
+ if (walker->level != 3 || is_long_mode(vcpu))
walker->inherited_ar &= walker->table[index];
paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
kunmap_atomic(walker->table, KM_USER0);
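The level-3 special case above exists because a PAE PDPTE carries no R/W or U/S bits, so it cannot narrow the inherited access rights; in long mode, level 3 is an ordinary page-table entry and does participate. A sketch of the accumulation, using the architectural PTE bit positions and illustrative names:

	#define PT_WRITABLE_SKETCH	(1ULL << 1)	/* R/W */
	#define PT_USER_SKETCH		(1ULL << 2)	/* U/S */

	static unsigned long long accumulate_ar(unsigned long long inherited_ar,
						unsigned long long entry,
						int level, int long_mode)
	{
		if (level != 3 || long_mode)	/* skip PAE PDPTEs: no AR bits */
			inherited_ar &= entry;
		return inherited_ar;
	}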
asm volatile ("mov %0, %%dr7" :: "r" (val));
}
-static inline int svm_is_long_mode(struct kvm_vcpu *vcpu)
-{
- return vcpu->svm->vmcb->save.efer & KVM_EFER_LMA;
-}
-
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
vcpu->svm->asid_generation--;
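The deleted SVM helper read EFER out of the VMCB save area and tested LMA (bit 10, Long Mode Active) rather than LME. A sketch of that old check, with an illustrative name:

	#define EFER_LMA_SKETCH (1ULL << 10)	/* Long Mode Active */

	static int vmcb_efer_long_mode(unsigned long long vmcb_save_efer)
	{
		return (vmcb_save_efer & EFER_LMA_SKETCH) != 0;
	}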
.get_segment_base = svm_get_segment_base,
.get_segment = svm_get_segment,
.set_segment = svm_set_segment,
- .is_long_mode = svm_is_long_mode,
.get_cs_db_l_bits = svm_get_cs_db_l_bits,
.set_cr0 = svm_set_cr0,
.set_cr0_no_modeswitch = svm_set_cr0,
vmcs_write32(sf->ar_bytes, ar);
}
-static int vmx_is_long_mode(struct kvm_vcpu *vcpu)
-{
- return vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_CONTROLS_IA32E_MASK;
-}
-
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
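The deleted VMX helper likewise derived the answer from VMX state rather than EFER: the "IA-32e mode guest" VM-entry control, bit 9 of the VM-entry controls field. A sketch, with an illustrative name:

	#define VM_ENTRY_IA32E_SKETCH (1U << 9)	/* "IA-32e mode guest" */

	static int vmcs_entry_long_mode(unsigned int vm_entry_controls)
	{
		return (vm_entry_controls & VM_ENTRY_IA32E_SKETCH) != 0;
	}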
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
- .is_long_mode = vmx_is_long_mode,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.set_cr0 = vmx_set_cr0,
.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,