#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
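
/*
 * Control-register bits that the guest may be permitted to own, i.e. whose
 * live value may reside in hardware rather than in vcpu->arch.cr0/cr4
 * while the guest runs.  Reads of such bits may first require decaching
 * from hardware (see kvm_read_cr0_bits/kvm_read_cr4_bits below).
 */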
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)
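
/*
 * Read a general-purpose register through the register cache.  regs_avail
 * is a bitmap of registers whose cached value in vcpu->arch.regs[] is
 * up to date; on a miss the vendor module (VMX/SVM) is asked to fetch
 * the value via kvm_x86_ops->cache_reg().
 */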
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                              enum kvm_reg reg)
{
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);

        return vcpu->arch.regs[reg];
}
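
/*
 * Write a general-purpose register into the cache.  Marking the register
 * dirty tells the vendor module to flush the new value to hardware before
 * the next guest entry; marking it available lets subsequent reads hit
 * the cache instead of re-fetching a stale hardware value.
 */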
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      enum kvm_reg reg,
                                      unsigned long val)
{
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
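
/*
 * Convenience wrappers for the most frequently read/written register, RIP.
 * A caller-side sketch (hypothetical, not from this file) for skipping an
 * emulated instruction of length insn_len:
 *
 *      kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */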
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
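
/*
 * The PAE page-directory pointers (PDPTRs) are not general-purpose
 * registers; they share the regs_avail bitmap via the single
 * VCPU_EXREG_PDPTR bit and are cached in vcpu->arch.pdptrs[] by the
 * vendor module's cache_reg() callback.
 */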
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.pdptrs[index];
}
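
/*
 * Read CR0 bits, decaching from hardware only when necessary: tmask
 * narrows the request to bits that can ever be guest-owned, and the
 * vendor module is asked to refresh vcpu->arch.cr0 only if one of the
 * requested bits is currently owned by the guest.
 */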
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if (tmask & vcpu->arch.cr0_guest_owned_bits)
                kvm_x86_ops->decache_cr0_guest_bits(vcpu);
        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}
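
/* Same pattern as CR0: decache guest-owned CR4 bits only on demand. */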
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if (tmask & vcpu->arch.cr4_guest_owned_bits)
                kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

#endif