wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
-static void svm_update_cpl(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- int cpl;
-
- if (!is_protmode(vcpu))
- cpl = 0;
- else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
- cpl = 3;
- else
- cpl = svm->vmcb->save.cs.selector & 0x3;
-
- svm->vmcb->save.cpl = cpl;
-}
-
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
return to_svm(vcpu)->vmcb->save.rflags;
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;
-
+ /*
+ * Any change of EFLAGS.VM is accompanied by a reload of SS
+ * (caused by either a task switch or an inter-privilege IRET),
+ * so we do not need to update the CPL here.
+ */
to_svm(vcpu)->vmcb->save.rflags = rflags;
- if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
- svm_update_cpl(vcpu);
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
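A minimal stand-alone model of the invariant the new comment relies on (simplified C, hypothetical names, not part of the patch): every EFLAGS.VM transition also reloads SS, and it is the SS load rather than the RFLAGS write that refreshes the saved CPL.

#include <stdio.h>

/* Hypothetical, simplified stand-in for the VMCB save-area fields used above. */
struct vmcb_save_model {
	unsigned long rflags;
	unsigned char cpl;
};

/* Mirrors the new svm_set_rflags(): no CPL bookkeeping on an RFLAGS write. */
static void model_set_rflags(struct vmcb_save_model *s, unsigned long rflags)
{
	s->rflags = rflags;
}

/* Mirrors the SS path of svm_set_segment(): CPL follows SS.DPL. */
static void model_load_ss(struct vmcb_save_model *s, unsigned int ss_dpl)
{
	s->cpl = ss_dpl & 3;
}

int main(void)
{
	struct vmcb_save_model s = { 0, 0 };

	/* Entering virtual-8086 mode sets EFLAGS.VM ... */
	model_set_rflags(&s, 1UL << 17);	/* X86_EFLAGS_VM */
	/* ... and also reloads SS with DPL 3, which is what updates CPL. */
	model_load_ss(&s, 3);

	printf("cpl=%u\n", s.cpl);		/* prints 3 */
	return 0;
}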
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
}
- if (seg == VCPU_SREG_CS)
- svm_update_cpl(vcpu);
+
+ /*
+ * This is always accurate, except if SYSRET returned to a segment
+ * with SS.DPL != 3. Intel does not have this quirk, and always
+ * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
+ * would entail passing the CPL to userspace and back.
+ */
+ if (seg == VCPU_SREG_SS)
+ svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
mark_dirty(svm->vmcb, VMCB_SEG);
}
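For reference, a stand-alone sketch of the bit layout the new SS branch relies on: the SVM segment attributes pack the descriptor's access-rights byte, so DPL sits in bits 5-6 (SVM_SELECTOR_DPL_SHIFT is 5 in the KVM sources; it is redefined here only so the example compiles on its own).

#include <stdio.h>

#define SVM_SELECTOR_DPL_SHIFT	5	/* as used by svm_set_segment() above */

int main(void)
{
	/* Typical user stack segment: P=1, DPL=3, S=1, type=3 (read/write data, accessed). */
	unsigned short ss_attrib = (1u << 7) | (3u << SVM_SELECTOR_DPL_SHIFT) | (1u << 4) | 0x3;

	/* Same extraction as the new SS branch in svm_set_segment(). */
	unsigned int cpl = (ss_attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;

	printf("SS.attrib=0x%03x -> CPL=%u\n", ss_attrib, cpl);	/* CPL=3 */
	return 0;
}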
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
- u8 cpl;
bool nmi_known_unmasked;
u32 exit_intr_info;
u32 idt_vectoring_info;
fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
-
- /* CPL is always 0 when CPU enters protected mode */
- __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
- vmx->cpl = 0;
}
static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!is_protmode(vcpu))
+ if (unlikely(vmx->rmode.vm86_active))
return 0;
-
- if (!is_long_mode(vcpu)
- && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
- return 3;
-
- if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
- __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
- vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
+ else {
+ int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
+ return AR_DPL(ar);
}
-
- return vmx->cpl;
}
-
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
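The VMX side depends on the same descriptor layout: the guest access-rights field mirrors the AR byte, so the DPL occupies bits 5-6 and AR_DPL() is a shift and mask. A self-contained sketch (macros redefined locally, not part of the patch) of what the new vmx_get_cpl() returns outside virtual-8086 mode:

#include <stdio.h>

#define AR_DPL_SHIFT	5			/* DPL bits in the VMX access-rights field */
#define AR_DPL(ar)	(((ar) >> AR_DPL_SHIFT) & 3)

int main(void)
{
	unsigned int ss_ar = 0xf3;		/* P=1, DPL=3, S=1, type=3: user data/stack */

	/* Outside virtual-8086 mode, vmx_get_cpl() now returns SS.DPL. */
	printf("CPL=%u\n", AR_DPL(ss_ar));	/* prints 3 */
	return 0;
}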
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
vmx_segment_cache_clear(vmx);
- if (seg == VCPU_SREG_CS)
- __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
vmx->rmode.segs[seg] = *var;
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
- | (1 << VCPU_EXREG_CPL)
| (1 << VCPU_EXREG_PDPTR)
| (1 << VCPU_EXREG_SEGMENTS)
| (1 << VCPU_EXREG_CR3));
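Finally, a simplified model (hypothetical names, not kernel code) of the register-availability bitmap touched here: a set bit means the cached value may be used, a clear bit forces a re-read from the VMCS, and with the CPL no longer cached there is no VCPU_EXREG_CPL bit to clear after a VM exit.

/* Hypothetical, simplified model of the regs_avail bitmap. */
enum model_reg { MODEL_RIP, MODEL_RSP, MODEL_RFLAGS, MODEL_PDPTR, MODEL_SEGMENTS, MODEL_CR3 };

static unsigned long model_regs_avail;

/* After a VM exit, mark everything available except what must be re-read from the VMCS. */
static void model_vmexit(void)
{
	model_regs_avail = ~((1UL << MODEL_RIP) | (1UL << MODEL_RSP) |
			     (1UL << MODEL_RFLAGS) | (1UL << MODEL_PDPTR) |
			     (1UL << MODEL_SEGMENTS) | (1UL << MODEL_CR3));
}

/* Readers consult the bitmap before trusting a cached value. */
static int model_reg_cached(enum model_reg r)
{
	return (model_regs_avail >> r) & 1;
}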