KVM: ppc: Handle guest idle by emulating MSR[WE] writes
author	Hollis Blanchard <hollisb@us.ibm.com>
Fri, 25 Apr 2008 22:55:49 +0000 (17:55 -0500)
committer	Avi Kivity <avi@qumranet.com>
Sun, 4 May 2008 11:44:44 +0000 (14:44 +0300)
This reduces host CPU usage when the guest is idle. However, the guest must
set MSR[WE] in its idle loop, which Linux did not do until 2.6.26.
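
For context, a Book E guest idle loop that triggers this path would look roughly like the sketch below. This is illustrative only and not part of this patch; the function name is made up, and the exact set of interrupt-enable bits (EE/CE) a platform needs may differ. The point is simply that the guest writes MSR with WE set, which the host-side kvmppc_set_msr() below turns into kvm_vcpu_block().

    #include <asm/reg.h>	/* mfmsr(), mtmsr(), MSR_* */
    #include <asm/synch.h>	/* isync() */

    /* Hypothetical guest-side idle hook: enter wait state until the next
     * interrupt, then restore the saved MSR on wakeup. */
    static void booke_idle_wait(void)
    {
    	unsigned long msr = mfmsr();

    	/* WE stops instruction fetch; EE/CE keep interrupts enabled so
    	 * the core (or the host, under KVM) can actually wake us. */
    	mtmsr(msr | MSR_WE | MSR_EE | MSR_CE);
    	isync();

    	/* An interrupt was taken; leave the wait state. */
    	mtmsr(msr);
    	isync();
    }

Until the guest does something like this, it never writes MSR[WE], the vcpu never reaches kvm_vcpu_block(), and the host thread keeps spinning.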

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Jerone Young <jyoung5@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
arch/powerpc/kvm/booke_guest.c
arch/powerpc/kvm/powerpc.c
include/asm-powerpc/kvm_host.h
include/asm-powerpc/kvm_ppc.h

diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 6d9884a6884aa2379f68992738ec40baf141e0bc..b3db6f4576ad36698c36d605e2b2d3fcbbcda887 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -49,6 +49,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
 };
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index bad40bd2d3ac5e966926487cb10df89513e2acc6..777e0f34e0ea12d60e643a47f0b68af5a6bdc930 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -36,13 +36,12 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-       /* XXX implement me */
-       return 0;
+       return !!(v->arch.pending_exceptions);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-       return 1;
+       return !(v->arch.msr & MSR_WE);
 }
 
 
@@ -214,6 +213,11 @@ static void kvmppc_decrementer_func(unsigned long data)
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+               vcpu->stat.halt_wakeup++;
+       }
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -339,6 +343,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        int r;
        sigset_t sigsaved;
 
+       vcpu_load(vcpu);
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -363,12 +369,20 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
+       vcpu_put(vcpu);
+
        return r;
 }
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+               vcpu->stat.halt_wakeup++;
+       }
+
        return 0;
 }
 
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
index 04ffbb8e0a357f972e8743c61891c314eb7962c2..81a69d7110171bc5bc51fda14d8704de28af4b11 100644
--- a/include/asm-powerpc/kvm_host.h
+++ b/include/asm-powerpc/kvm_host.h
@@ -59,6 +59,7 @@ struct kvm_vcpu_stat {
        u32 emulated_inst_exits;
        u32 dec_exits;
        u32 ext_intr_exits;
+       u32 halt_wakeup;
 };
 
 struct tlbe {
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
index 7ac820308a7e19c2e397d399e38fefe0d7ae361b..b35a7e3ef9782b0579072ee8492013f5dd7519f1 100644
--- a/include/asm-powerpc/kvm_ppc.h
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -77,12 +77,17 @@ static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
        clear_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
        if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
                kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
        vcpu->arch.msr = new_msr;
+
+       if (vcpu->arch.msr & MSR_WE)
+               kvm_vcpu_block(vcpu);
 }
 
 #endif /* __POWERPC_KVM_PPC_H__ */