KVM: PPC: booke: use shadow_msr
author	Scott Wood <scottwood@freescale.com>
	Tue, 14 Jun 2011 23:34:29 +0000 (18:34 -0500)
committer	Avi Kivity <avi@redhat.com>
	Tue, 12 Jul 2011 10:16:32 +0000 (13:16 +0300)
Keep the guest MSR and the guest-mode true MSR separate, rather than
modifying the guest MSR on each guest entry to produce a true MSR.

Any bits which should be modified based on guest MSR must be explicitly
propagated from vcpu->arch.shared->msr to vcpu->arch.shadow_msr in
kvmppc_set_msr().
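
For illustration, a minimal sketch of that propagation (a hypothetical
body, not this patch's actual kvmppc_set_msr(); GUEST_MSR_BITS is an
assumed example mask, since which bits are mirrored is a policy
decision made elsewhere in the patch):

	/* Hypothetical sketch: recompute the true (shadow) MSR
	 * whenever the guest-visible MSR changes.  GUEST_MSR_BITS is
	 * an assumed example mask; which bits really follow the guest
	 * MSR is the patch's policy decision.
	 */
	#define GUEST_MSR_BITS	(MSR_CE | MSR_EE)

	void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
	{
		vcpu->arch.shared->msr = new_msr;

		/* Clear, then re-set, the bits mirrored from the guest MSR. */
		vcpu->arch.shadow_msr &= ~GUEST_MSR_BITS;
		vcpu->arch.shadow_msr |= new_msr & GUEST_MSR_BITS;
	}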

While we're modifying the guest entry code, reorder a few instructions
to bury some load latency.
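
Concretely, the reordered sequence (an annotated copy of the new code
in the booke_interrupts.S hunk below) issues all four loads before the
first move-to-SPR consumes a result, giving each load time to complete:

	lwz     r3, VCPU_CTR(r4)        /* start all four loads first ... */
	lwz     r5, VCPU_CR(r4)
	lwz     r6, VCPU_PC(r4)
	lwz     r7, VCPU_SHADOW_MSR(r4)
	mtctr   r3                      /* ... then consume the results,  */
	mtcr    r5                      /* by which time each load has    */
	mtsrr0  r6                      /* had several cycles to finish   */
	mtsrr1  r7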

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke_interrupts.S

index 186f150b9b8970ed217661721b5132f2f9622c59..12cb1807e8d7f96d1a1bcfc18d93160705b46c33 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -219,12 +219,12 @@ struct kvm_vcpu_arch {
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S
-       ulong shadow_msr;
        ulong hflags;
        ulong guest_owned_ext;
 #endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
+       ulong shadow_msr;
        ulong sprg4;
        ulong sprg5;
        ulong sprg6;
index 36e1c8a29be885c498e04b77b2f11e683221e08d..25de8e4808a45fd6f75c366731b66fc8d73b20bc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -404,12 +404,12 @@ int main(void)
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
        DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
        DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+       DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
        /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
        DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
-       DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
        DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
        DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
        DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
index 8462b3a1c1c7501632fc08585c7be028d8eb6601..05cedb5f82100a01351c465d757cb5309ab0edeb 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -514,6 +514,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
        vcpu->arch.pc = 0;
        vcpu->arch.shared->msr = 0;
+       vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
        vcpu->arch.shadow_pid = 1;
index b58ccae95904f30c92fffcc00e28cb58e3cc4fad..55410cc45ad734b898d588bd6e2fb35a233dfa4e 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
 #define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))
 
 /* The host stack layout: */
@@ -405,20 +403,17 @@ lightweight_exit:
 
        /* Finish loading guest volatiles and jump to guest. */
        lwz     r3, VCPU_CTR(r4)
+       lwz     r5, VCPU_CR(r4)
+       lwz     r6, VCPU_PC(r4)
+       lwz     r7, VCPU_SHADOW_MSR(r4)
        mtctr   r3
-       lwz     r3, VCPU_CR(r4)
-       mtcr    r3
+       mtcr    r5
+       mtsrr0  r6
+       mtsrr1  r7
        lwz     r5, VCPU_GPR(r5)(r4)
        lwz     r6, VCPU_GPR(r6)(r4)
        lwz     r7, VCPU_GPR(r7)(r4)
        lwz     r8, VCPU_GPR(r8)(r4)
-       lwz     r3, VCPU_PC(r4)
-       mtsrr0  r3
-       lwz     r3, VCPU_SHARED(r4)
-       lwz     r3, (VCPU_SHARED_MSR + 4)(r3)
-       oris    r3, r3, KVMPPC_MSR_MASK@h
-       ori     r3, r3, KVMPPC_MSR_MASK@l
-       mtsrr1  r3
 
        /* Clear any debug events which occurred since we disabled MSR[DE].
         * XXX This gives us a 3-instruction window in which a breakpoint