KVM: PPC: Use now-shadowed vcpu fields
Author:     Alexander Graf <agraf@suse.de>
AuthorDate: Thu, 15 Apr 2010 22:11:44 +0000 (00:11 +0200)
Commit:     Avi Kivity <avi@redhat.com>
CommitDate: Mon, 17 May 2010 09:18:32 +0000 (12:18 +0300)
The shadow vcpu now contains some fields that we therefore no longer use from
the vcpu itself. Access to them goes through inline functions that happily use
the shadow vcpu fields.

So let's make those vcpu fields BookE-only with #ifdef CONFIG_BOOKE and add
asm-offsets for the shadow vcpu fields.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
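
For illustration, a minimal sketch of the accessor pattern the message refers
to (not the kernel's actual definitions; the helper name to_svcpu() is assumed
from the surrounding series): on Book3S the inline helpers read the register
image from the shadow vcpu, while BookE keeps using the kvm_vcpu fields that
stay behind #ifdef CONFIG_BOOKE.

/* Sketch only -- illustrative, not the kernel's actual code. */
#ifdef CONFIG_PPC_BOOK3S
static inline unsigned long kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->pc;	/* read the shadow vcpu copy */
}
#else /* CONFIG_BOOKE */
static inline unsigned long kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;		/* field kept in kvm_vcpu_arch */
}
#endif
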
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/paca.h
arch/powerpc/kernel/asm-offsets.c
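
The new asm-offsets fall into two groups: PACA_KVM_SVCPU locates the shadow
vcpu inside the PACA, while the SVCPU_* constants are offsets within struct
kvmppc_book3s_shadow_vcpu. VCPU_SVCPU is the delta between the embedded shadow
vcpu and the embedded vcpu in struct kvmppc_vcpu_book3s, so low-level code can
turn a kvm_vcpu pointer into a shadow vcpu pointer by plain pointer
arithmetic. A hedged C sketch of that arithmetic follows; the stand-in struct
bodies and the helper name svcpu_of() are assumptions for illustration.

#include <stddef.h>

/* Stand-ins for the real structures; only the embedding matters here. */
struct kvm_vcpu { int dummy; };
struct kvmppc_book3s_shadow_vcpu { int dummy; };

struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
};

/* VCPU_SVCPU encodes exactly this delta, so assembly (or C) can go from a
 * kvm_vcpu pointer to its shadow vcpu without knowing the container type. */
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_of(struct kvm_vcpu *vcpu)
{
	ptrdiff_t delta = offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
			  offsetof(struct kvmppc_vcpu_book3s, vcpu);

	return (struct kvmppc_book3s_shadow_vcpu *)((char *)vcpu + delta);
}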

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 22801f802312397cc86c8d487779728b9bfc8bf1..5a83995105f8feff5be4d9683e5332f0b46b2d54 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -191,11 +191,11 @@ struct kvm_vcpu_arch {
        u32 qpr[32];
 #endif
 
+#ifdef CONFIG_BOOKE
        ulong pc;
        ulong ctr;
        ulong lr;
 
-#ifdef CONFIG_BOOKE
        ulong xer;
        u32 cr;
 #endif
@@ -203,7 +203,6 @@ struct kvm_vcpu_arch {
        ulong msr;
 #ifdef CONFIG_PPC_BOOK3S
        ulong shadow_msr;
-       ulong shadow_srr1;
        ulong hflags;
        ulong guest_owned_ext;
 #endif
@@ -258,14 +257,13 @@ struct kvm_vcpu_arch {
        struct dentry *debugfs_exit_timing;
 #endif
 
+#ifdef CONFIG_BOOKE
        u32 last_inst;
-#ifdef CONFIG_PPC64
-       u32 fault_dsisr;
-#endif
        ulong fault_dear;
        ulong fault_esr;
        ulong queued_dear;
        ulong queued_esr;
+#endif
        gpa_t paddr_accessed;
 
        u8 io_gpr; /* GPR used as IO source/target */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 33347ea4b47a39dc01bd188ea2a3a509181def2b..224eb371ca1dee8ec9d1fb28d0add9f564d68ebf 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -137,14 +137,8 @@ struct paca_struct {
        u64 startspurr;                 /* SPURR value snapshot */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-       struct  {
-               u64     esid;
-               u64     vsid;
-       } kvm_slb[64];                  /* guest SLB */
        /* We use this to store guest state in */
        struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
-       u8 kvm_slb_max;                 /* highest used guest slb entry */
-       u8 kvm_in_guest;                /* are we inside the guest? */
 #endif
 };
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 57a8c49c88304dec44cc8db9a639e139e90977e6..e8003ff81eef218478318e2e1692bd0521632630 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
 #endif
 #ifdef CONFIG_KVM
 #include <linux/kvm_host.h>
+#ifndef CONFIG_BOOKE
+#include <asm/kvm_book3s.h>
+#endif
 #endif
 
 #ifdef CONFIG_PPC32
@@ -191,33 +194,9 @@ int main(void)
        DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
        DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-       DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
-       DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
-       DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
-       DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
-       DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
-       DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
-       DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
-       DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
-       DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
-       DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
-       DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
-       DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
-       DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
-       DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
-       DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
-       DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
-       DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
-       DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
-       DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
-       DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
-       DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
-       DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
-                                           shadow_vcpu.vmhandler));
-       DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
-                                          shadow_vcpu.scratch0));
-       DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
-                                          shadow_vcpu.scratch1));
+       DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+       DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+       DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
 #endif
 #endif /* CONFIG_PPC64 */
 
@@ -412,9 +391,6 @@ int main(void)
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-       DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-       DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
-       DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
        DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
        DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
        DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -422,26 +398,67 @@ int main(void)
        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 
-       DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
-       DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
-       DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-
        /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
-       DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
        DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-       DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
        DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
        DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
-       DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
        DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
        DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
        DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
        DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
        DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+       DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+                          offsetof(struct kvmppc_vcpu_book3s, vcpu));
+       DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+       DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+       DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+       DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+       DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+       DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+       DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+       DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+       DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+       DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+       DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+       DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+       DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+       DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+       DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+       DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+       DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+       DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+       DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+       DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+       DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+       DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        vmhandler));
+       DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       scratch0));
+       DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       scratch1));
+       DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                       in_guest));
+       DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                          fault_dsisr));
+       DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        fault_dar));
+       DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                        last_inst));
+       DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+                                          shadow_srr1));
+#ifdef CONFIG_PPC_BOOK3S_32
+       DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+#endif
 #else
        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
        DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+       DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+       DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+       DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+       DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+       DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+       DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif
 #ifdef CONFIG_44x