KVM: PPC: Book3E: Increase FPU laziness
authorMihai Caraman <mihai.caraman@freescale.com>
Wed, 20 Aug 2014 13:36:22 +0000 (16:36 +0300)
committerAlexander Graf <agraf@suse.de>
Mon, 22 Sep 2014 08:11:32 +0000 (10:11 +0200)
Increase FPU laziness by loading the guest state into the unit before entering
the guest, instead of doing it on each vcpu schedule. Without this improvement
an interrupt may claim the floating point unit, corrupting the guest state.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke.h
arch/powerpc/kvm/e500mc.c

index 074b7fc795b78fd30add335c223c30583bbaf957..91e7217db9d9e5e819d6f579a80102eb93362aea 100644 (file)
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we are holding the FPU, and can then help to save the
+ * guest vcpu FP state if other threads need to use the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+       if (!(current->thread.regs->msr & MSR_FP)) {
+               enable_kernel_fp();
+               load_fp_state(&vcpu->arch.fp);
+               current->thread.fp_save_area = &vcpu->arch.fp;
+               current->thread.regs->msr |= MSR_FP;
+       }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into the thread struct.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+       if (current->thread.regs->msr & MSR_FP)
+               giveup_fpu(current);
+       current->thread.fp_save_area = NULL;
+#endif
+}
+
 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 {
 #if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
@@ -658,12 +692,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
-        * as always using the FPU.  Kernel usage of FP (via
-        * enable_kernel_fp()) in this thread must not occur while
-        * vcpu->fpu_active is set.
+        * as always using the FPU.
         */
-       vcpu->fpu_active = 1;
-
        kvmppc_load_guest_fp(vcpu);
 #endif
 
@@ -687,8 +717,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);
-
-       vcpu->fpu_active = 0;
 #endif
 
 out:
@@ -1194,6 +1222,7 @@ out:
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
+                       kvmppc_load_guest_fp(vcpu);
                }
        }
 
index f753543c56fa4aff28f1ba81cc24a6b1593dc114..e73d513f72d0c9c49ba414c6cc3314767ceb9392 100644 (file)
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
                                          ulong *spr_val);
 
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-       if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
-               enable_kernel_fp();
-               load_fp_state(&vcpu->arch.fp);
-               current->thread.fp_save_area = &vcpu->arch.fp;
-               current->thread.regs->msr |= MSR_FP;
-       }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
-       if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu(current);
-       current->thread.fp_save_area = NULL;
-#endif
-}
-
 static inline void kvmppc_clear_dbsr(void)
 {
        mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
index 000cf8242e7dc11df1de694e5d5fe11dacc23b47..454934990672f50b8cae6703816ef6e25e5a91d2 100644 (file)
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
                kvmppc_e500_tlbil_all(vcpu_e500);
                __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
        }
-
-       kvmppc_load_guest_fp(vcpu);
 }
 
 static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)