kvm/ppc: Call trace_hardirqs_on before entry
authorScott Wood <scottwood@freescale.com>
Wed, 10 Jul 2013 22:47:39 +0000 (17:47 -0500)
committerAlexander Graf <agraf@suse.de>
Wed, 10 Jul 2013 22:51:28 +0000 (00:51 +0200)
Currently this is only being done on 64-bit.  Rather than just move it
out of the 64-bit ifdef, move it to kvmppc_lazy_ee_enable() so that it is
consistent with lazy ee state, and so that we don't track more host
code as interrupts-enabled than necessary.

Rename kvmppc_lazy_ee_enable() to kvmppc_fix_ee_before_entry() to reflect
that this function now has a role on 32-bit as well.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c

index 5a26bfcd0bbc389190ff1a7a39524d1140e3039d..b15554a26c20b8fd6bd25896164e91f91e27c5a4 100644 (file)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
        }
 }
 
-/* Please call after prepare_to_enter. This function puts the lazy ee state
-   back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
 {
+       trace_hardirqs_on();
+
 #ifdef CONFIG_PPC64
        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
index 19498a567a81f25c67c7f1b3f010182fcb680e2b..ddfaf560e5037e928cd521c8403cfc9d74bd5ff6 100644 (file)
@@ -890,7 +890,7 @@ program_interrupt:
                        local_irq_enable();
                        r = s;
                } else {
-                       kvmppc_lazy_ee_enable();
+                       kvmppc_fix_ee_before_entry();
                }
        }
 
@@ -1161,7 +1161,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-       kvmppc_lazy_ee_enable();
+       kvmppc_fix_ee_before_entry();
 
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
index dcc94f016007f485e769f189cce610c77734f56f..6e353517c4ad6c1cbb02f4630edb2a7d0146d326 100644 (file)
@@ -698,7 +698,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        kvmppc_load_guest_fp(vcpu);
 #endif
 
-       kvmppc_lazy_ee_enable();
+       kvmppc_fix_ee_before_entry();
 
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
@@ -1168,7 +1168,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        local_irq_enable();
                        r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                } else {
-                       kvmppc_lazy_ee_enable();
+                       kvmppc_fix_ee_before_entry();
                }
        }
 
index 6316ee336e888e22636f557d1623c54b30d7a207..4e05f8c693b4fdef4ce71d0df27cc4dffba50030 100644 (file)
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
                        kvm_guest_exit();
                        continue;
                }
-
-               trace_hardirqs_on();
 #endif
 
                kvm_guest_enter();