kvm/ppc: IRQ disabling cleanup
author    Scott Wood <scottwood@freescale.com>
Fri, 10 Jan 2014 01:18:40 +0000 (19:18 -0600)
committer Alexander Graf <agraf@suse.de>
Mon, 27 Jan 2014 15:00:55 +0000 (16:00 +0100)
Simplify the handling of lazy EE by going directly from fully-enabled
to hard-disabled.  This replaces the lazy_irq_pending() check
(including its misplaced kvm_guest_exit() call).
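
For orientation, here is a condensed sketch of the resulting kvmppc_prepare_to_enter()
flow, pieced together from the powerpc.c hunk below.  The signal check is elided and
the vcpu->requests guard is an assumption of this sketch, not shown in the diff:

	int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
	{
		int r;

		WARN_ON(irqs_disabled());	/* callers now enter with IRQs on */
		hard_irq_disable();		/* fully-enabled -> hard-disabled, no lazy step */

		while (true) {
			if (need_resched()) {
				local_irq_enable();
				cond_resched();
				hard_irq_disable();	/* always re-disable the hard way */
				continue;
			}

			/* ... signal check elided ... */

			if (vcpu->requests) {		/* guard assumed; not shown in the hunk */
				local_irq_enable();
				trace_kvm_check_requests(vcpu);
				r = kvmppc_core_check_requests(vcpu);
				hard_irq_disable();
				if (r > 0)
					continue;
				break;			/* r <= 0: bail out to the host */
			}

			kvm_guest_enter();
			return 1;			/* enter guest with IRQs hard-disabled */
		}

		/* return to host: the heavyweight exit re-enables IRQs here, not in callers */
		local_irq_enable();
		return r;
	}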

As suggested by Tiejun Chen, move the interrupt disabling into
kvmppc_prepare_to_enter() rather than have each caller do it.  Also
move the IRQ enabling on heavyweight exit into
kvmppc_prepare_to_enter().
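
Roughly, the caller-side change looks like this (lifted from the kvmppc_vcpu_run()
hunk in booke.c below; the other call sites follow the same pattern):

	/* Before: each caller bracketed the call with its own IRQ handling */
	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}

	/* After: kvmppc_prepare_to_enter() hard-disables IRQs itself and
	 * re-enables them on a heavyweight (return-to-host) exit
	 */
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */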

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 629277df47986d13b427f46519505b2e230a0cf7..fcd53f0d34bad9a26b3b74bc46ecc55b0c673f79 100644
@@ -456,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
        trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+       /*
+        * To avoid races, the caller must have gone directly from having
+        * interrupts fully-enabled to hard-disabled.
+        */
+       WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        local_paca->soft_enabled = 1;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index aedba681bb94d1dfd2df0b7f93c91eb3a37079d6..e82fafdaf88047d872b9b3b54c37361f5f6239d7 100644
@@ -999,14 +999,14 @@ program_interrupt:
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
-               local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
-               if (s <= 0) {
-                       local_irq_enable();
+               if (s <= 0)
                        r = s;
-               } else {
+               else {
+                       /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }
+
                kvmppc_handle_lost_ext(vcpu);
        }
 
@@ -1219,12 +1219,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
-       local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
-       if (ret <= 0) {
-               local_irq_enable();
+       if (ret <= 0)
                goto out;
-       }
+       /* interrupts now hard-disabled */
 
        /* Save FPU state in thread_struct */
        if (current->thread.regs->msr & MSR_FP)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6a8c32ec4173fe818313e7d9db9a6ad0c05c873d..07b89c711898974423d2e02a3a40fcb948d6e681 100644
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-               local_irq_disable();
+               hard_irq_disable();
 
                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
@@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                return -EINVAL;
        }
 
-       local_irq_disable();
        s = kvmppc_prepare_to_enter(vcpu);
        if (s <= 0) {
-               local_irq_enable();
                ret = s;
                goto out;
        }
+       /* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
@@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
-               local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
-               if (s <= 0) {
-                       local_irq_enable();
+               if (s <= 0)
                        r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-               } else {
+               else {
+                       /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }
        }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 026dfaaa4772acefa276e8569228fc1a55c620e8..3cf541a53e2aef14a00e467e0c4daec16c255e7a 100644
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-       int r = 1;
+       int r;
+
+       WARN_ON(irqs_disabled());
+       hard_irq_disable();
 
-       WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
-                       local_irq_disable();
+                       hard_irq_disable();
                        continue;
                }
 
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
-                       local_irq_disable();
+                       hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
                        continue;
                }
 
-#ifdef CONFIG_PPC64
-               /* lazy EE magic */
-               hard_irq_disable();
-               if (lazy_irq_pending()) {
-                       /* Got an interrupt in between, try again */
-                       local_irq_enable();
-                       local_irq_disable();
-                       kvm_guest_exit();
-                       continue;
-               }
-#endif
-
                kvm_guest_enter();
-               break;
+               return 1;
        }
 
+       /* return to host */
+       local_irq_enable();
        return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);