KVM: PPC: Rename deliver_interrupts to prepare_to_enter
author Scott Wood <scottwood@freescale.com>
Wed, 9 Nov 2011 00:23:20 +0000 (18:23 -0600)
committer Avi Kivity <avi@redhat.com>
Mon, 5 Mar 2012 12:52:25 +0000 (14:52 +0200)
This function also updates paravirt int_pending, so rename it to make
it more obvious that it is a collection of checks run prior to
(re)entering a guest.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
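
The rationale is easier to see with the paravirt side spelled out: besides
injecting whatever interrupt can currently be delivered, the function also
publishes the remaining pending state to the guest through the shared
paravirt page (int_pending), so "prepare_to_enter" describes its job better
than "deliver_interrupts". Below is a minimal, self-contained user-space C
sketch of that pattern; all types and helper names are hypothetical
stand-ins, not the kernel structures (the real bodies live in book3s.c and
booke.c, shown in the hunks further down):

	#include <stdint.h>

	/* Hypothetical stand-ins for the KVM structures involved. */
	struct shared_page { uint8_t int_pending; };   /* paravirt page the guest polls */
	struct vcpu {
		unsigned long pending_exceptions;       /* one bit per interrupt priority */
		struct shared_page *shared;
	};

	/* Stub: inject the interrupt at 'priority'; return nonzero on success. */
	static int deliver_one(struct vcpu *v, unsigned int priority)
	{
		v->pending_exceptions &= ~(1UL << priority);
		return 1;
	}

	/* The pattern behind the renamed hook: deliver what we can, then tell
	 * the guest whether anything is still pending. */
	static void prepare_to_enter(struct vcpu *v)
	{
		unsigned long old_pending = v->pending_exceptions;

		while (v->pending_exceptions) {
			unsigned int prio =
				(unsigned int)__builtin_ctzl(v->pending_exceptions);
			if (!deliver_one(v, prio))
				break;          /* cannot inject more right now */
		}

		/* Paravirt update: the guest reads int_pending instead of trapping. */
		if (v->pending_exceptions)
			v->shared->int_pending = 1;
		else if (old_pending)
			v->shared->int_pending = 0;
	}

	int main(void)
	{
		struct shared_page page = { .int_pending = 1 };
		struct vcpu v = { .pending_exceptions = (1UL << 3) | (1UL << 0),
		                  .shared = &page };

		prepare_to_enter(&v);
		return page.int_pending;  /* 0: everything was deliverable */
	}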
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a284f209e2dfab402a88f82e1fc4f273a183ceec..c089927f64cca9e75cea5ef3c57272c449a5b32f 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -94,7 +94,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e41ac6f7dcf15105aa4b784fce7d43d1599c4d64..73fc9f0461077b83e003b0be43ccf0e381cece5a 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
        return true;
 }
 
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 336983da9e726c1e144b9a29be03934308800958..536adee59c0768f6e40f21596429e04d1c1b8eab 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -482,7 +482,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
        if (now > vcpu->arch.dec_expires) {
                /* decrementer has already gone negative */
                kvmppc_core_queue_dec(vcpu);
-               kvmppc_core_deliver_interrupts(vcpu);
+               kvmppc_core_prepare_to_enter(vcpu);
                return;
        }
        dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
@@ -797,7 +797,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
                list_for_each_entry_safe(v, vn, &vc->runnable_threads,
                                         arch.run_list) {
-                       kvmppc_core_deliver_interrupts(v);
+                       kvmppc_core_prepare_to_enter(v);
                        if (signal_pending(v->arch.run_task)) {
                                kvmppc_remove_runnable(vc, v);
                                v->stat.signal_exits++;
@@ -857,7 +857,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
                if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
                    !(vcpu->arch.shregs.msr & MSR_PR)) {
                        r = kvmppc_pseries_do_hcall(vcpu);
-                       kvmppc_core_deliver_interrupts(vcpu);
+                       kvmppc_core_prepare_to_enter(vcpu);
                }
        } while (r == RESUME_GUEST);
        return r;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e1e20ebdea21c726e946e1f37aef1c8c6f..f3628581fb7c9d3f4effd700b9e61528ca55ce4e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -764,7 +764,7 @@ program_interrupt:
                        /* In case an interrupt came in that was triggered
                         * from userspace (like DEC), we need to check what
                         * to inject now! */
-                       kvmppc_core_deliver_interrupts(vcpu);
+                       kvmppc_core_prepare_to_enter(vcpu);
                }
        }
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 9c785896e86769a923ac2a06e50b67127da3f4a6..e082e348c88200a36acc5f4a10f3a5ff1a7772a1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -289,7 +289,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 }
 
 /* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -611,7 +611,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        local_irq_disable();
 
-       kvmppc_core_deliver_interrupts(vcpu);
+       kvmppc_core_prepare_to_enter(vcpu);
 
        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3cf6fba513ac311b61fdc3137b7e9196e0359347..6186ec0d939b7fe619901bcdf345572c1ccc8514 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -559,7 +559,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                vcpu->arch.hcall_needed = 0;
        }
 
-       kvmppc_core_deliver_interrupts(vcpu);
+       kvmppc_core_prepare_to_enter(vcpu);
 
        r = kvmppc_vcpu_run(run, vcpu);