KVM: arm/arm64: Move timer IRQ map to latest possible time
authorChristoffer Dall <christoffer.dall@linaro.org>
Wed, 18 May 2016 15:26:00 +0000 (16:26 +0100)
committerChristoffer Dall <christoffer.dall@linaro.org>
Fri, 20 May 2016 13:39:41 +0000 (15:39 +0200)
We are about to modify the VGIC to allocate all data structures
dynamically and store mapped IRQ information on a per-IRQ struct, which
is indeed allocated dynamically at init time.

Therefore, we cannot record the mapped IRQ info from the timer at timer
reset time, as is done now, because VCPU reset happens before timer
init.

A possible later time to do this is on the first run of each VCPU; it
just requires us to make the enable state per-VCPU and to look up the
physical IRQ number when we are about to run the VCPU.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
arch/arm/kvm/arm.c
include/kvm/arm_arch_timer.h
virt/kvm/arm/arch_timer.c
virt/kvm/arm/hyp/timer-sr.c

index be4b6394a0620de3037a68a98f364fa941b2c958..ceb9345bcf07283ab5099dc3bc3e8f4b335739e2 100644 (file)
@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
-       int ret;
+       int ret = 0;
 
        if (likely(vcpu->arch.has_run_once))
                return 0;
@@ -482,9 +482,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
         * interrupts from the virtual timer with a userspace gic.
         */
        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-               kvm_timer_enable(kvm);
+               ret = kvm_timer_enable(vcpu);
 
-       return 0;
+       return ret;
 }
 
 bool kvm_arch_intc_initialized(struct kvm *kvm)
index a47b7dee434dd08d1802c4a285f583b40204e56e..dda39d8fa189b50d897c7c59bbba5b1f8bdab649 100644 (file)
@@ -24,9 +24,6 @@
 #include <linux/workqueue.h>
 
 struct arch_timer_kvm {
-       /* Is the timer enabled */
-       bool                    enabled;
-
        /* Virtual offset */
        cycle_t                 cntvoff;
 };
@@ -55,10 +52,13 @@ struct arch_timer_cpu {
 
        /* Active IRQ state caching */
        bool                            active_cleared_last;
+
+       /* Is the timer enabled */
+       bool                    enabled;
 };
 
 int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
 void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq);
index 3232105e6afd24f7971eec06c5c60a4a11214cd7..e2d5b6f988fb60b00aed69bff82118c0ba787ee7 100644 (file)
@@ -197,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
         * because the guest would never see the interrupt.  Instead wait
         * until we call this function from kvm_timer_flush_hwstate.
         */
-       if (!vgic_initialized(vcpu->kvm))
+       if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
                return -ENODEV;
 
        if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -333,9 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-       struct irq_desc *desc;
-       struct irq_data *data;
-       int phys_irq;
 
        /*
         * The vcpu timer irq number cannot be determined in
@@ -354,26 +351,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
        timer->cntv_ctl = 0;
        kvm_timer_update_state(vcpu);
 
-       /*
-        * Find the physical IRQ number corresponding to the host_vtimer_irq
-        */
-       desc = irq_to_desc(host_vtimer_irq);
-       if (!desc) {
-               kvm_err("%s: no interrupt descriptor\n", __func__);
-               return -EINVAL;
-       }
-
-       data = irq_desc_get_irq_data(desc);
-       while (data->parent_data)
-               data = data->parent_data;
-
-       phys_irq = data->hwirq;
-
-       /*
-        * Tell the VGIC that the virtual interrupt is tied to a
-        * physical interrupt. We do that once per VCPU.
-        */
-       return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq);
+       return 0;
 }
 
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
@@ -501,10 +479,40 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
        kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
 }
 
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
-       if (kvm->arch.timer.enabled)
-               return;
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct irq_desc *desc;
+       struct irq_data *data;
+       int phys_irq;
+       int ret;
+
+       if (timer->enabled)
+               return 0;
+
+       /*
+        * Find the physical IRQ number corresponding to the host_vtimer_irq
+        */
+       desc = irq_to_desc(host_vtimer_irq);
+       if (!desc) {
+               kvm_err("%s: no interrupt descriptor\n", __func__);
+               return -EINVAL;
+       }
+
+       data = irq_desc_get_irq_data(desc);
+       while (data->parent_data)
+               data = data->parent_data;
+
+       phys_irq = data->hwirq;
+
+       /*
+        * Tell the VGIC that the virtual interrupt is tied to a
+        * physical interrupt. We do that once per VCPU.
+        */
+       ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+       if (ret)
+               return ret;
+
 
        /*
         * There is a potential race here between VCPUs starting for the first
@@ -515,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm)
         * the arch timers are enabled.
         */
        if (timecounter && wqueue)
-               kvm->arch.timer.enabled = 1;
+               timer->enabled = 1;
+
+       return 0;
 }
 
 void kvm_timer_init(struct kvm *kvm)
index ea00d69e7078ccfaa8420c13a98a0c54b698b243..798866a8d8756b07dd815d7a8bc8dec8f6019e13 100644 (file)
 /* vcpu is already in the HYP VA space */
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        u64 val;
 
-       if (kvm->arch.timer.enabled) {
+       if (timer->enabled) {
                timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
                timer->cntv_cval = read_sysreg_el0(cntv_cval);
        }
@@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
        val |= CNTHCTL_EL1PCTEN;
        write_sysreg(val, cnthctl_el2);
 
-       if (kvm->arch.timer.enabled) {
+       if (timer->enabled) {
                write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
                write_sysreg_el0(timer->cntv_cval, cntv_cval);
                isb();