KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
author Marc Zyngier <marc.zyngier@arm.com>
Tue, 2 Feb 2016 19:35:34 +0000 (19:35 +0000)
committer Marc Zyngier <marc.zyngier@arm.com>
Wed, 9 Mar 2016 04:22:20 +0000 (04:22 +0000)
GICv2 registers are *slow*. As in "terrifyingly slow". Which is bad.
But we're equally bad, as we make a point of accessing them even when
we don't have any interrupt in flight.

A good solution is to first find out whether we have anything useful
to write into the GIC, and if we don't, to simply skip the accesses.
This involves tracking which LRs actually hold a valid interrupt.
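
As an illustration of the approach (not the patch itself), here is a
minimal stand-alone C sketch. mmio_read32/mmio_write32, struct cpu_if
and NR_LR are hypothetical stand-ins for the kernel's readl_relaxed/
writel_relaxed, struct vgic_v2_cpu_if and vgic_cpu.nr_lr:

	#include <stdint.h>

	#define NR_LR          64
	#define LR_STATE_MASK  (3u << 28) /* pending/active, as GICH_LR_STATE */

	/* Hypothetical MMIO accessors standing in for
	 * readl_relaxed/writel_relaxed. */
	extern uint32_t mmio_read32(volatile uint32_t *addr);
	extern void mmio_write32(uint32_t val, volatile uint32_t *addr);

	struct cpu_if {
		uint32_t lr[NR_LR]; /* software copy of the List Registers */
		uint64_t live_lrs;  /* bit i set => LR i holds live state */
	};

	/* Restore: compute live_lrs first, and only touch the (slow) LRs
	 * if at least one of them actually carries state. */
	static void restore_lrs(struct cpu_if *cpu_if,
				volatile uint32_t *lr_base, int nr_lr)
	{
		uint64_t live = 0;
		int i;

		for (i = 0; i < nr_lr; i++)
			if (cpu_if->lr[i] & LR_STATE_MASK)
				live |= 1ULL << i;

		if (live) {
			for (i = 0; i < nr_lr; i++)
				mmio_write32((live & (1ULL << i)) ?
					     cpu_if->lr[i] : 0,
					     &lr_base[i]);
		}

		cpu_if->live_lrs = live;
	}

	/* Save: read back only the LRs we know we wrote; if none were
	 * live, perform no LR access at all. */
	static void save_lrs(struct cpu_if *cpu_if,
			     volatile uint32_t *lr_base, int nr_lr)
	{
		int i;

		if (!cpu_if->live_lrs)
			return;

		for (i = 0; i < nr_lr; i++)
			if (cpu_if->live_lrs & (1ULL << i))
				cpu_if->lr[i] = mmio_read32(&lr_base[i]);

		cpu_if->live_lrs = 0;
	}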

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
include/kvm/arm_vgic.h
virt/kvm/arm/hyp/vgic-v2-sr.c

index 13a3d537811b9f7d12b3fa892b8312330f89d70f..f473fd65fab5a0200691e26706200df99dc1e751 100644 (file)
@@ -321,6 +321,8 @@ struct vgic_cpu {
 
        /* Protected by the distributor's irq_phys_map_lock */
        struct list_head        irq_phys_map_list;
+
+       u64             live_lrs;
 };
 
 #define LR_EMPTY       0xff
index 9514a7d90d7155276f1d9a54336b3b029ead5c0d..aa0fdb89827ff4a57eda4ab5834a4c7213568cc8 100644 (file)
@@ -36,28 +36,41 @@ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
 
        nr_lr = vcpu->arch.vgic_cpu.nr_lr;
        cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
-       cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
-       eisr0  = readl_relaxed(base + GICH_EISR0);
-       elrsr0 = readl_relaxed(base + GICH_ELRSR0);
-       if (unlikely(nr_lr > 32)) {
-               eisr1  = readl_relaxed(base + GICH_EISR1);
-               elrsr1 = readl_relaxed(base + GICH_ELRSR1);
-       } else {
-               eisr1 = elrsr1 = 0;
-       }
+
+       if (vcpu->arch.vgic_cpu.live_lrs) {
+               eisr0  = readl_relaxed(base + GICH_EISR0);
+               elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+               cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+               cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);
+
+               if (unlikely(nr_lr > 32)) {
+                       eisr1  = readl_relaxed(base + GICH_EISR1);
+                       elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+               } else {
+                       eisr1 = elrsr1 = 0;
+               }
+
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
-       cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+               cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
+               cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
 #else
-       cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
-       cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+               cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
+               cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
 #endif
-       cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);
 
-       writel_relaxed(0, base + GICH_HCR);
+               for (i = 0; i < nr_lr; i++)
+                       if (vcpu->arch.vgic_cpu.live_lrs & (1UL << i))
+                               cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-       for (i = 0; i < nr_lr; i++)
-               cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+               writel_relaxed(0, base + GICH_HCR);
+
+               vcpu->arch.vgic_cpu.live_lrs = 0;
+       } else {
+               cpu_if->vgic_eisr = 0;
+               cpu_if->vgic_elrsr = ~0UL;
+               cpu_if->vgic_misr = 0;
+               cpu_if->vgic_apr = 0;
+       }
 }
 
 /* vcpu is already in the HYP VA space */
@@ -68,15 +81,30 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
        struct vgic_dist *vgic = &kvm->arch.vgic;
        void __iomem *base = kern_hyp_va(vgic->vctrl_base);
        int i, nr_lr;
+       u64 live_lrs = 0;
 
        if (!base)
                return;
 
-       writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
-       writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
-       writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-
        nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+
        for (i = 0; i < nr_lr; i++)
-               writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+               if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
+                       live_lrs |= 1UL << i;
+
+       if (live_lrs) {
+               writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+               writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+               for (i = 0; i < nr_lr; i++) {
+                       u32 val = 0;
+
+                       if (live_lrs & (1UL << i))
+                               val = cpu_if->vgic_lr[i];
+
+                       writel_relaxed(val, base + GICH_LR0 + (i * 4));
+               }
+       }
+
+       writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+       vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 }
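
One subtlety in the save path: when no LR was live, the software
copies must still be left in the state the hardware would have
reported, because the rest of the vgic code consults them without
knowing the MMIO reads were skipped. In the hypothetical model from
the sketch above, that fallback would look like this (the vgic_*
fields mirror the ones in the patch and are not part of the minimal
struct shown earlier):

	/* No LR was live: synthesize the status the GIC would have
	 * returned, without a single GICH access. */
	static void fake_empty_state(struct cpu_if *cpu_if)
	{
		cpu_if->vgic_eisr  = 0;     /* no LR signalled an EOI */
		cpu_if->vgic_elrsr = ~0ULL; /* every LR reads back empty */
		cpu_if->vgic_misr  = 0;     /* no maintenance irq cause */
		cpu_if->vgic_apr   = 0;     /* no active priorities */
	}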