KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock
author Andre Przywara <andre.przywara@arm.com>
Fri, 11 May 2018 14:20:14 +0000 (15:20 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 22 May 2018 16:53:57 +0000 (18:53 +0200)
commit bf308242ab98b5d1648c3663e753556bef9bec01 upstream.

kvm_read_guest() will eventually look up in kvm_memslots(), which requires
either holding the kvm->slots_lock or being inside a kvm->srcu read-side
critical section.
In contrast to x86 and s390, we don't take the SRCU lock on every guest
exit, so we have to do it individually for each kvm_read_guest() call.
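
As a hedged sketch of that rule (an illustrative reduction assuming a
single address space; the helper name is made up and the mainline
kvm_memslots() differs in detail), the lookup boils down to an
SRCU-checked dereference, so lockdep flags any caller that holds
neither protection:

    /*
     * Illustrative reduction (not the mainline kvm_memslots()): the
     * slots pointer is published under SRCU, so a reader must hold
     * kvm->slots_lock or sit inside an srcu_read_lock() section.
     */
    static inline struct kvm_memslots *memslots_sketch(struct kvm *kvm)
    {
            return srcu_dereference_check(kvm->memslots, &kvm->srcu,
                                          lockdep_is_held(&kvm->slots_lock));
    }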

Provide a wrapper which does that, and use it everywhere.

Note that ending the SRCU read-side critical section before returning from
the kvm_read_guest() wrapper is safe, because the data has been *copied*,
so we don't need to rely on valid references to the memslot anymore.
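
For illustration, a hypothetical caller (not code from this patch; it
mirrors the update_lpi_config() change below): the byte lands in the
caller's own storage, so nothing touches the memslot once the wrapper
has dropped the read lock.

    /*
     * Hypothetical caller: fetch one byte of guest memory. 'val' is
     * caller-owned stack storage, so it stays valid even though
     * kvm_read_guest_lock() has already dropped the SRCU read lock.
     */
    static int read_one_guest_byte(struct kvm *kvm, gpa_t gpa, u8 *out)
    {
            u8 val;
            int ret = kvm_read_guest_lock(kvm, gpa, &val, sizeof(val));

            if (ret)
                    return ret;

            *out = val;     /* a copy, not a pointer into the memslot */
            return 0;
    }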

Cc: Stable <stable@vger.kernel.org> # 4.8+
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/kvm_mmu.h
arch/arm64/include/asm/kvm_mmu.h
virt/kvm/arm/vgic/vgic-its.c

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index eb46fc81a440c3384ff55efe1ab26c43cd6c86db..08cd720eae0110e354d7055b8a8841ffc81a7075 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -221,6 +221,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
        return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 2d6d4bd9de52b48cbaaeae1cea86544c3f406bec..fe55b516f018d0adf8d0b31d1d93d66618931cce 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu.h>
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4131560d9f745e65357faddb345455676062fa30..d72b8481f2500ca470a8e3eb935589ff29dc4926 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -279,8 +279,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
        u8 prop;
        int ret;
 
-       ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-                            &prop, 1);
+       ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+                                 &prop, 1);
 
        if (ret)
                return ret;
@@ -413,8 +413,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                 * this very same byte in the last iteration. Reuse that.
                 */
                if (byte_offset != last_byte_offset) {
-                       ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-                                            &pendmask, 1);
+                       ret = kvm_read_guest_lock(vcpu->kvm,
+                                                 pendbase + byte_offset,
+                                                 &pendmask, 1);
                        if (ret) {
                                kfree(intids);
                                return ret;
@@ -740,7 +741,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                return false;
 
        /* Each 1st level entry is represented by a 64-bit value. */
-       if (kvm_read_guest(its->dev->kvm,
+       if (kvm_read_guest_lock(its->dev->kvm,
                           BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
                           &indirect_ptr, sizeof(indirect_ptr)))
                return false;
@@ -1297,8 +1298,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
        cbaser = CBASER_ADDRESS(its->cbaser);
 
        while (its->cwriter != its->creadr) {
-               int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-                                        cmd_buf, ITS_CMD_SIZE);
+               int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+                                             cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest() fails, this could be due to the guest
                 * programming a bogus value in CBASER or something else going