arm64: KVM: Tighten guest core register access from userspace
authorDave Martin <Dave.Martin@arm.com>
Thu, 27 Sep 2018 15:53:21 +0000 (16:53 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Oct 2018 00:01:51 +0000 (17:01 -0700)
commit d26c25a9d19b5976b319af528886f89cf455692d upstream.

We currently allow userspace to access the core register file
in just about any possible way, including straddling multiple
registers and doing unaligned accesses.

This is not the expected use of the ABI, and nobody is actually
using it that way. Let's tighten it by explicitly checking
the size and alignment for each field of the register file.

Cc: <stable@vger.kernel.org>
Fixes: 2f4a07c5f9fe ("arm64: KVM: guest one-reg interface")
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
[maz: rewrote Dave's initial patch to be more easily backported]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/kvm/guest.c

index d3e0a2ffb383dd1ef276df6853f331e596166ebf..cc055e483e62a4ce4af35ba3a0cd56d99083ecab 100644 (file)
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+/*
+ * Check that a KVM_{GET,SET}_ONE_REG access to the core register file
+ * covers exactly one whole field, with natural alignment.
+ *
+ * @reg: the one-reg descriptor supplied by userspace; reg->id encodes
+ *       both the access size (KVM_REG_SIZE) and the field offset.
+ *
+ * Returns 0 if the (offset, size) pair names exactly one register
+ * field of struct kvm_regs, -EINVAL otherwise.
+ */
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       /* Map each valid field offset to the field's size in bytes. */
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               /* Offset does not land on the start of any known field. */
+               return -EINVAL;
+       }
+
+       /*
+        * The userspace-requested size must match the field exactly, and
+        * off (expressed in __u32 units) must be naturally aligned for a
+        * field of that size — this rejects straddling/unaligned accesses.
+        */
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;