KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers
authorAlexey Kardashevskiy <aik@ozlabs.ru>
Mon, 15 Feb 2016 01:55:04 +0000 (12:55 +1100)
committerPaul Mackerras <paulus@samba.org>
Tue, 16 Feb 2016 02:44:26 +0000 (13:44 +1100)
This reworks the existing H_PUT_TCE/H_GET_TCE handlers so that the
following patches apply more cleanly.

This moves the ioba boundaries check to a helper and adds a check that
the least significant bits of the ioba, which have to be zero, are zero.

The patch is pretty mechanical (only the check for the least significant
ioba bits is added) so no change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/kvm/book3s_64_vio_hv.c

index 89e96b3e00398d0fb45bd841924a7f6b541f6718..f29ba2c63e07a2ff4281b5339757f36d74049955 100644 (file)
 #include <asm/ppc-opcode.h>
 #include <asm/kvm_host.h>
 #include <asm/udbg.h>
+#include <asm/iommu.h>
 
 #define TCES_PER_PAGE  (PAGE_SIZE / sizeof(u64))
 
+/*
+ * Finds a TCE table descriptor by LIOBN.
+ *
+ * Returns the descriptor, or NULL if @liobn does not match any table
+ * registered on this VM (callers punt the hcall to userspace then).
+ *
+ * WARNING: This will be called in real or virtual mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+               unsigned long liobn)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvmppc_spapr_tce_table *stt;
+
+       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list)
+               if (stt->liobn == liobn)
+                       return stt;
+
+       return NULL;
+}
+
+/*
+ * Validates IO address.
+ *
+ * Checks that @ioba is aligned to a 4K IOMMU page and that the range of
+ * @npages 4K pages starting at @ioba fits within the table's DMA window.
+ *
+ * Returns H_SUCCESS, or H_PARAMETER on a failed check.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ *          mode on PR KVM
+ */
+static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+               unsigned long ioba, unsigned long npages)
+{
+       unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1;
+       unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K;
+       unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K;
+
+       /* The last clause catches unsigned wrap-around of idx + npages */
+       if ((ioba & mask) || (idx + npages > size) || (idx + npages < idx))
+               return H_PARAMETER;
+
+       return H_SUCCESS;
+}
+
 /* WARNING: This will be called in real-mode on HV KVM and virtual
  *          mode on PR KVM
  */
 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct kvmppc_spapr_tce_table *stt;
+       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       long ret;
+       unsigned long idx;
+       struct page *page;
+       u64 *tbl;
 
        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */
 
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == liobn) {
-                       unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-                       struct page *page;
-                       u64 *tbl;
-
-                       /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p  window_size=0x%x\n", */
-                       /*          liobn, stt, stt->window_size); */
-                       if (ioba >= stt->window_size)
-                               return H_PARAMETER;
-
-                       page = stt->pages[idx / TCES_PER_PAGE];
-                       tbl = (u64 *)page_address(page);
-
-                       /* FIXME: Need to validate the TCE itself */
-                       /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
-                       tbl[idx % TCES_PER_PAGE] = tce;
-                       return H_SUCCESS;
-               }
-       }
-
-       /* Didn't find the liobn, punt it to userspace */
-       return H_TOO_HARD;
+       /* Didn't find the liobn, punt it to userspace */
+       if (!stt)
+               return H_TOO_HARD;
+
+       /* Reject an unaligned or out-of-window ioba */
+       ret = kvmppc_ioba_validate(stt, ioba, 1);
+       if (ret != H_SUCCESS)
+               return ret;
+
+       idx = ioba >> SPAPR_TCE_SHIFT;
+       page = stt->pages[idx / TCES_PER_PAGE];
+       tbl = (u64 *)page_address(page);
+
+       /* FIXME: Need to validate the TCE itself */
+       /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+       tbl[idx % TCES_PER_PAGE] = tce;
+
+       return H_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct kvmppc_spapr_tce_table *stt;
+       struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+       long ret;
+       unsigned long idx;
+       struct page *page;
+       u64 *tbl;
 
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == liobn) {
-                       unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
-                       struct page *page;
-                       u64 *tbl;
+       /* Didn't find the liobn, punt it to userspace */
+       if (!stt)
+               return H_TOO_HARD;
 
-                       if (ioba >= stt->window_size)
-                               return H_PARAMETER;
+       /* Reject an unaligned or out-of-window ioba */
+       ret = kvmppc_ioba_validate(stt, ioba, 1);
+       if (ret != H_SUCCESS)
+               return ret;
 
-                       page = stt->pages[idx / TCES_PER_PAGE];
-                       tbl = (u64 *)page_address(page);
+       idx = ioba >> SPAPR_TCE_SHIFT;
+       page = stt->pages[idx / TCES_PER_PAGE];
+       tbl = (u64 *)page_address(page);
 
-                       vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
-                       return H_SUCCESS;
-               }
-       }
+       /* Pass the TCE value back to the guest in gpr[4] */
+       vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
 
-       /* Didn't find the liobn, punt it to userspace */
-       return H_TOO_HARD;
+       return H_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);