KVM: PPC: Inform the userspace about TCE update failures
author     Alexey Kardashevskiy <aik@ozlabs.ru>
Mon, 10 Sep 2018 08:29:09 +0000 (18:29 +1000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 24 Nov 2019 07:22:57 +0000 (08:22 +0100)
[ Upstream commit f7960e299f13f069d6f3d4e157d91bfca2669677 ]

We return H_TOO_HARD from the TCE update handlers when we think that the
next handler in the chain (realmode -> virtual mode -> user mode) has a
chance to handle the request, and H_HARDWARE/H_CLOSED otherwise.

This changes the handlers to return H_TOO_HARD on every error, giving
userspace an opportunity to handle any failed request, or at least to
log them all.
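
For context, the standalone sketch below illustrates the fallback chain
the message refers to. It is illustrative only: the function names,
structure and behaviour are stand-ins, not the actual KVM code (in which
H_TOO_HARD is a kernel-internal value that is never returned to guests).

	/*
	 * Illustrative sketch of the realmode -> virtual mode -> user
	 * mode fallback chain; all names and values are stand-ins.
	 */
	#include <stdio.h>

	#define H_SUCCESS	0
	#define H_TOO_HARD	9999	/* "defer to the next handler" */

	/* Stand-in realmode handler: runs with the MMU off, defers. */
	static long realmode_put_tce(unsigned long tce)
	{
		return H_TOO_HARD;
	}

	/* Stand-in virtual-mode handler: may still defer. */
	static long virtmode_put_tce(unsigned long tce)
	{
		return H_TOO_HARD;
	}

	static long put_tce(unsigned long tce)
	{
		long ret;

		ret = realmode_put_tce(tce);
		if (ret != H_TOO_HARD)
			return ret;

		ret = virtmode_put_tce(tce);
		if (ret != H_TOO_HARD)
			return ret;

		/* Last resort: hand the request to the userspace VMM. */
		fprintf(stderr, "deferring TCE update 0x%lx to userspace\n",
			tce);
		return H_SUCCESS;
	}

	int main(void)
	{
		return put_tce(0x1000) == H_SUCCESS ? 0 : 1;
	}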

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c

index 2c6cce8e7cfd00f0cd11b7c46e1ea8c93806a011..5e44462960213bba8b3955caeb222da7d33b0238 100644 (file)
@@ -404,7 +404,7 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
        long ret;
 
        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        if (dir == DMA_NONE)
                return H_SUCCESS;
@@ -434,15 +434,15 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
                return H_TOO_HARD;
 
        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        if (mm_iommu_mapped_inc(mem))
-               return H_CLOSED;
+               return H_TOO_HARD;
 
        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
-               return H_HARDWARE;
+               return H_TOO_HARD;
        }
 
        if (dir != DMA_NONE)
index 23d6d1592f117ccf101649d811d4b585f8195dec..c75e5664fe3d8a8984faaa907217e360d6ac3d85 100644 (file)
@@ -264,14 +264,14 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
 
        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        pua = (void *) vmalloc_to_phys(pua);
        if (WARN_ON_ONCE_RM(!pua))
                return H_HARDWARE;
 
        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
-               return H_CLOSED;
+               return H_TOO_HARD;
 
        ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
        if (ret) {
@@ -448,7 +448,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
-                       return H_HARDWARE;
+                       return H_TOO_HARD;
 
                /*
                 * Synchronize with the MMU notifier callbacks in
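
With every error now surfacing as H_TOO_HARD, the request eventually
reaches the userspace VMM, which can attempt the update itself or at
least log it. A minimal sketch of that userspace side follows; the
names (emulate_put_tce, handle_deferred_put_tce) and values are
hypothetical, not QEMU's actual code.

	/*
	 * Hypothetical userspace-side handling of a deferred TCE update;
	 * emulate_put_tce() stands in for whatever the VMM can do on its
	 * own before falling back to logging.
	 */
	#include <stdio.h>

	static int emulate_put_tce(unsigned long liobn, unsigned long ioba,
				   unsigned long tce)
	{
		return -1;	/* pretend the VMM could not map it either */
	}

	static void handle_deferred_put_tce(unsigned long liobn,
					    unsigned long ioba,
					    unsigned long tce)
	{
		if (emulate_put_tce(liobn, ioba, tce))
			fprintf(stderr,
				"H_PUT_TCE failed: liobn=0x%lx ioba=0x%lx tce=0x%lx\n",
				liobn, ioba, tce);
	}

	int main(void)
	{
		handle_deferred_put_tce(0x80000000UL, 0, 0x1000);
		return 0;
	}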