arm/arm64: KVM: Enforce unconditional flush to PoC when mapping to stage-2
authorMarc Zyngier <marc.zyngier@arm.com>
Wed, 25 Jan 2017 12:29:59 +0000 (12:29 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 12 Mar 2017 05:37:28 +0000 (06:37 +0100)
commit 8f36ebaf21fdae99c091c67e8b6fab33969f2667 upstream.

When we fault in a page, we flush it to the PoC (Point of Coherency)
if the faulting vcpu has its own caches off, so that it can observe
the page we just brought in.

But if the vcpu has its caches on, we skip that step. Bad things
happen when *another* vcpu tries to access that page with its own
caches disabled. At that point, there is no guarantee that the
data has made it to the PoC, and we access stale data.

The obvious fix is to always flush to PoC when a page is faulted
in, no matter what the state of the vcpu is.

Fixes: 2d58b733c876 ("arm64: KVM: force cache clean on page fault when caches are off")
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/kvm_mmu.h
arch/arm64/include/asm/kvm_mmu.h

index 405aa18833073b88b52103363b54d534d9c69263..23d5cad56ddcd57a6670223879c8cbec308411b8 100644 (file)
@@ -204,18 +204,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
         * and iterate over the range.
         */
 
-       bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
-
        VM_BUG_ON(size & ~PAGE_MASK);
 
-       if (!need_flush && !icache_is_pipt())
-               goto vipt_cache;
-
        while (size) {
                void *va = kmap_atomic_pfn(pfn);
 
-               if (need_flush)
-                       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+               kvm_flush_dcache_to_poc(va, PAGE_SIZE);
 
                if (icache_is_pipt())
                        __cpuc_coherent_user_range((unsigned long)va,
@@ -227,7 +221,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
                kunmap_atomic(va);
        }
 
-vipt_cache:
        if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
index 61505676d0853bb65710fc9ad7746db8f58e4658..819b21a9851c39567bb02d4579f654b474473c4c 100644 (file)
@@ -236,8 +236,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 {
        void *va = page_address(pfn_to_page(pfn));
 
-       if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc(va, size);
+       kvm_flush_dcache_to_poc(va, size);
 
        if (!icache_is_aliasing()) {            /* PIPT */
                flush_icache_range((unsigned long)va,