arm64: KVM: force cache clean on page fault when caches are off
author	Marc Zyngier <marc.zyngier@arm.com>
Tue, 14 Jan 2014 19:13:10 +0000 (19:13 +0000)
committer	Marc Zyngier <marc.zyngier@arm.com>
Mon, 3 Mar 2014 01:15:20 +0000 (01:15 +0000)
In order for a guest with caches off to observe data written
to a given page, we need to make sure that page is committed
to memory, and not just hanging in the cache (as guest
accesses completely bypass the cache until the guest decides
to enable it).

For this purpose, hook into the coherent_icache_guest_page
function and flush the region if the guest SCTLR_EL1
register doesn't show the MMU and caches as being enabled.
The function also gets renamed to coherent_cache_guest_page.
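
For reference, the 0b101 mask used in the new check corresponds to
SCTLR_EL1.M (bit 0, stage 1 MMU enable) and SCTLR_EL1.C (bit 2, data
cache enable). A minimal standalone sketch of the predicate and the
resulting flush decision (the SCTLR_EL1_M/SCTLR_EL1_C names are
illustrative, not part of this patch):

	#define SCTLR_EL1_M	(1UL << 0)	/* stage 1 MMU enable */
	#define SCTLR_EL1_C	(1UL << 2)	/* data cache enable */

	/* Caches only take effect once both the MMU and D-cache are on. */
	static bool cache_enabled(u64 sctlr_el1)
	{
		return (sctlr_el1 & (SCTLR_EL1_C | SCTLR_EL1_M)) ==
		       (SCTLR_EL1_C | SCTLR_EL1_M);
	}

	/*
	 * With caches off, clean the region to the Point of Coherency
	 * so the guest's non-cacheable accesses see the new data.
	 */
	if (!cache_enabled(sctlr_el1))
		kvm_flush_dcache_to_poc((void *)hva, size);

Note that the flush only covers the faulting region (PAGE_SIZE or
PMD_SIZE), matching the granularity of the stage-2 mapping being
installed in user_mem_abort().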

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h

index 2d122adcdb22108984a46f8483b5d2ed6b37b811..6d0f3d3023b7568692d96fb1edfb44e4e3a84d6b 100644 (file)
@@ -116,8 +116,8 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-                                             unsigned long size)
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+                                            unsigned long size)
 {
        /*
         * If we are going to insert an instruction page and the icache is
index 7789857d147034b8cbfd1e44d599f81db085215b..fc71a8df0e1316b5a40282f7d648211a708d5e6c 100644 (file)
@@ -715,7 +715,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, PAGE_S2);
@@ -723,7 +723,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
        }
 
index 7f1f9408ff66e897e8988d0b844ab9a3e8f903a0..6eaf69b5e42ccbabe09673cfe161b1c181844a1d 100644 (file)
@@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(unsigned long esr)
        return true;
 }
 
-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
 static inline void kvm_clean_pgd(pgd_t *pgd) {}
 static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
 static inline void kvm_clean_pte(pte_t *pte) {}
@@ -124,9 +123,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-                                             unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
+       return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+                                            unsigned long size)
+{
+       if (!vcpu_has_cache_enabled(vcpu))
+               kvm_flush_dcache_to_poc((void *)hva, size);
+
        if (!icache_is_aliasing()) {            /* PIPT */
                flush_icache_range(hva, hva + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
@@ -135,7 +144,6 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
        }
 }
 
-#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
 #define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */