kvm-arm: Cleanup kvm_* wrappers
author    Suzuki K Poulose <suzuki.poulose@arm.com>
          Tue, 22 Mar 2016 17:14:25 +0000 (17:14 +0000)
committer Christoffer Dall <christoffer.dall@linaro.org>
          Thu, 21 Apr 2016 12:58:20 +0000 (14:58 +0200)
Now that we have switched to explicit page table routines,
get rid of the obsolete kvm_* wrappers.

Also, kvm_tlb_flush_vmid_ipa is now called only on stage2
page tables, so get rid of the now-redundant check for a
NULL kvm (the hyp page table case).

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h
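
For context, the wrappers removed below became obsolete once the stage2 walkers
in mmu.c switched to explicit, stage2-specific helpers earlier in this series.
A minimal sketch of the resulting walker shape, assuming the stage2_pgd_index()/
stage2_pgd_addr_end()/stage2_pgd_none() names introduced by those patches (they
are not shown in this diff):

/*
 * Sketch only: the shape of a stage2 walk after the switch to explicit
 * stage2_* helpers, which is what makes the kvm_pgd_addr_end()/kvm_pgd_index()
 * wrappers removed below unnecessary.  Helper names are assumed from the
 * preceding patches in this series, not taken from this diff.
 */
static void example_unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + stage2_pgd_index(addr);	/* was kvm_pgd_index() */
	do {
		next = stage2_pgd_addr_end(addr, end);	/* was kvm_pgd_addr_end() */
		if (!stage2_pgd_none(*pgd))
			unmap_stage2_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}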

arch/arm/include/asm/kvm_mmu.h
index 5522cdd9dedf652946df614186dec69fce668ecf..a7736d53a40837303ada583ee263b160f745fd64 100644
@@ -136,22 +136,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
        return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
 }
 
-
-/* Open coded p*d_addr_end that can deal with 64bit addresses */
-#define kvm_pgd_addr_end(addr, end)                                    \
-({     u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
-       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
-})
-
-#define kvm_pud_addr_end(addr,end)             (end)
-
-#define kvm_pmd_addr_end(addr, end)                                    \
-({     u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
-       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
-})
-
-#define kvm_pgd_index(addr)                    pgd_index(addr)
-
 static inline bool kvm_page_empty(void *ptr)
 {
        struct page *ptr_page = virt_to_page(ptr);
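
The open-coded macros removed above clamp the next level boundary to 'end'
using 64-bit arithmetic, because the 32-bit host's generic p*d_addr_end()
truncates addresses while stage2 IPAs can exceed 32 bits (kvm_pud_addr_end()
simply returned 'end' since the PUD level is folded on 32-bit ARM).  A rough
functional equivalent for illustration (the example_* name is hypothetical):

/*
 * Illustrative equivalent of the removed kvm_pmd_addr_end(): compute the next
 * PMD boundary for a 64-bit IPA and clamp it to 'end'.  The 'boundary - 1 <
 * end - 1' comparison avoids treating a boundary that wrapped to zero as
 * falling before 'end'.
 */
static inline phys_addr_t example_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}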
arch/arm/kvm/mmu.c
index f2a6d9b8ca2d84f754d55b49f4ed2a5a19f6fec2..d3fa96e0f7094d549bbb28b24e696083b9abf77f 100644
@@ -66,14 +66,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-       /*
-        * This function also gets called when dealing with HYP page
-        * tables. As HYP doesn't have an associated struct kvm (and
-        * the HYP page tables are fairly static), we don't do
-        * anything there.
-        */
-       if (kvm)
-               kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 /*
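
The NULL-kvm check was only needed while the hyp page tables shared this unmap
path; with separate hyp walkers, every caller of kvm_tlb_flush_vmid_ipa() has a
valid struct kvm.  A sketch of the calling pattern (the example_* name mirrors
the stage2 teardown helpers in mmu.c but is illustrative, not from this diff):

/*
 * Illustrative stage2 teardown step: unhook a PTE table from its PMD entry,
 * flush the stage2 TLB entry for that IPA, and free the table page.  'kvm' is
 * always valid on this path, so kvm_tlb_flush_vmid_ipa() no longer needs the
 * NULL check removed above.
 */
static void example_clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}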
arch/arm64/include/asm/kvm_mmu.h
index edf3c62c660ede8d0330da2538b49a328fcc48e0..a3c0d05311ef36d3faae34dc1e831ec2129115a6 100644
@@ -153,13 +153,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
        return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
 }
 
-
-#define kvm_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
-#define kvm_pud_addr_end(addr, end)    pud_addr_end(addr, end)
-#define kvm_pmd_addr_end(addr, end)    pmd_addr_end(addr, end)
-
-#define kvm_pgd_index(addr)    (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
        pgd_t *pgd = kvm->arch.pgd;
@@ -232,23 +225,6 @@ static inline bool kvm_page_empty(void *ptr)
        return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-
-#ifdef __PAGETABLE_PMD_FOLDED
-#define kvm_pmd_table_empty(kvm, pmdp) (0)
-#else
-#define kvm_pmd_table_empty(kvm, pmdp) \
-       (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
-#endif
-
-#ifdef __PAGETABLE_PUD_FOLDED
-#define kvm_pud_table_empty(kvm, pudp) (0)
-#else
-#define kvm_pud_table_empty(kvm, pudp) \
-       (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
-#endif
-
-
 #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
 
 #ifdef __PAGETABLE_PMD_FOLDED
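
For context on kvm_page_empty(), which both the removed kvm_p*d_table_empty()
helpers and their remaining hyp_* counterparts build on: the walkers take a
page reference for every entry installed in a table page and drop it when the
entry is cleared, so page_count() == 1 means only the allocation reference is
left and the table can be freed.  A hedged sketch of that convention (the
example_* names are illustrative, not taken from this diff):

/*
 * Sketch of the refcount convention behind kvm_page_empty(): each live entry
 * holds a reference on its table page, so an empty table is back to a count
 * of one (the allocation itself).
 */
static void example_install_pte(pte_t *pte, pte_t new_pte)
{
	get_page(virt_to_page(pte));		/* one reference per live entry */
	kvm_set_pte(pte, new_pte);
}

static void example_clear_pte(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	kvm_set_pte(pte, __pte(0));
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pte));		/* drop the per-entry reference */
}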