From 6c41a413fd44af8eae2949869d4d57ce681a0c30 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Thu, 30 Jun 2016 18:40:51 +0100
Subject: [PATCH] arm/arm64: Get rid of KERN_TO_HYP

We have both KERN_TO_HYP and kern_hyp_va, which do the exact same
thing. Let's standardize on the latter.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
---
 arch/arm/include/asm/kvm_hyp.h   |  2 --
 arch/arm/include/asm/kvm_mmu.h   |  2 +-
 arch/arm/kvm/mmu.c               | 18 +++++++++---------
 arch/arm64/include/asm/kvm_mmu.h |  1 -
 4 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index e38fce2270ac..6eaff28f2ff3 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -25,8 +25,6 @@
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define kern_hyp_va(v) (v)
-
 #define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
 	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
 #define __ACCESS_CP15_64(Op1, CRm)	\
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 73c28180bb63..3bb803d6814b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -26,7 +26,7 @@
  * We directly use the kernel VA for the HYP, as we can directly share
  * the mapping (HTTBR "covers" TTBR1).
  */
-#define KERN_TO_HYP(kva)	(kva)
+#define kern_hyp_va(kva)	(kva)
 
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8a0aa37605c5..bda27b6b1aa2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -506,9 +506,9 @@ void free_hyp_pgds(void)
 	if (hyp_pgd) {
 		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
@@ -670,8 +670,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
 {
 	phys_addr_t phys_addr;
 	unsigned long virt_addr;
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -705,8 +705,8 @@
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -1711,10 +1711,10 @@
 
 	kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
 	kvm_info("HYP VA range: %lx:%lx\n",
-		 KERN_TO_HYP(PAGE_OFFSET), KERN_TO_HYP(~0UL));
+		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
-	if (hyp_idmap_start >= KERN_TO_HYP(PAGE_OFFSET) &&
-	    hyp_idmap_start < KERN_TO_HYP(~0UL)) {
+	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+	    hyp_idmap_start < kern_hyp_va(~0UL)) {
 		/*
 		 * The idmap page is intersecting with the VA space,
 		 * it is not safe to continue further.
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 9226f8be6341..b6bb83400cd8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -133,7 +133,6 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 }
 
 #define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
-#define KERN_TO_HYP(v)	kern_hyp_va(v)
 
 /*
  * We currently only support a 40bit IPA.
-- 
2.20.1
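
For readers new to this code, here is a stand-alone sketch (not part of
the patch) of what the now-unified kern_hyp_va() computes on each
architecture. It is plain user-space C and assumes an LP64 system; the
helper names are invented for the example, and HYP_VA_MASK is a stand-in
for the kernel's HYP_PAGE_OFFSET_MASK, whose real value depends on
VA_BITS.

/*
 * Illustration only -- not part of the patch above. A user-space
 * sketch of the two kern_hyp_va() flavours this change unifies
 * under a single name. Assumes 64-bit unsigned long (LP64).
 */
#include <stdio.h>

/* 32-bit arm: HYP shares the kernel VA space (HTTBR "covers" TTBR1),
 * so the conversion is the identity, as in arch/arm/include/asm/kvm_mmu.h. */
static unsigned long kern_hyp_va_arm(unsigned long kva)
{
	return kva;
}

/* Stand-in value, not the kernel's HYP_PAGE_OFFSET_MASK. */
#define HYP_VA_MASK	((1UL << 47) - 1)

/* arm64 (non-VHE): HYP runs in the lower VA range, so the kernel VA
 * is masked down, mimicking __kern_hyp_va() in
 * arch/arm64/include/asm/kvm_mmu.h. */
static unsigned long kern_hyp_va_arm64(unsigned long kva)
{
	return kva & HYP_VA_MASK;
}

int main(void)
{
	unsigned long kva = 0xffff000012345678UL;

	printf("arm:   %#lx -> %#lx\n", kva, kern_hyp_va_arm(kva));
	printf("arm64: %#lx -> %#lx\n", kva, kern_hyp_va_arm64(kva));
	return 0;
}

Standardizing on the single kern_hyp_va() spelling means callers such as
create_hyp_mappings() use one macro whether the conversion is the 32-bit
identity or the arm64 masking operation.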