From: Paolo Bonzini
Date: Mon, 18 May 2015 13:11:46 +0000 (+0200)
Subject: KVM: x86: pass struct kvm_mmu_page to gfn_to_rmap
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e4cd1da944ed9d2acd2e4ccabf61ec443735f6db;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

KVM: x86: pass struct kvm_mmu_page to gfn_to_rmap

This is always available (with one exception in the auditing code), and
with the same auditing exception the level was coming from sp->role.level.

Later, the spte's role will also be used to look up the right memslots
array.

Reviewed-by: Radim Krčmář
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a65ce12470f8..0d01cbbcf3eb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1043,12 +1043,12 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 /*
  * Take gfn and return the reverse mapping to it.
  */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return __gfn_to_rmap(gfn, level, slot);
+	return __gfn_to_rmap(gfn, sp->role.level, slot);
 }
 
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
@@ -1066,7 +1066,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmapp);
 }
 
@@ -1078,7 +1078,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, sp);
 	pte_list_remove(spte, rmapp);
 }
 
@@ -1612,7 +1612,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 368d53497314..9d99f17aa3be 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -146,7 +146,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}
 
-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
 	if (!*rmapp) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
@@ -191,11 +191,15 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	unsigned long *rmapp;
 	u64 *sptep;
 	struct rmap_iterator iter;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 
 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;
 
-	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);
+	slots = kvm_memslots(kvm);
+	slot = __gfn_to_memslot(slots, sp->gfn);
+	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
 	for_each_rmap_spte(rmapp, &iter, sptep)
 		if (is_writable_pte(*sptep))