From 505aef8f30a95f7e4abf2c07e54ded1521587ba0 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Thu, 22 Sep 2011 16:56:06 +0800
Subject: [PATCH] KVM: MMU: cleanup FNAME(invlpg)

Directly use mmu_page_zap_pte to zap spte in FNAME(invlpg), also
remove the same code between FNAME(invlpg) and FNAME(sync_page)

Signed-off-by: Xiao Guangrong
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/mmu.c         | 16 ++++++++------
 arch/x86/kvm/paging_tmpl.h | 44 +++++++++++++++-----------------------
 2 files changed, 27 insertions(+), 33 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b432a71a1839..d15f908649e7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1809,7 +1809,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			     u64 *spte)
 {
 	u64 pte;
@@ -1817,17 +1817,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (is_last_spte(pte, sp->role.level))
+		if (is_last_spte(pte, sp->role.level)) {
 			drop_spte(kvm, spte);
-		else {
+			if (is_large_pte(pte))
+				--kvm->stat.lpages;
+		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
 		}
-	} else if (is_mmio_spte(pte))
+		return true;
+	}
+
+	if (is_mmio_spte(pte))
 		mmu_spte_clear_no_track(spte);
 
-	if (is_large_pte(pte))
-		--kvm->stat.lpages;
+	return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92994100638b..d8d3906649da 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,18 @@ out_unlock:
 	return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	WARN_ON(sp->role.level != 1);
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
-- 
2.20.1
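
Note (not part of the patch): the cleanup rests on mmu_page_zap_pte() now
returning whether it tore down a present SPTE, so FNAME(invlpg) can decide
locally whether a remote TLB flush is needed, while FNAME(get_level1_sp_gpa)
centralizes the quadrant-offset calculation previously duplicated in
FNAME(sync_page). Below is a minimal, self-contained user-space C sketch of
the first idea only; zap_pte, invalidate and SPTE_PRESENT are hypothetical
stand-ins, not KVM symbols.

/*
 * Standalone sketch (not kernel code): illustrates the calling pattern
 * where the zap helper reports whether it dropped a live translation and
 * the caller decides about the TLB flush.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT (1ULL << 0)	/* stand-in for the present bit */

/* Analogue of mmu_page_zap_pte(): clear the entry, report if it was live. */
static bool zap_pte(uint64_t *spte)
{
	uint64_t old = *spte;

	*spte = 0;

	/* Only a previously present entry can be cached in the TLB. */
	return old & SPTE_PRESENT;
}

/* Analogue of the FNAME(invlpg) call site: flush only when required. */
static void invalidate(uint64_t *spte)
{
	if (zap_pte(spte))
		puts("flush remote TLBs");
	else
		puts("no flush needed");
}

int main(void)
{
	uint64_t live = SPTE_PRESENT, dead = 0;

	invalidate(&live);	/* prints "flush remote TLBs" */
	invalidate(&dead);	/* prints "no flush needed" */
	return 0;
}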