KVM: MMU: move TLB flush out of __kvm_sync_page
author    Paolo Bonzini <pbonzini@redhat.com>
          Wed, 24 Feb 2016 09:03:27 +0000 (10:03 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
          Tue, 8 Mar 2016 11:33:17 +0000 (12:33 +0100)
With the TLB flush moved out of __kvm_sync_page, kvm_sync_pages can
call __kvm_sync_page instead of reinventing it.  Thanks to
kvm_mmu_flush_or_zap, which this patch moves earlier in the file so
that the sync paths can use it, the code ends up no more complex than
before, and further cleanups to kvm_sync_pages will follow in the next
patches.

Reviewed-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6dae2356b9f5016aabe58d8ab33c572ee3bae3b6..45a8a0605a0990a1f21fcfe13b73d900be1c64b8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1932,10 +1932,24 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                return 1;
        }
 
-       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
 }
 
+static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
+                                struct list_head *invalid_list,
+                                bool remote_flush, bool local_flush)
+{
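+       /*
+        * kvm_mmu_commit_zap_page flushes remote TLBs as part of zapping,
+        * so pending zaps make a separate flush request unnecessary.
+        */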
+       if (!list_empty(invalid_list)) {
+               kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+               return;
+       }
+
+       if (remote_flush)
+               kvm_flush_remote_tlbs(vcpu->kvm);
+       else if (local_flush)
+               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+}
+
 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
                                   struct kvm_mmu_page *sp)
 {
@@ -1943,8 +1957,7 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
        int ret;
 
        ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
-       if (ret)
-               kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
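+       /* A nonzero ret means sp was zapped; zero means it needs a local flush. */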
+       kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret);
 
        return ret;
 }
@@ -1975,17 +1988,11 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 
                WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
                kvm_unlink_unsync_page(vcpu->kvm, s);
-               if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-                       (vcpu->arch.mmu.sync_page(vcpu, s))) {
-                       kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
-                       continue;
-               }
-               flush = true;
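+               /* __kvm_sync_page returns 0 when the page was synced. */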
+               if (!__kvm_sync_page(vcpu, s, &invalid_list, false))
+                       flush = true;
        }
 
-       kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-       if (flush)
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+       kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
 }
 
 struct mmu_page_path {
@@ -2071,6 +2078,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
        while (mmu_unsync_walk(parent, &pages)) {
                bool protected = false;
+               bool flush = false;
 
                for_each_sp(pages, sp, parents, i)
                        protected |= rmap_write_protect(vcpu, sp->gfn);
@@ -2079,10 +2087,12 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
                        kvm_flush_remote_tlbs(vcpu->kvm);
 
                for_each_sp(pages, sp, parents, i) {
-                       kvm_sync_page(vcpu, sp, &invalid_list);
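+                       /* A zero return means sp was synced and needs a flush. */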
+                       if (!kvm_sync_page(vcpu, sp, &invalid_list))
+                               flush = true;
+
                        mmu_pages_clear_parents(&parents);
                }
-               kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+               kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
                cond_resched_lock(&vcpu->kvm->mmu_lock);
        }
 }
@@ -4188,21 +4198,6 @@ static bool need_remote_flush(u64 old, u64 new)
        return (old & ~new & PT64_PERM_MASK) != 0;
 }
 
-static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
-                                struct list_head *invalid_list,
-                                bool remote_flush, bool local_flush)
-{
-       if (!list_empty(invalid_list)) {
-               kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
-               return;
-       }
-
-       if (remote_flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
-       else if (local_flush)
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                                    const u8 *new, int *bytes)
 {