KVM: MMU: cleanup spte_write_protect
author Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Wed, 20 Jun 2012 07:57:39 +0000 (15:57 +0800)
committer Avi Kivity <avi@redhat.com>
Wed, 11 Jul 2012 13:51:16 +0000 (16:51 +0300)
Use __drop_large_spte to clean up this function, and add a comment to spte_write_protect.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
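
As a reading aid (not part of the patch), this is how spte_write_protect reads once the
large-spte handling is factored out into __drop_large_spte, assembled from the hunks
below; the early not-writable bail-out sits between the two hunks and is only summarized
in a comment here:

    static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
    {
            u64 spte = *sptep;

            /* ... early return when the spte is not writable (not shown in this diff) ... */

            rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

            *flush |= true;

            /* A large spte is dropped rather than write-protected. */
            if (__drop_large_spte(kvm, sptep))
                    return true;

            /* Otherwise keep the mapping and just clear the writable bit. */
            spte = spte & ~PT_WRITABLE_MASK;
            mmu_spte_update(sptep, spte);

            return false;
    }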
arch/x86/kvm/mmu.c

index d04d6305a725629b45750add88fef2ddeab441d1..ed9e9680608230d1271ea5ff0438fa227c3a1ee5 100644
@@ -1050,7 +1050,33 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }
 
-/* Return true if the spte is dropped. */
+
+static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
+{
+       if (is_large_pte(*sptep)) {
+               WARN_ON(page_header(__pa(sptep))->role.level ==
+                       PT_PAGE_TABLE_LEVEL);
+               drop_spte(kvm, sptep);
+               --kvm->stat.lpages;
+               return true;
+       }
+
+       return false;
+}
+
+static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
+{
+       if (__drop_large_spte(vcpu->kvm, sptep))
+               kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
+/*
+ * Write-protect the specified @sptep, due to either dirty page logging or
+ * shadow page table protection. @flush indicates whether the TLB needs to
+ * be flushed.
+ *
+ * Return true if the spte is dropped.
+ */
 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
 {
        u64 spte = *sptep;
@@ -1061,13 +1087,9 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
 
        *flush |= true;
-       if (is_large_pte(spte)) {
-               WARN_ON(page_header(__pa(sptep))->role.level ==
-                      PT_PAGE_TABLE_LEVEL);
-               drop_spte(kvm, sptep);
-               --kvm->stat.lpages;
+
+       if (__drop_large_spte(kvm, sptep))
                return true;
-       }
 
        spte = spte & ~PT_WRITABLE_MASK;
        mmu_spte_update(sptep, spte);
@@ -1878,15 +1900,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
        mmu_spte_set(sptep, spte);
 }
 
-static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
-{
-       if (is_large_pte(*sptep)) {
-               drop_spte(vcpu->kvm, sptep);
-               --vcpu->kvm->stat.lpages;
-               kvm_flush_remote_tlbs(vcpu->kvm);
-       }
-}
-
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                   unsigned direct_access)
 {
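
A note on the split introduced here: __drop_large_spte only drops the spte and leaves the
TLB flush to its caller, while drop_large_spte flushes remote TLBs immediately, which is
why spte_write_protect (whose callers batch the flush via @flush) uses the former. A
minimal caller sketch of the @flush/return-value contract follows; the function name is
hypothetical and the rmap iterator helpers (rmap_get_first/rmap_get_next) are assumed from
the surrounding mmu.c code, so treat this as illustrative rather than part of the patch:

    static bool __rmap_write_protect_sketch(struct kvm *kvm, unsigned long *rmapp)
    {
            u64 *sptep;
            struct rmap_iterator iter;
            bool flush = false;

            for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                    if (spte_write_protect(kvm, sptep, &flush)) {
                            /* The spte was dropped: restart the rmap walk. */
                            sptep = rmap_get_first(*rmapp, &iter);
                            continue;
                    }
                    sptep = rmap_get_next(&iter);
            }

            /* The caller decides when to actually flush remote TLBs. */
            return flush;
    }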