KVM: MMU: only update unsync page in invlpg path
author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
		Sat, 15 May 2010 10:53:35 +0000 (18:53 +0800)
committer	Avi Kivity <avi@redhat.com>
		Sun, 1 Aug 2010 07:35:50 +0000 (10:35 +0300)
Only unsync pages need to be updated at invlpg time, since all other
shadow pages are write-protected.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/paging_tmpl.h

index 22f13797f5214914e4d1e110d6e0e0e6bf474b5a..0671d7a29c3c90db5e3964cb22efafba50196033 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -461,6 +461,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
+       struct kvm_mmu_page *sp;
        gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
@@ -472,10 +473,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                level = iterator.level;
                sptep = iterator.sptep;
 
+               sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
-                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
                        int offset, shift;
 
+                       if (!sp->unsync)
+                               break;
+
                        shift = PAGE_SHIFT -
                                  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;
@@ -493,7 +497,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                        break;
                }
 
-               if (!is_shadow_present_pte(*sptep))
+               if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }