KVM: x86: MMU: Simplify force_pt_level calculation code in FNAME(page_fault)()
author Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Fri, 16 Oct 2015 08:05:13 +0000 (17:05 +0900)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 16 Oct 2015 08:34:00 +0000 (10:34 +0200)
Check is_self_change_mapping before calling mapping_level_dirty_bitmap():
when it is true, the mapping is forced to page-table level anyway, so the
dirty bitmap never needs to be consulted.  As a bonus, the extra memory
slot search done inside mapping_level_dirty_bitmap() is eliminated when
is_self_change_mapping is true.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/paging_tmpl.h

index 07f1a4ede637f954c44277781655c39393c8543c..8ebc3a5560cec96c73e8b1e1b7d6caf9a14455ab 100644
@@ -743,15 +743,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
              &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
-       if (walker.level >= PT_DIRECTORY_LEVEL)
-               force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
-                  || is_self_change_mapping;
-       else
+       if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
+               force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+               if (!force_pt_level) {
+                       level = min(walker.level, mapping_level(vcpu, walker.gfn));
+                       walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+               }
+       } else
                force_pt_level = true;
-       if (!force_pt_level) {
-               level = min(walker.level, mapping_level(vcpu, walker.gfn));
-               walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
-       }
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
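
For illustration only, not part of the commit: a minimal, self-contained C
sketch of the control-flow change.  The helper expensive_memslot_search()
is a hypothetical stand-in for the memory slot search inside
mapping_level_dirty_bitmap(), and PT_DIRECTORY_LEVEL is hard-coded; the
point is that hoisting the cheap is_self_change_mapping test skips the
costly lookup entirely whenever the result is already known to be true.

#include <stdbool.h>
#include <stdio.h>

#define PT_DIRECTORY_LEVEL 2    /* stand-in for the KVM constant */

/*
 * Hypothetical stand-in for mapping_level_dirty_bitmap(): in KVM this
 * path performs a memory slot search, which is the cost the patch avoids.
 */
static bool expensive_memslot_search(void)
{
        puts("memslot search performed");
        return false;   /* pretend the dirty bitmap does not force 4K pages */
}

/*
 * Before the patch: || evaluates its left operand first, so the memslot
 * search runs even when is_self_change_mapping alone decides the result.
 */
static bool force_pt_level_old(int level, bool is_self_change_mapping)
{
        if (level >= PT_DIRECTORY_LEVEL)
                return expensive_memslot_search() || is_self_change_mapping;
        return true;
}

/*
 * After the patch: the cheap flag is tested first, so the search is
 * skipped whenever force_pt_level is already known to be true.
 */
static bool force_pt_level_new(int level, bool is_self_change_mapping)
{
        if (level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping)
                return expensive_memslot_search();
        return true;
}

int main(void)
{
        printf("old: %d\n", force_pt_level_old(PT_DIRECTORY_LEVEL, true));
        printf("new: %d\n", force_pt_level_new(PT_DIRECTORY_LEVEL, true));
        return 0;
}

Running this, the old variant prints "memslot search performed" before
returning true, while the new variant returns true without ever entering
the memslot path.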