KVM: MMU: Move sync_page() first pte address calculation out of loop
authorGui Jianfeng <guijianfeng@cn.fujitsu.com>
Fri, 16 Apr 2010 09:16:40 +0000 (17:16 +0800)
committerAvi Kivity <avi@redhat.com>
Mon, 17 May 2010 09:18:06 +0000 (12:18 +0300)
Move the first pte address calculation out of the loop to save some cycles.

Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/paging_tmpl.h

index d9dea288e51d287141018580caaad85013720ce0..5910557b3f33dde25fc393a4279edaca62f5b53b 100644 (file)
@@ -572,12 +572,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
        int i, offset, nr_present;
        bool reset_host_protection;
+       gpa_t first_pte_gpa;
 
        offset = nr_present = 0;
 
        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
+       first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
@@ -587,8 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;
 
-               pte_gpa = gfn_to_gpa(sp->gfn);
-               pte_gpa += (i+offset) * sizeof(pt_element_t);
+               pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))