KVM: MMU: Use ptep_user for cmpxchg_gpte()
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Sun, 1 May 2011 05:33:07 +0000 (14:33 +0900)
committerAvi Kivity <avi@redhat.com>
Sun, 22 May 2011 12:48:14 +0000 (08:48 -0400)
The address of the gpte was already calculated and stored in ptep_user
before entering cmpxchg_gpte().

This patch makes cmpxchg_gpte() use that address to make it clear that we
are using the same address during walk_addr_generic().

Note that the unlikely annotations are used to indicate that the conditions
are unusual, rather than as a performance optimization.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/paging_tmpl.h

index e3f81418797e639109e484f3b5bd8751c072d910..6c4dc010c4cbfdc2dd49f1e82696022373e817fd 100644 (file)
@@ -79,21 +79,19 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 }
 
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                        gfn_t table_gfn, unsigned index,
-                        pt_element_t orig_pte, pt_element_t new_pte)
+                              pt_element_t __user *ptep_user, unsigned index,
+                              pt_element_t orig_pte, pt_element_t new_pte)
 {
+       int npages;
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;
-       gpa_t gpa;
 
-       gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
-                                PFERR_USER_MASK|PFERR_WRITE_MASK);
-       if (gpa == UNMAPPED_GVA)
+       npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+       /* Check if the user is doing something meaningless. */
+       if (unlikely(npages != 1))
                return -EFAULT;
 
-       page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
-
        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);
@@ -220,9 +218,9 @@ walk:
                        int ret;
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                       sizeof(pte));
-                       ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
-                                       index, pte, pte|PT_ACCESSED_MASK);
-                       if (ret < 0) {
+                       ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+                                                 pte, pte|PT_ACCESSED_MASK);
+                       if (unlikely(ret < 0)) {
                                present = false;
                                break;
                        } else if (ret)
@@ -279,9 +277,9 @@ walk:
                int ret;
 
                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-               ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
-                           pte|PT_DIRTY_MASK);
-               if (ret < 0) {
+               ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+                                         pte, pte|PT_DIRTY_MASK);
+               if (unlikely(ret < 0)) {
                        present = false;
                        goto error;
                } else if (ret)