KVM: x86: MMU: Move handle_mmio_page_fault() call to kvm_mmu_page_fault()
authorTakuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Mon, 22 Feb 2016 08:23:41 +0000 (17:23 +0900)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 23 Feb 2016 13:20:27 +0000 (14:20 +0100)
Rather than placing a handle_mmio_page_fault() call in each
vcpu->arch.mmu.page_fault() handler, move it up into
kvm_mmu_page_fault().  This makes the code better:

 - avoids code duplication
 - for kvm_arch_async_page_ready(), which is the other caller of
   vcpu->arch.mmu.page_fault(), removes an extra error_code check
 - avoids returning both RET_MMIO_PF_* values and raw integer values
   from vcpu->arch.mmu.page_fault()

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index a28b734774ac8898eb3f53787e53bc234becec1d..2ce389245bd8ca8f50de873e7c505d378bc9817b 100644 (file)
@@ -3370,13 +3370,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
        pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
-       if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, gva, true);
-
-               if (likely(r != RET_MMIO_PF_INVALID))
-                       return r;
-       }
-
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
@@ -3460,13 +3453,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-       if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, gpa, true);
-
-               if (likely(r != RET_MMIO_PF_INVALID))
-                       return r;
-       }
-
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
@@ -4361,18 +4347,27 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
        enum emulation_result er;
        bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
+       if (unlikely(error_code & PFERR_RSVD_MASK)) {
+               r = handle_mmio_page_fault(vcpu, cr2, direct);
+               if (r == RET_MMIO_PF_EMULATE) {
+                       emulation_type = 0;
+                       goto emulate;
+               }
+               if (r == RET_MMIO_PF_RETRY)
+                       return 1;
+               if (r < 0)
+                       return r;
+       }
+
        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
        if (r < 0)
-               goto out;
-
-       if (!r) {
-               r = 1;
-               goto out;
-       }
+               return r;
+       if (!r)
+               return 1;
 
        if (mmio_info_in_cache(vcpu, cr2, direct))
                emulation_type = 0;
-
+emulate:
        er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
        switch (er) {
@@ -4386,8 +4381,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
        default:
                BUG();
        }
-out:
-       return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
index 6c9fed957cce1c897f0a490ec74c3f4e4f76d4dc..05827ff7bd2e65e790adcc5dc3c5a87793872518 100644 (file)
@@ -702,23 +702,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 
        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
-       if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
-               if (likely(r != RET_MMIO_PF_INVALID))
-                       return r;
-
-               /*
-                * page fault with PFEC.RSVD  = 1 is caused by shadow
-                * page fault, should not be used to walk guest page
-                * table.
-                */
-               error_code &= ~PFERR_RSVD_MASK;
-       };
-
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
 
+       /*
+        * If PFEC.RSVD is set, this is a shadow page fault.
+        * The bit needs to be cleared before walking guest page tables.
+        */
+       error_code &= ~PFERR_RSVD_MASK;
+
        /*
         * Look up the guest pte for the faulting address.
         */