KVM: MMU: remove mmu_is_invalid
author: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Tue, 16 Oct 2012 12:08:01 +0000 (20:08 +0800)
committer: Avi Kivity <avi@redhat.com>
Wed, 17 Oct 2012 14:39:15 +0000 (16:39 +0200)
Remove mmu_invalid_pfn and use is_invalid_pfn instead

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index d289fee1ffb8631c0b93f663104965fe7def7b11..7fe1e038be217d20f15e78d5a2371e2f2e2fc657 100644 (file)
@@ -2700,11 +2700,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
        }
 }
 
-static bool mmu_invalid_pfn(pfn_t pfn)
-{
-       return unlikely(is_invalid_pfn(pfn));
-}
-
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                                pfn_t pfn, unsigned access, int *ret_val)
 {
index 714e2c01a6fe0fd714f66146c8613f17f31f4a68..045d31ae8eb307b9bbef7429998b2254598cebb9 100644 (file)
@@ -340,7 +340,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        pte_access = sp->role.access & gpte_access(vcpu, gpte);
        protect_clean_gpte(&pte_access, gpte);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-       if (mmu_invalid_pfn(pfn))
+       if (is_invalid_pfn(pfn))
                return;
 
        /*
@@ -416,7 +416,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                gfn = gpte_to_gfn(gpte);
                pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                      pte_access & ACC_WRITE_MASK);
-               if (mmu_invalid_pfn(pfn))
+               if (is_invalid_pfn(pfn))
                        break;
 
                mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,