KVM: x86: remove now unneeded hugepage gfn adjustment
author Paolo Bonzini <pbonzini@redhat.com>
Sun, 23 Jun 2019 17:15:49 +0000 (19:15 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 16 Nov 2019 09:29:51 +0000 (10:29 +0100)
commit d679b32611c0102ce33b9e1a4e4b94854ed1812a upstream.

After the previous patch, the low bits of the gfn are masked in
both FNAME(fetch) and __direct_map, so we do not need to clear them
in transparent_hugepage_adjust.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
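
The arithmetic behind the change can be illustrated with a small standalone sketch. This is plain userspace C, not kernel code: PAGES_PER_HPAGE and the example frame numbers are invented stand-ins for the kernel's KVM_PAGES_PER_HPAGE(level) and real gfn/pfn values. It shows that once the mapping code computes its own huge-page-aligned base gfn, aligning the pfn inside transparent_hugepage_adjust() is enough and nothing needs to be written back through a gfn pointer.

/* Userspace illustration only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t kvm_pfn_t;

/* Invented stand-in: 512 x 4 KiB pages = one 2 MiB huge page. */
#define PAGES_PER_HPAGE 512ULL

int main(void)
{
	/*
	 * The faulting gfn and pfn share their low bits, as the kernel's
	 * VM_BUG_ON((gfn & mask) != (pfn & mask)) asserts.
	 */
	gfn_t gfn = 0x12345;
	kvm_pfn_t pfn = 0x98745;
	uint64_t mask = PAGES_PER_HPAGE - 1;

	/*
	 * transparent_hugepage_adjust() still aligns the pfn so that a
	 * whole huge page can be mapped...
	 */
	kvm_pfn_t aligned_pfn = pfn & ~mask;

	/*
	 * ...but the aligned gfn no longer has to be handed back: the
	 * mapping code recomputes it for the chosen level itself.
	 */
	gfn_t base_gfn = gfn & ~mask;

	printf("gfn %#llx -> base_gfn    %#llx\n",
	       (unsigned long long)gfn, (unsigned long long)base_gfn);
	printf("pfn %#llx -> aligned_pfn %#llx\n",
	       (unsigned long long)pfn, (unsigned long long)aligned_pfn);
	return 0;
}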

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bf370b63928ecbca273fc78a6350c67da1b793d9..5354eef3c3b4e35a4a503534e76b4abe81191796 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2824,11 +2824,10 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-                                       gfn_t *gfnp, kvm_pfn_t *pfnp,
+                                       gfn_t gfn, kvm_pfn_t *pfnp,
                                        int *levelp)
 {
        kvm_pfn_t pfn = *pfnp;
-       gfn_t gfn = *gfnp;
        int level = *levelp;
 
        /*
@@ -2855,8 +2854,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
                mask = KVM_PAGES_PER_HPAGE(level) - 1;
                VM_BUG_ON((gfn & mask) != (pfn & mask));
                if (pfn & mask) {
-                       gfn &= ~mask;
-                       *gfnp = gfn;
                        kvm_release_pfn_clean(pfn);
                        pfn &= ~mask;
                        kvm_get_pfn(pfn);
@@ -3060,7 +3057,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                goto out_unlock;
        make_mmu_pages_available(vcpu);
        if (likely(!force_pt_level))
-               transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+               transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
        r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3595,7 +3592,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
                goto out_unlock;
        make_mmu_pages_available(vcpu);
        if (likely(!force_pt_level))
-               transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+               transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
        r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c0d87ba4217d5f08dd7aad93e6eefc8e363e6782..116a94361da1c0919497880e3441c83c327bd4ba 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -806,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        make_mmu_pages_available(vcpu);
        if (!force_pt_level)
-               transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
+               transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
                         level, pfn, map_writable, prefault);
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
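
As a follow-up illustration of the call-site changes above (gfn now passed by value, pfn and level still in/out), here is a minimal userspace mock. The function body, the level constant and the frame numbers are invented for the example; only the shape of the new signature mirrors the patch.

/* Userspace mock of the new calling convention -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t kvm_pfn_t;

#define PAGES_PER_HPAGE 512ULL	/* invented stand-in: one 2 MiB level */

/* gfn is read-only now; only pfn and level are adjusted in place. */
static void thp_adjust(gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
{
	uint64_t mask = PAGES_PER_HPAGE - 1;

	(void)gfn;		/* consulted, never written back */
	*pfnp &= ~mask;		/* map from the start of the huge page */
	*levelp = 2;		/* promote to the 2 MiB level */
}

int main(void)
{
	gfn_t gfn = 0x12345;
	kvm_pfn_t pfn = 0x98745;	/* same low bits as gfn */
	int level = 1;

	thp_adjust(gfn, &pfn, &level);	/* gfn by value, no &gfn */
	printf("pfn=%#llx level=%d gfn unchanged=%#llx\n",
	       (unsigned long long)pfn, level, (unsigned long long)gfn);
	return 0;
}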