KVM: rename x86 kvm->arch.n_alloc_mmu_pages
authorDave Hansen <dave@linux.vnet.ibm.com>
Fri, 20 Aug 2010 01:11:14 +0000 (18:11 -0700)
committerAvi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:51:18 +0000 (10:51 +0200)
arch.n_alloc_mmu_pages is a poor choice of name. This value truly
means, "the number of pages which _may_ be allocated".  But,
reading the name, "n_alloc_mmu_pages" implies "the number of allocated
mmu pages", which is dead wrong.

It's really the high watermark, so let's give it a name to match:
n_max_mmu_pages.  This change will make the next few patches
much more obvious and easy to read.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

index c52e2eb40a1e254339658481621be634b427a8f0..02963684cd282348f5171838daabff747d435fe7 100644 (file)
@@ -369,7 +369,7 @@ struct kvm_vcpu_arch {
 struct kvm_arch {
        unsigned int n_free_mmu_pages;
        unsigned int n_requested_mmu_pages;
-       unsigned int n_alloc_mmu_pages;
+       unsigned int n_max_mmu_pages;
        atomic_t invlpg_counter;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
index 625b178946613f8661d5ef12e27189ff5272b5c6..6979e7d1464e3c72e7cf68dfe6883f06b6dd6e83 100644 (file)
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
        int used_pages;
        LIST_HEAD(invalid_list);
 
-       used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
+       used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
        used_pages = max(0, used_pages);
 
        /*
@@ -1721,9 +1721,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
        }
        else
                kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-                                        - kvm->arch.n_alloc_mmu_pages;
+                                        - kvm->arch.n_max_mmu_pages;
 
-       kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
+       kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -3141,7 +3141,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
-               npages = kvm->arch.n_alloc_mmu_pages -
+               npages = kvm->arch.n_max_mmu_pages -
                         kvm_mmu_available_pages(kvm);
                cache_count += npages;
                if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
index c0004eb354d3263f6bb15361a5d0798582e07fe0..4b4d2836240f0e88632ef5dfbdc34f0bc0182903 100644 (file)
@@ -2759,7 +2759,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-       return kvm->arch.n_alloc_mmu_pages;
+       return kvm->arch.n_max_mmu_pages;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)