KVM: x86: work on all available address spaces
author	Paolo Bonzini <pbonzini@redhat.com>
	Mon, 18 May 2015 11:33:16 +0000 (13:33 +0200)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Fri, 5 Jun 2015 15:26:37 +0000 (17:26 +0200)
This patch makes no semantic change, but it prepares for the introduction
of a second address space for system management mode.

A new function x86_set_memory_region (and its "slots_lock already held"
counterpart __x86_set_memory_region) is introduced in order to operate
on all address spaces when adding or deleting private memory slots.
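
For illustration, a caller uses the new helper much as vmx_set_tss_addr
does in the hunk below; a minimal sketch (field values taken from the TSS
slot that vmx.c sets up, error handling elided):

	struct kvm_userspace_memory_region tss_mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};

	/* Installs the slot in every address space; takes slots_lock. */
	ret = x86_set_memory_region(kvm, &tss_mem);

	/* Passing a zero size deletes the slot from all address spaces
	 * again, as kvm_arch_destroy_vm does on VM teardown. */
	tss_mem.memory_size = 0;
	ret = x86_set_memory_region(kvm, &tss_mem);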

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2fd420255c2f9bf22f48a83283fc920fe4d3592c..5a5e13af6e03d3703d74c521ef41558858dbb976 100644
@@ -1189,4 +1189,9 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
 
+int __x86_set_memory_region(struct kvm *kvm,
+                           const struct kvm_userspace_memory_region *mem);
+int x86_set_memory_region(struct kvm *kvm,
+                         const struct kvm_userspace_memory_region *mem);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3814f483ac453f2ae23faed3b2bb2cc7e9981fab..7619e9e1745c0d9f9a2eee2e4c92867bd22a9b22 100644
@@ -1503,30 +1503,33 @@ static int kvm_handle_hva_range(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        struct slot_rmap_walk_iterator iterator;
        int ret = 0;
+       int i;
 
-       slots = kvm_memslots(kvm);
-
-       kvm_for_each_memslot(memslot, slots) {
-               unsigned long hva_start, hva_end;
-               gfn_t gfn_start, gfn_end;
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               slots = __kvm_memslots(kvm, i);
+               kvm_for_each_memslot(memslot, slots) {
+                       unsigned long hva_start, hva_end;
+                       gfn_t gfn_start, gfn_end;
 
-               hva_start = max(start, memslot->userspace_addr);
-               hva_end = min(end, memslot->userspace_addr +
-                                       (memslot->npages << PAGE_SHIFT));
-               if (hva_start >= hva_end)
-                       continue;
-               /*
-                * {gfn(page) | page intersects with [hva_start, hva_end)} =
-                * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-                */
-               gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-               gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-               for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-                               PT_MAX_HUGEPAGE_LEVEL, gfn_start, gfn_end - 1,
-                               &iterator)
-                       ret |= handler(kvm, iterator.rmap, memslot,
-                                      iterator.gfn, iterator.level, data);
+                       hva_start = max(start, memslot->userspace_addr);
+                       hva_end = min(end, memslot->userspace_addr +
+                                     (memslot->npages << PAGE_SHIFT));
+                       if (hva_start >= hva_end)
+                               continue;
+                       /*
+                        * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                        * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+                        */
+                       gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+                       gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+                       for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
+                                                PT_MAX_HUGEPAGE_LEVEL,
+                                                gfn_start, gfn_end - 1,
+                                                &iterator)
+                               ret |= handler(kvm, iterator.rmap, memslot,
+                                              iterator.gfn, iterator.level, data);
+               }
        }
 
        return ret;
@@ -4536,21 +4539,23 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
-
-       slots = kvm_memslots(kvm);
+       int i;
 
        spin_lock(&kvm->mmu_lock);
-       kvm_for_each_memslot(memslot, slots) {
-               gfn_t start, end;
-
-               start = max(gfn_start, memslot->base_gfn);
-               end = min(gfn_end, memslot->base_gfn + memslot->npages);
-               if (start >= end)
-                       continue;
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               slots = __kvm_memslots(kvm, i);
+               kvm_for_each_memslot(memslot, slots) {
+                       gfn_t start, end;
+
+                       start = max(gfn_start, memslot->base_gfn);
+                       end = min(gfn_end, memslot->base_gfn + memslot->npages);
+                       if (start >= end)
+                               continue;
 
-               slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-                               start, end - 1, true);
+                       slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+                                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+                                               start, end - 1, true);
+               }
        }
 
        spin_unlock(&kvm->mmu_lock);
@@ -4907,15 +4912,18 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        unsigned int  nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
+       int i;
 
-       slots = kvm_memslots(kvm);
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               slots = __kvm_memslots(kvm, i);
 
-       kvm_for_each_memslot(memslot, slots)
-               nr_pages += memslot->npages;
+               kvm_for_each_memslot(memslot, slots)
+                       nr_pages += memslot->npages;
+       }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
-                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8c80b7d7343ce6cd9b8b87dff5640025e1d706e8..862fa8f2c61d93a5f2a315d5bec852232ec323d6 100644
@@ -4115,7 +4115,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        kvm_userspace_mem.flags = 0;
        kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
        if (r)
                goto out;
 
@@ -4150,7 +4150,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        kvm_userspace_mem.guest_phys_addr =
                kvm->arch.ept_identity_map_addr;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
-       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
+       r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
 
        return r;
 }
@@ -4956,7 +4956,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
                .flags = 0,
        };
 
-       ret = kvm_set_memory_region(kvm, &tss_mem);
+       ret = x86_set_memory_region(kvm, &tss_mem);
        if (ret)
                return ret;
        kvm->arch.tss_addr = addr;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a510f135180a76ef5ee0b61044c5640b9578607e..caa0d5f8e6b36428d885a38455a7476a7539a046 100644
@@ -7963,6 +7963,40 @@ void kvm_arch_sync_events(struct kvm *kvm)
        kvm_free_pit(kvm);
 }
 
+int __x86_set_memory_region(struct kvm *kvm,
+                           const struct kvm_userspace_memory_region *mem)
+{
+       int i, r;
+
+       /* Called with kvm->slots_lock held.  */
+       BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               struct kvm_userspace_memory_region m = *mem;
+
+               m.slot |= i << 16;
+               r = __kvm_set_memory_region(kvm, &m);
+               if (r < 0)
+                       return r;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__x86_set_memory_region);
+
+int x86_set_memory_region(struct kvm *kvm,
+                         const struct kvm_userspace_memory_region *mem)
+{
+       int r;
+
+       mutex_lock(&kvm->slots_lock);
+       r = __x86_set_memory_region(kvm, mem);
+       mutex_unlock(&kvm->slots_lock);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(x86_set_memory_region);
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        if (current->mm == kvm->mm) {
@@ -7974,13 +8008,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                struct kvm_userspace_memory_region mem;
                memset(&mem, 0, sizeof(mem));
                mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-               kvm_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, &mem);
 
                mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-               kvm_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, &mem);
 
                mem.slot = TSS_PRIVATE_MEMSLOT;
-               kvm_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, &mem);
        }
        kvm_iommu_unmap_guest(kvm);
        kfree(kvm->arch.vpic);
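
For context: the "m.slot |= i << 16" line in __x86_set_memory_region above
relies on the convention, introduced later in this series, that the upper
16 bits of a slot id select the address space. A sketch of how the generic
code is expected to decode it (assumed shape, not part of this patch):

	/* Inside __kvm_set_memory_region, once multiple address
	 * spaces exist (sketch): */
	as_id = mem->slot >> 16;	/* which address space */
	id = (u16)mem->slot;		/* slot within that space */
	slots = __kvm_memslots(kvm, as_id);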