KVM: split kvm_arch_set_memory_region into prepare and commit
author     Marcelo Tosatti <mtosatti@redhat.com>
           Wed, 23 Dec 2009 16:35:18 +0000 (14:35 -0200)
committer  Marcelo Tosatti <mtosatti@redhat.com>
           Mon, 1 Mar 2010 15:35:44 +0000 (12:35 -0300)
Split the arch hook into a prepare step, which runs before the generic
memslot update and may fail, and a commit step, which runs after the
update and cannot fail.

Required for the SRCU conversion later.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/ia64/kvm/kvm-ia64.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c
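
The split turns a single fallible arch hook into a fallible prepare step
and an infallible commit step, so the generic code can publish the memslot
update in between without ever needing to roll it back. Below is a minimal
standalone sketch of that pattern; the types and names (struct vm,
set_region, ...) are hypothetical simplifications for illustration, not
the kernel API.

    /*
     * prepare_commit_sketch.c -- standalone illustration of the
     * prepare/commit pattern.  Hypothetical simplified types; not
     * the kernel API.  Build: cc -Wall prepare_commit_sketch.c
     */
    #include <stdio.h>

    #define MAX_SLOTS 8

    struct slot {
            unsigned long base_gfn;
            unsigned long npages;
    };

    struct vm {
            struct slot slots[MAX_SLOTS];
            int nslots;
    };

    /* "prepare" may fail but must leave visible VM state untouched. */
    static int prepare_region(struct vm *vm, const struct slot *s)
    {
            if (vm->nslots >= MAX_SLOTS)
                    return -1;      /* validation error; nothing to undo */
            return 0;
    }

    /* "commit" runs after the update is published and cannot fail,
     * so the caller needs no rollback path. */
    static void commit_region(struct vm *vm, const struct slot *s)
    {
            printf("committed slot %d: gfn %#lx, %lu pages\n",
                   vm->nslots - 1, s->base_gfn, s->npages);
    }

    static int set_region(struct vm *vm, struct slot s)
    {
            if (prepare_region(vm, &s))     /* 1: fallible checks     */
                    return -1;
            vm->slots[vm->nslots++] = s;    /* 2: publish the update  */
            commit_region(vm, &s);          /* 3: infallible effects  */
            return 0;
    }

    int main(void)
    {
            struct vm vm = { .nslots = 0 };
            struct slot s = { .base_gfn = 0x1000, .npages = 16 };

            return set_region(&vm, s) ? 1 : 0;
    }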

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 1ca1dbf4811718f71d4cf890ef6b0a3176bba4af..0757c7027986fadf16ef3a07aa00e95b9282fed5 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1578,15 +1578,15 @@ out:
        return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-               struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+               struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
+               struct kvm_userspace_memory_region *mem,
                int user_alloc)
 {
        unsigned long i;
        unsigned long pfn;
-       int npages = mem->memory_size >> PAGE_SHIFT;
-       struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+       int npages = memslot->npages;
        unsigned long base_gfn = memslot->base_gfn;
 
        if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+               struct kvm_userspace_memory_region *mem,
+               struct kvm_memory_slot old,
+               int user_alloc)
+{
+       return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
        kvm_flush_remote_tlbs(kvm);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93b178ec2d037a39eeee202b567a42d6eb2..4633e7850dd2bf336706228f9ce558ee95faf9af 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
        return -EINVAL;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                   struct kvm_memory_slot *memslot,
+                                   struct kvm_memory_slot old,
+                                   struct kvm_userspace_memory_region *mem,
+                                   int user_alloc)
 {
        return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+               struct kvm_userspace_memory_region *mem,
+               struct kvm_memory_slot old,
+               int user_alloc)
+{
+       return;
+}
+
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3fa0a10e4668f1ec5df6b0a892ba11b4131cd7c9..c8002193d9d41649c2279e8b68c9dd1afdf09617 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 }
 
 /* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_memory_slot old,
+                                  struct kvm_userspace_memory_region *mem,
+                                  int user_alloc)
 {
-       int i;
-       struct kvm_vcpu *vcpu;
-
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        if (!user_alloc)
                return -EINVAL;
 
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
-
-       return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ce8331914305e4e391956c9b6ff1362e538484f..43da65feed49499fc6a46366b7cd225b815e2dda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(kvm);
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                               struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
+                               struct kvm_userspace_memory_region *mem,
                                int user_alloc)
 {
-       int npages = mem->memory_size >> PAGE_SHIFT;
-       struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+       int npages = memslot->npages;
 
        /*To keep backward compatibility with older userspace,
         *x86 needs to hanlde !user_alloc case.
@@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                        if (IS_ERR((void *)userspace_addr))
                                return PTR_ERR((void *)userspace_addr);
 
-                       /* set userspace_addr atomically for kvm_hva_to_rmapp */
-                       spin_lock(&kvm->mmu_lock);
                        memslot->userspace_addr = userspace_addr;
-                       spin_unlock(&kvm->mmu_lock);
-               } else {
-                       if (!old.user_alloc && old.rmap) {
-                               int ret;
-
-                               down_write(&current->mm->mmap_sem);
-                               ret = do_munmap(current->mm, old.userspace_addr,
-                                               old.npages * PAGE_SIZE);
-                               up_write(&current->mm->mmap_sem);
-                               if (ret < 0)
-                                       printk(KERN_WARNING
-                                      "kvm_vm_ioctl_set_memory_region: "
-                                      "failed to munmap memory\n");
-                       }
                }
        }
 
+
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+
+       int npages = mem->memory_size >> PAGE_SHIFT;
+
+       if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+               int ret;
+
+               down_write(&current->mm->mmap_sem);
+               ret = do_munmap(current->mm, old.userspace_addr,
+                               old.npages * PAGE_SIZE);
+               up_write(&current->mm->mmap_sem);
+               if (ret < 0)
+                       printk(KERN_WARNING
+                              "kvm_vm_ioctl_set_memory_region: "
+                              "failed to munmap memory\n");
+       }
+
        spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        spin_unlock(&kvm->mmu_lock);
-
-       return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 782bfb185f8ad5db6221e291315639ea0414262b..3c44687b34259ab641044c185bfeeaaccb692ea6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                               struct kvm_memory_slot *memslot,
+                               struct kvm_memory_slot old,
+                               struct kvm_userspace_memory_region *mem,
+                               int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 86dd8f3d29c97f1555cb75295bc409e9d0d2e020..c9f6cfe831209396098b552afd02290be350e0ab 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -663,6 +663,10 @@ skip_lpage:
        if (!npages)
                kvm_arch_flush_shadow(kvm);
 
+       r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+       if (r)
+               goto out_free;
+
        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->memslots->nmemslots)
                kvm->memslots->nmemslots = mem->slot + 1;
@@ -670,13 +674,7 @@ skip_lpage:
        *memslot = new;
        spin_unlock(&kvm->mmu_lock);
 
-       r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
-       if (r) {
-               spin_lock(&kvm->mmu_lock);
-               *memslot = old;
-               spin_unlock(&kvm->mmu_lock);
-               goto out_free;
-       }
+       kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
        kvm_free_physmem_slot(&old, npages ? &new : NULL);
        /* Slot deletion case: we have to update the current slot */
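
Note the resulting ordering in __kvm_set_memory_region() above:
kvm_arch_prepare_memory_region() is called before the slots array is
touched, so an arch failure aborts with nothing to undo, while
kvm_arch_commit_memory_region() runs after the update and returns void.
That is why the old error path, which restored *memslot = old when
kvm_arch_set_memory_region() failed, disappears.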