kvm: exclude ioeventfd from counting kvm_io_range limit
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 302681c4aa4465bb21b69524d7c0d3a5341f4a4e..36d14e50f25c5652e24b0f4789053aeabeecf918 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -52,6 +52,7 @@
 
 #include <asm/processor.h>
 #include <asm/io.h>
+#include <asm/ioctl.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
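The new <asm/ioctl.h> include exists to pull in _IOC_TYPE(), which a
later hunk uses to reject vcpu ioctls whose type byte is not KVMIO.
A minimal user-space sketch of the same decoding (standard Linux
headers; error handling omitted):

    #include <stdio.h>
    #include <sys/ioctl.h>  /* _IOC_TYPE() */
    #include <linux/kvm.h>  /* KVM ioctl numbers, KVMIO */

    int main(void)
    {
            /* Every KVM ioctl encodes KVMIO (0xAE) in its type byte,
             * so a vcpu fd can cheaply reject foreign ioctls before
             * touching any vcpu state. */
            printf("type=%#x KVMIO=%#x\n",
                   _IOC_TYPE(KVM_GET_API_VERSION), KVMIO);
            return 0;
    }
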
@@ -105,12 +106,12 @@ bool kvm_is_mmio_pfn(pfn_t pfn)
        if (pfn_valid(pfn)) {
                int reserved;
                struct page *tail = pfn_to_page(pfn);
-               struct page *head = compound_trans_head(tail);
+               struct page *head = compound_head(tail);
                reserved = PageReserved(head);
                if (head != tail) {
                        /*
                         * "head" is not a dangling pointer
-                        * (compound_trans_head takes care of that)
+                        * (compound_head takes care of that)
                         * but the hugepage may have been split
                         * from under us (and we may not hold a
                         * reference count on the head page so it can
@@ -467,6 +468,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
        if (!kvm)
                return ERR_PTR(-ENOMEM);
 
+       spin_lock_init(&kvm->mmu_lock);
+       atomic_inc(&current->mm->mm_count);
+       kvm->mm = current->mm;
+       kvm_eventfd_init(kvm);
+       mutex_init(&kvm->lock);
+       mutex_init(&kvm->irq_lock);
+       mutex_init(&kvm->slots_lock);
+       atomic_set(&kvm->users_count, 1);
+       INIT_LIST_HEAD(&kvm->devices);
+
        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_nodisable;
@@ -496,16 +507,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
                        goto out_err;
        }
 
-       spin_lock_init(&kvm->mmu_lock);
-       kvm->mm = current->mm;
-       atomic_inc(&kvm->mm->mm_count);
-       kvm_eventfd_init(kvm);
-       mutex_init(&kvm->lock);
-       mutex_init(&kvm->irq_lock);
-       mutex_init(&kvm->slots_lock);
-       atomic_set(&kvm->users_count, 1);
-       INIT_LIST_HEAD(&kvm->devices);
-
        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;
@@ -525,6 +526,7 @@ out_err_nodisable:
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);
        kvm_arch_free_vm(kvm);
+       mmdrop(current->mm);
        return ERR_PTR(r);
 }
 
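Taken together, the three hunks above reorder kvm_create_vm(): the
lock, eventfd, and refcount initialization moves ahead of
kvm_arch_init_vm(), the mm reference is now taken from current->mm
before kvm->mm is even assigned, and the error path gains the
mmdrop() that balances it. A condensed sketch of the resulting
pattern (hypothetical error handling; field names from the patch):

    /* Take the reference and initialize locks before anything can
     * fail, and release the reference on every failure path so
     * mm_count stays balanced. */
    atomic_inc(&current->mm->mm_count);     /* paired with mmdrop() */
    kvm->mm = current->mm;
    mutex_init(&kvm->lock);

    r = kvm_arch_init_vm(kvm, type);
    if (r) {
            kvm_arch_free_vm(kvm);
            mmdrop(current->mm);            /* undo the grab above */
            return ERR_PTR(r);
    }
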
@@ -605,8 +607,10 @@ static void kvm_destroy_vm(struct kvm *kvm)
        list_del(&kvm->vm_list);
        raw_spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
-       for (i = 0; i < KVM_NR_BUSES; i++)
+       for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm_io_bus_destroy(kvm->buses[i]);
+               kvm->buses[i] = NULL;
+       }
        kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1548,8 +1552,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        ghc->generation = slots->generation;
        ghc->len = len;
        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
-       ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
-       if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+       ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
+       if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
                ghc->hva += offset;
        } else {
                /*
@@ -1904,6 +1908,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        int r;
        struct kvm_vcpu *vcpu, *v;
 
+       if (id >= KVM_MAX_VCPUS)
+               return -EINVAL;
+
        vcpu = kvm_arch_vcpu_create(kvm, id);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);
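With the bounds check in place, an out-of-range vcpu id now fails
fast with -EINVAL instead of reaching kvm_arch_vcpu_create(). A
minimal user-space probe (assumes access to /dev/kvm; error handling
on the open and KVM_CREATE_VM omitted):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

            /* An id far beyond KVM_MAX_VCPUS is rejected up front. */
            if (ioctl(vm, KVM_CREATE_VCPU, 1 << 30) < 0)
                    perror("KVM_CREATE_VCPU"); /* Invalid argument */
            return 0;
    }
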
@@ -1978,6 +1985,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
+       if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
+               return -EINVAL;
+
 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
@@ -2439,7 +2449,7 @@ static long kvm_vm_ioctl(struct file *filp,
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
-               if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+               if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
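The old >= comparison rejected routing tables of exactly
KVM_MAX_IRQ_ROUTES entries, one short of the advertised maximum;
switching to > restores the full range. A sketch of a caller at the
boundary (hypothetical vm_fd; a real table would append routing.nr
struct kvm_irq_routing_entry items after the header):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* nr == KVM_MAX_IRQ_ROUTES is now accepted; only values above
     * the limit draw -EINVAL. */
    static int probe_routing_limit(int vm_fd)
    {
            struct kvm_irq_routing routing;

            memset(&routing, 0, sizeof(routing));
            routing.nr = 0; /* anything up to KVM_MAX_IRQ_ROUTES */
            return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &routing);
    }
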
@@ -2926,7 +2936,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
-       if (bus->dev_count > NR_IOBUS_DEVS - 1)
+       /* exclude ioeventfds, which are already bounded by the fd limit */
+       if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
                return -ENOSPC;
 
        new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
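This is the change the subject line names: ioeventfds are registered
on the same kvm_io_bus as emulated devices, but user space can only
create as many of them as its fd limit allows, so they no longer
consume NR_IOBUS_DEVS slots. The companion hunk lives in
virt/kvm/eventfd.c and is not shown in this file; sketched from the
commit, it bumps the counter that the check above subtracts out:

    /* virt/kvm/eventfd.c (sketch): each successful ioeventfd
     * registration increments ioeventfd_count on the chosen bus. */
    ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                  &p->dev);
    if (ret < 0)
            goto unlock_fail;
    kvm->buses[bus_idx]->ioeventfd_count++;
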
@@ -2951,6 +2962,14 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm->buses[bus_idx];
+
+       /*
+        * It's possible the bus was already released by the time we
+        * get here. If so, we're done.
+        */
+       if (!bus)
+               return 0;
+
        r = -ENOENT;
        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev) {
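This guard pairs with the kvm_destroy_vm() hunk above, which clears
each kvm->buses[] slot after tearing the bus down: an unregister that
races with (or runs after) VM destruction now returns quietly instead
of dereferencing a freed bus. A sketch of such a caller (hypothetical
device type and field names):

    /* Safe to call unconditionally now, even if the VM's buses
     * have already been destroyed. */
    static void my_dev_release(struct my_dev *dev)
    {
            kvm_io_bus_unregister_dev(dev->kvm, KVM_MMIO_BUS,
                                      &dev->iodev);
            kfree(dev);
    }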