#ifdef CONFIG_HAVE_KVM_IRQFD
+static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
[...]
list_del_init(&irqfd->list);
- schedule_work(&irqfd->shutdown);
+ queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
[...]
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
- flush_work(&irqfd->shutdown);
+ flush_workqueue(irqfd_cleanup_wq);
return 0;
}
[...]
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
- flush_work(&irqfd->shutdown);
+ flush_workqueue(irqfd_cleanup_wq);
}
[...]
spin_unlock_irq(&kvm->irqfds.lock);
}
+/*
+ * create a host-wide workqueue for issuing deferred shutdown requests
+ * aggregated from all vm* instances. We need our own isolated
+ * queue to ease flushing work items when a VM exits.
+ */
+int kvm_irqfd_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
void kvm_irqfd_exit(void)
{
+ destroy_workqueue(irqfd_cleanup_wq);
}
#endif
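
The irqfd side of the change is one pattern: give the deferred shutdown work its own workqueue, so the deassign and release paths can flush every outstanding shutdown at once instead of waiting on a single work item. The sketch below is a minimal, hypothetical standalone module (the demo_* names are not part of KVM) showing that pattern with the stock workqueue API; it is an illustration, not the KVM code itself.

#include <linux/module.h>
#include <linux/workqueue.h>

/* private queue: flushing it waits only for our own deferred items */
static struct workqueue_struct *demo_cleanup_wq;

static void demo_shutdown_fn(struct work_struct *work)
{
	pr_info("deferred cleanup ran\n");
}

static DECLARE_WORK(demo_shutdown, demo_shutdown_fn);

static int __init demo_init(void)
{
	demo_cleanup_wq = alloc_workqueue("demo-cleanup", 0, 0);
	if (!demo_cleanup_wq)
		return -ENOMEM;

	/* defer the cleanup, as irqfd_deactivate() now does via queue_work() */
	queue_work(demo_cleanup_wq, &demo_shutdown);

	/* wait for all work items currently queued on this queue */
	flush_workqueue(demo_cleanup_wq);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_cleanup_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Flushing the dedicated queue is what lets kvm_irqfd_deassign() and kvm_irqfd_release() wait for all pending shutdown jobs without keeping a handle on any particular work item, which is what the comments in those two hunks ask for.
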
[...]
* kvm_arch_init makes sure there's at most one caller
* for architectures that support multiple implementations,
* like intel and amd on x86.
+ * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+ * conflicts in case kvm is already setup for another implementation.
*/
+ r = kvm_irqfd_init();
+ if (r)
+ goto out_irqfd;
if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM;
[...]
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
kvm_irqfd_exit();
+out_irqfd:
kvm_arch_exit();
out_fail:
return r;
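
The init-ordering comment and the new out_irqfd label are easier to follow outside the trimmed diff. Below is a small, self-contained userspace sketch of the same shape; every name in it (demo_kvm_init, setup_arch, setup_irqfd, setup_cpumask and the teardown counterparts) is a hypothetical stand-in, not the real KVM code.

#include <stdio.h>

/* simulated init steps; setup_cpumask() fails on purpose */
static int setup_arch(void)    { puts("arch init");    return 0; }
static int setup_irqfd(void)   { puts("irqfd init");   return 0; }
static int setup_cpumask(void) { puts("cpumask init"); return -1; }

static void teardown_irqfd(void) { puts("irqfd exit"); }
static void teardown_arch(void)  { puts("arch exit");  }

static int demo_kvm_init(void)
{
	int r;

	r = setup_arch();        /* kvm_arch_init() has to run first */
	if (r)
		goto out_fail;

	r = setup_irqfd();       /* kvm_irqfd_init(): allocate the cleanup workqueue */
	if (r)
		goto out_irqfd;

	r = setup_cpumask();     /* some later step, e.g. the zalloc_cpumask_var() one */
	if (r)
		goto out_free_0;

	return 0;

out_free_0:
	teardown_irqfd();        /* kvm_irqfd_exit() */
out_irqfd:
	teardown_arch();         /* kvm_arch_exit() */
out_fail:
	return r;
}

int main(void)
{
	return demo_kvm_init() ? 1 : 0;
}

The extra label exists so that a failure in kvm_irqfd_init() unwinds only the arch setup, while later failures reach out_free_0, tear down the irqfd workqueue, and fall through to the same arch teardown.
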