if (opp->mpic_mode_mask == GCR_MODE_PROXY)
vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
- kvm_device_get(dev);
out:
spin_unlock_irq(&opp->lock);
return ret;
}

void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
- kvm_device_put(opp->dev);
}
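
These first two hunks appear to come from the in-kernel MPIC emulation (arch/powerpc/kvm/mpic.c): connecting and disconnecting a vcpu no longer takes or drops a device reference, because the device's lifetime is now tied to the VM rather than to whoever happens to hold the last reference. A toy userspace contrast of the two lifetime models (illustration only; the obj/put_ref/owner_destroy names are invented and none of this is kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct obj { int refs; };

	/* Old model: every user pins the object; the last put frees it. */
	static void put_ref(struct obj *o)
	{
		if (--o->refs == 0) {
			printf("freed by last user\n");
			free(o);
		}
	}

	/* New model: users never free; one owner tears down at exit. */
	static void owner_destroy(struct obj *o)
	{
		printf("freed by owner\n");
		free(o);
	}

	int main(void)
	{
		struct obj *a = malloc(sizeof(*a));
		a->refs = 2;
		put_ref(a);
		put_ref(a);		/* freed here, under the users' control */

		struct obj *b = malloc(sizeof(*b));
		owner_destroy(b);	/* freed at one predictable point */
		return 0;
	}
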
long mmu_notifier_count;
#endif
long tlbs_dirty;
+ struct list_head devices;
};
#define kvm_err(fmt, ...) \
struct kvm_device {
struct kvm_device_ops *ops;
struct kvm *kvm;
- atomic_t users;
void *private;
+ struct list_head vm_node;
};
/* create, destroy, and name are mandatory */
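
These kvm_host.h hunks put the burden of freeing on the backend: since create, destroy, and name remain mandatory and there is no longer a refcount whose final put frees the device, destroy() must release the kvm_device itself. A hypothetical backend sketch (the demo_* names are invented for illustration, not an in-tree device; wiring the ops into kvm_ioctl_create_device()'s type switch is omitted):

	#include <linux/kvm_host.h>
	#include <linux/slab.h>

	struct demo_state {
		u32 config;
	};

	static int demo_create(struct kvm_device *dev, u32 type)
	{
		struct demo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return -ENOMEM;
		dev->private = s;
		return 0;
	}

	static void demo_destroy(struct kvm_device *dev)
	{
		/* Called from kvm_destroy_devices(): free the device
		 * state and the kvm_device itself; nothing else will. */
		kfree(dev->private);
		kfree(dev);
	}

	static struct kvm_device_ops demo_ops = {
		.name = "demo",
		.create = demo_create,
		.destroy = demo_destroy,
	};
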
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
+ INIT_LIST_HEAD(&kvm->devices);
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
kfree(kvm->memslots);
}
+static void kvm_destroy_devices(struct kvm *kvm)
+{
+ struct list_head *node, *tmp;
+
+ list_for_each_safe(node, tmp, &kvm->devices) {
+ struct kvm_device *dev =
+ list_entry(node, struct kvm_device, vm_node);
+
+ list_del(node);
+ dev->ops->destroy(dev);
+ }
+}
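
Two details in the new kvm_destroy_devices() (in virt/kvm/kvm_main.c) are worth calling out: list_for_each_safe() caches the next pointer before the body runs, because ops->destroy() is expected to free the device and with it the vm_node the iterator stands on, and list_entry() is the kernel's container_of(), stepping back from the embedded list node to the enclosing kvm_device. A self-contained userspace sketch of both mechanics (toy types, not the kernel's <linux/list.h>):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct list_node { struct list_node *next; };

	/* list_entry()/container_of(): subtract the member's
	 * compile-time offset to recover the enclosing structure. */
	#define entry_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device {
		int id;
		struct list_node node;
	};

	int main(void)
	{
		struct list_node *head = NULL;

		for (int i = 0; i < 3; i++) {
			struct device *d = malloc(sizeof(*d));
			d->id = i;
			d->node.next = head;
			head = &d->node;
		}

		/* Safe traversal: cache 'next' before destroying the
		 * element the iterator currently stands on. */
		for (struct list_node *n = head, *tmp; n; n = tmp) {
			struct device *d = entry_of(n, struct device, node);

			tmp = n->next;	/* grab it before free() */
			printf("destroying device %d\n", d->id);
			free(d);
		}
		return 0;
	}
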
+
static void kvm_destroy_vm(struct kvm *kvm)
{
int i;
kvm_arch_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
+ kvm_destroy_devices(kvm);
kvm_free_physmem(kvm);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
}
}
-void kvm_device_get(struct kvm_device *dev)
-{
- atomic_inc(&dev->users);
-}
-
-void kvm_device_put(struct kvm_device *dev)
-{
- if (atomic_dec_and_test(&dev->users))
- dev->ops->destroy(dev);
-}
-
static int kvm_device_release(struct inode *inode, struct file *filp)
{
struct kvm_device *dev = filp->private_data;
struct kvm *kvm = dev->kvm;
- kvm_device_put(dev);
kvm_put_kvm(kvm);
return 0;
}
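
With the refcount gone, closing a device fd now releases only the VM reference the fd was holding; the device itself stays live until kvm_destroy_vm() runs. From userspace the new semantics look like this (sketch with error handling trimmed; KVM_DEV_TYPE_FSL_MPIC_20 is just one example device type):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int sys_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(sys_fd, KVM_CREATE_VM, 0);
		struct kvm_create_device cd = {
			.type = KVM_DEV_TYPE_FSL_MPIC_20,
		};

		if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
			printf("device fd = %d\n", cd.fd);
			close(cd.fd);	/* device state survives this */
		}

		close(vm_fd);		/* VM teardown destroys the device */
		close(sys_fd);
		return 0;
	}

Note the symmetry with the quoted code: kvm_ioctl_create_device() takes kvm_get_kvm() on behalf of the new fd, and kvm_device_release() drops it, so an open device fd keeps the VM, and therefore the device, alive.
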
dev->ops = ops;
dev->kvm = kvm;
- atomic_set(&dev->users, 1);
ret = ops->create(dev, cd->type);
if (ret < 0) {
kfree(dev);
return ret;
}
+ list_add(&dev->vm_node, &kvm->devices);
kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
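
One ordering detail in the creation path: the device is linked into kvm->devices only after ops->create() (and, in the full function, the anon_inode_getfd() call that sits between these lines) has succeeded, so the error paths never have to unlink anything and kvm_destroy_devices() can never meet a half-constructed device. The same publish-last pattern in miniature (toy widget_* names, plain C):

	#include <stdlib.h>

	struct widget {
		struct widget *next;
		int configured;
	};

	struct owner { struct widget *widgets; };

	static struct widget *widget_create(struct owner *o)
	{
		struct widget *w = calloc(1, sizeof(*w));

		if (!w)
			return NULL;
		w->configured = 1;	/* stand-in for ops->create(): may fail */

		/* Publish last: once the object is reachable from the
		 * owner, no step that can fail remains. */
		w->next = o->widgets;
		o->widgets = w;
		return w;
	}

	int main(void)
	{
		struct owner o = { 0 };

		widget_create(&o);
		while (o.widgets) {
			struct widget *w = o.widgets;
			o.widgets = w->next;
			free(w);
		}
		return 0;
	}
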