KVM: avoid using rcu_dereference_protected
author Paolo Bonzini <pbonzini@redhat.com>
Wed, 2 Aug 2017 15:55:54 +0000 (17:55 +0200)
committer Radim Krčmář <rkrcmar@redhat.com>
Wed, 2 Aug 2017 20:41:02 +0000 (22:41 +0200)
During teardown, accesses to memslots and buses use
rcu_dereference_protected with an always-true condition because
they happen outside the usual mutexes.  This is safe because the
last reference is gone and there cannot be any concurrent
modifications, but rcu_dereference_protected is ugly and unobvious.

Instead, check the refcount in kvm_get_bus and __kvm_memslots.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
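
For reference, this is the post-patch form of kvm_get_bus from the hunk
below, with comments added here to spell out when each part of the check
applies; __kvm_memslots follows the same pattern for kvm->memslots[as_id].

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	/*
	 * Legal while holding kvm->slots_lock, inside an SRCU read-side
	 * critical section, or during teardown once the last reference
	 * to the VM is gone and no concurrent updates are possible.
	 */
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}
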
include/linux/kvm_host.h
virt/kvm/kvm_main.c

index 890b706d194348c99517ddf23266de860a395f2e..21a6fd6c44aff62baaea860fb05fb02cb69c0444 100644 (file)
@@ -477,7 +477,8 @@ struct kvm {
 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 {
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
-                                     lockdep_is_held(&kvm->slots_lock));
+                                     lockdep_is_held(&kvm->slots_lock) ||
+                                     !refcount_read(&kvm->users_count));
 }
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
@@ -570,7 +571,8 @@ void kvm_put_kvm(struct kvm *kvm);
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
-                       lockdep_is_held(&kvm->slots_lock));
+                       lockdep_is_held(&kvm->slots_lock) ||
+                       !refcount_read(&kvm->users_count));
 }
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
index f3f74271f1a9f11d2670f8377affc2c125d5181f..15252d723b54e196d864d29a7aac2040686dc59a 100644 (file)
@@ -717,10 +717,9 @@ out_err_no_srcu:
        hardware_disable_all();
 out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
-               kfree(rcu_access_pointer(kvm->buses[i]));
+               kfree(kvm_get_bus(kvm, i));
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               kvm_free_memslots(kvm,
-                       rcu_dereference_protected(kvm->memslots[i], 1));
+               kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
        return ERR_PTR(r);
@@ -754,9 +753,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
-               struct kvm_io_bus *bus;
+               struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
 
-               bus = rcu_dereference_protected(kvm->buses[i], 1);
                if (bus)
                        kvm_io_bus_destroy(bus);
                kvm->buses[i] = NULL;
@@ -770,8 +768,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               kvm_free_memslots(kvm,
-                       rcu_dereference_protected(kvm->memslots[i], 1));
+               kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);