KVM: PPC: Use RCU for arch.spapr_tce_tables
author     Alexey Kardashevskiy <aik@ozlabs.ru>
           Mon, 15 Feb 2016 01:55:05 +0000 (12:55 +1100)
committer  Paul Mackerras <paulus@samba.org>
           Tue, 16 Feb 2016 02:44:26 +0000 (13:44 +1100)
At the moment only updates to spapr_tce_tables are protected against races;
lookups are not. This fixes the missing protection by using RCU for the list.
As lookups also happen in real mode, this uses
list_for_each_entry_lockless() (which is expected not to access any
vmalloc'd memory).
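
For illustration only, a minimal sketch of the reader side of such an
RCU-protected list; my_table and my_find_table() are hypothetical names, not
part of this patch, but the real-mode lookup in book3s_64_vio_hv.c below has
the same shape:

    #include <linux/rculist.h>
    #include <linux/types.h>

    struct my_table {
            u64 liobn;
            struct list_head list;
            struct rcu_head rcu;
    };

    static struct my_table *my_find_table(struct list_head *tables, u64 liobn)
    {
            struct my_table *t;

            /*
             * list_for_each_entry_lockless() takes no lock and reads the
             * list pointers with READ_ONCE(); it is expected not to touch
             * any vmalloc'd memory, which matters for real-mode callers.
             */
            list_for_each_entry_lockless(t, tables, list)
                    if (t->liobn == liobn)
                            return t;

            return NULL;
    }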

This converts release_spapr_tce_table() to an RCU-scheduled handler.
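
A rough sketch of the corresponding writer/free side, again with hypothetical
names (my_table_release(), my_table_free()): the entry is unlinked with
list_del_rcu() so new readers can no longer find it, and the memory is freed
only from an RCU callback once pre-existing readers have finished:

    #include <linux/rculist.h>
    #include <linux/slab.h>

    /* RCU callback: runs only after all pre-existing readers are done. */
    static void my_table_free(struct rcu_head *head)
    {
            struct my_table *t = container_of(head, struct my_table, rcu);

            kfree(t);
    }

    static void my_table_release(struct my_table *t)
    {
            /* Unlink, so new readers can no longer reach the entry... */
            list_del_rcu(&t->list);
            /* ...and defer the free until current readers have dropped out. */
            call_rcu(&t->rcu, my_table_free);
    }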

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c

index 9d08d8cbed1a1ec0e5893679c5e1fd9cb69dce09..ffdbc2dc18f9739e69758e703378817199849277 100644 (file)
@@ -183,6 +183,7 @@ struct kvmppc_spapr_tce_table {
        struct kvm *kvm;
        u64 liobn;
        u32 window_size;
+       struct rcu_head rcu;
        struct page *pages[0];
 };
 
index 638c6d9be9e08bec96542312aaf18ce8e21900e6..b34220d2aa42c96380b8f2d8fcbe1de630f5c31e 100644 (file)
@@ -807,7 +807,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 {
 
 #ifdef CONFIG_PPC64
-       INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+       INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
 
index 54cf9bc94dadfe3a1debf64f03cea511fc0aa97b..9526c34c29c2b0456ca5a55d9988de5d908121a2 100644 (file)
@@ -45,19 +45,16 @@ static long kvmppc_stt_npages(unsigned long window_size)
                     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
 }
 
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+static void release_spapr_tce_table(struct rcu_head *head)
 {
-       struct kvm *kvm = stt->kvm;
+       struct kvmppc_spapr_tce_table *stt = container_of(head,
+                       struct kvmppc_spapr_tce_table, rcu);
        int i;
 
-       mutex_lock(&kvm->lock);
-       list_del(&stt->list);
        for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
                __free_page(stt->pages[i]);
-       kfree(stt);
-       mutex_unlock(&kvm->lock);
 
-       kvm_put_kvm(kvm);
+       kfree(stt);
 }
 
 static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -88,7 +85,12 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
 {
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
 
-       release_spapr_tce_table(stt);
+       list_del_rcu(&stt->list);
+
+       kvm_put_kvm(stt->kvm);
+
+       call_rcu(&stt->rcu, release_spapr_tce_table);
+
        return 0;
 }
 
@@ -131,7 +133,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
        kvm_get_kvm(kvm);
 
        mutex_lock(&kvm->lock);
-       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+       list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
 
        mutex_unlock(&kvm->lock);
 
index f29ba2c63e07a2ff4281b5339757f36d74049955..124d69246e1184a6dc6f2932bc84f30051f25515 100644 (file)
@@ -51,7 +51,7 @@ static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_spapr_tce_table *stt;
 
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list)
+       list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;