drm/nv50-/vm: take mutex rather than irqsave spinlock
author Ben Skeggs <bskeggs@redhat.com>
Mon, 13 May 2013 11:13:15 +0000 (21:13 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Mon, 1 Jul 2013 03:44:50 +0000 (13:44 +1000)
These operations can take quite some time, and we really don't want to
have to hold a spinlock for too long.

Now that the lock ordering for vm and the gr/nv84 hw bug workaround has
been reversed, it's possible to use a mutex here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c

index 966e61434c7a5f0587e738aabc1ae9a3f8c4832a..50c66122cc89bdcd73fd1b56a1acb8a5dde20fd8 100644 (file)
@@ -31,7 +31,6 @@
 
 struct nv50_vmmgr_priv {
        struct nouveau_vmmgr base;
-       spinlock_t lock;
 };
 
 static void
@@ -153,10 +152,9 @@ nv50_vm_flush(struct nouveau_vm *vm)
 {
        struct nv50_vmmgr_priv *priv = (void *)vm->vmm;
        struct nouveau_engine *engine;
-       unsigned long flags;
        int i, vme;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       mutex_lock(&nv_subdev(priv)->mutex);
        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if (!atomic_read(&vm->engref[i]))
                        continue;
@@ -182,7 +180,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
                if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
                        nv_error(priv, "vm flush timeout: engine %d\n", vme);
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
@@ -220,7 +218,6 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        priv->base.map_sg = nv50_vm_map_sg;
        priv->base.unmap = nv50_vm_unmap;
        priv->base.flush = nv50_vm_flush;
-       spin_lock_init(&priv->lock);
        return 0;
 }
 
index 4c3b0a23b9d61a36f6335e999683bfb47647c1b8..beb09743aafff3484f52f227ca41ec46e9ed558f 100644 (file)
@@ -32,7 +32,6 @@
 
 struct nvc0_vmmgr_priv {
        struct nouveau_vmmgr base;
-       spinlock_t lock;
 };
 
 
@@ -164,12 +163,11 @@ void
 nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 {
        struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
-       unsigned long flags;
 
        /* looks like maybe a "free flush slots" counter, the
         * faster you write to 0x100cbc to more it decreases
         */
-       spin_lock_irqsave(&priv->lock, flags);
+       mutex_lock(&nv_subdev(priv)->mutex);
        if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
                nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
                         nv_rd32(subdev, 0x100c80), type);
@@ -183,7 +181,7 @@ nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
                nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
                         nv_rd32(subdev, 0x100c80), type);
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static void
@@ -227,7 +225,6 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        priv->base.map_sg = nvc0_vm_map_sg;
        priv->base.unmap = nvc0_vm_unmap;
        priv->base.flush = nvc0_vm_flush;
-       spin_lock_init(&priv->lock);
        return 0;
 }