drm/nouveau: reduce usage of fence spinlock to when absolutely necessary
author Ben Skeggs <bskeggs@redhat.com>
Mon, 31 May 2010 02:00:43 +0000 (12:00 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Tue, 13 Jul 2010 00:14:04 +0000 (10:14 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nv04_graph.c

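The patch narrows the fence locking. Callers used to bracket every nouveau_fence_update() call with spin_lock_irqsave(&chan->fence.lock, flags), and the NV04 interrupt path reached fence processing through nouveau_fence_handler(). After the change the lock is taken only inside nouveau_fence_update(), around the walk of the pending list, and last_sequence_irq becomes an atomic_t so the software-method handler can publish the sequence without the lock. A condensed sketch of the resulting shape, with a stand-in struct rather than the driver's own types:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fence_state {                 /* stand-in for the fence member  */
	struct list_head pending;    /* of struct nouveau_channel      */
	spinlock_t lock;
	u32 sequence;
	u32 sequence_ack;
	atomic_t last_sequence_irq;
};

static void fence_update(struct fence_state *f)
{
	struct list_head *entry, *tmp;
	u32 sequence;

	/* Reading the completed sequence needs no lock; here it is the
	 * atomic_t the NV04 method handler writes (the real code reads
	 * a hardware register instead when USE_REFCNT). */
	sequence = atomic_read(&f->last_sequence_irq);
	if (f->sequence_ack == sequence)
		return;
	f->sequence_ack = sequence;

	/* Plain spin_lock suffices: this path no longer runs from
	 * interrupt context. Only the pending-list walk is serialised. */
	spin_lock(&f->lock);
	list_for_each_safe(entry, tmp, &f->pending) {
		/* ... signal and unlink fences up to sequence_ack ... */
	}
	spin_unlock(&f->lock);
}
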
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 53daeba4581e74b27c43c125fbff5809e36a9157..90fdcda332be360ab07cb862c9a6ad533724a55c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -258,9 +258,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
        nouveau_debugfs_channel_fini(chan);
 
        /* Give outstanding push buffers a chance to complete */
-       spin_lock_irqsave(&chan->fence.lock, flags);
        nouveau_fence_update(chan);
-       spin_unlock_irqrestore(&chan->fence.lock, flags);
        if (chan->fence.sequence != chan->fence.sequence_ack) {
                struct nouveau_fence *fence = NULL;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 47fa28ddec7566a8d3511efef44adf4deaf58156..587a0ab1fe67b6900ec6d27f6b0227e6a6062ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -188,7 +188,7 @@ struct nouveau_channel {
                struct list_head pending;
                uint32_t sequence;
                uint32_t sequence_ack;
-               uint32_t last_sequence_irq;
+               atomic_t last_sequence_irq;
        } fence;
 
        /* DMA push buffer */
@@ -1111,7 +1111,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
 extern int nouveau_fence_flush(void *obj, void *arg);
 extern void nouveau_fence_unref(void **obj);
 extern void *nouveau_fence_ref(void *obj);
-extern void nouveau_fence_handler(struct drm_device *dev, int channel);
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
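
The atomic_t switch above is what lets the interrupt-time writer and the process-context reader share last_sequence_irq without holding fence.lock: both sides touch a single word through atomic_set()/atomic_read(). A minimal sketch of the publish/consume idiom, using hypothetical function names and only <linux/atomic.h>:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t last_sequence_irq = ATOMIC_INIT(0);

/* Producer, called from the software-method handler: publish the
 * sequence number the GPU just wrote back. */
static void publish_sequence(u32 data)
{
	atomic_set(&last_sequence_irq, data);
}

/* Consumer, polled from fence update: a single-word read needs
 * no lock. */
static u32 latest_sequence(void)
{
	return atomic_read(&last_sequence_irq);
}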
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index faddf53ff9ed79f0804735c151a28c3fb2d9a7ec..813d853b741b31ad4ba1a242d501ce23cce264ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan)
        if (USE_REFCNT)
                sequence = nvchan_rd32(chan, 0x48);
        else
-               sequence = chan->fence.last_sequence_irq;
+               sequence = atomic_read(&chan->fence.last_sequence_irq);
 
        if (chan->fence.sequence_ack == sequence)
                return;
        chan->fence.sequence_ack = sequence;
 
+       spin_lock(&chan->fence.lock);
        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
                if (sequence == chan->fence.sequence_ack)
                        break;
        }
+       spin_unlock(&chan->fence.lock);
 }
 
 int
@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 {
        struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
        struct nouveau_channel *chan = fence->channel;
-       unsigned long flags;
        int ret;
 
        ret = RING_SPACE(chan, 2);
@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
                return ret;
 
        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
-               spin_lock_irqsave(&chan->fence.lock, flags);
                nouveau_fence_update(chan);
-               spin_unlock_irqrestore(&chan->fence.lock, flags);
 
                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
        fence->sequence = ++chan->fence.sequence;
 
        kref_get(&fence->refcount);
-       spin_lock_irqsave(&chan->fence.lock, flags);
+       spin_lock(&chan->fence.lock);
        list_add_tail(&fence->entry, &chan->fence.pending);
-       spin_unlock_irqrestore(&chan->fence.lock, flags);
+       spin_unlock(&chan->fence.lock);
 
        BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
        OUT_RING(chan, fence->sequence);
@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
        struct nouveau_fence *fence = nouveau_fence(sync_obj);
        struct nouveau_channel *chan = fence->channel;
-       unsigned long flags;
 
        if (fence->signalled)
                return true;
 
-       spin_lock_irqsave(&chan->fence.lock, flags);
        nouveau_fence_update(chan);
-       spin_unlock_irqrestore(&chan->fence.lock, flags);
        return fence->signalled;
 }
 
@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
        return 0;
 }
 
-void
-nouveau_fence_handler(struct drm_device *dev, int channel)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_channel *chan = NULL;
-
-       if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
-               chan = dev_priv->fifos[channel];
-
-       if (chan) {
-               spin_lock_irq(&chan->fence.lock);
-               nouveau_fence_update(chan);
-               spin_unlock_irq(&chan->fence.lock);
-       }
-}
-
 int
 nouveau_fence_init(struct nouveau_channel *chan)
 {
        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
+       atomic_set(&chan->fence.last_sequence_irq, 0);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dd09679d31dce616244f24f478be66385bc47702..c8973421b635529fcb027539aa2f7bbffde81546 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -527,8 +527,7 @@ static int
 nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
                        int mthd, uint32_t data)
 {
-       chan->fence.last_sequence_irq = data;
-       nouveau_fence_handler(chan->dev, chan->id);
+       atomic_set(&chan->fence.last_sequence_irq, data);
        return 0;
 }
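
With nouveau_fence_handler() deleted, its only caller in this patch, nv04_graph_mthd_set_ref(), now just stores the sequence; completed fences are reaped whenever nouveau_fence_update() is polled, for example from nouveau_fence_signalled(), nouveau_fence_emit() and nouveau_channel_free(). Condensed from the hunks above, a polling caller now looks roughly like this, with no irqsave bracket at the call site:

static bool fence_signalled(struct nouveau_fence *fence)
{
	if (fence->signalled)
		return true;

	/* nouveau_fence_update() takes chan->fence.lock internally now. */
	nouveau_fence_update(fence->channel);
	return fence->signalled;
}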