drm/i915: Enable irq to trace batch buffer completion.
author     Chris Wilson <chris@chris-wilson.co.uk>
           Thu, 24 Sep 2009 04:26:06 +0000 (05:26 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 29 Sep 2009 02:15:25 +0000 (03:15 +0100)
If we trigger a tracepoint for batch buffer submission, it is a reasonable
assumption that we also wish to trace the batch buffer completion. In order
to capture the completion events, we need to enable the user irq. However,
we cannot rely on the completion event itself to disable the irq again, so
the irq disable is deferred to i915_gem_retire_requests().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
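
The completion events themselves are emitted from the interrupt path and are
not part of this diff. As a rough sketch only -- assuming the existing
trace_i915_gem_request_complete tracepoint and using a made-up helper name
for the USER_INTERRUPT handling -- the traced completion looks roughly like
this once the irq is held open:

	/* Illustrative sketch, not part of this patch: the helper name is
	 * hypothetical, and trace_i915_gem_request_complete is assumed to
	 * already be defined in i915_trace.h.
	 */
	static void i915_handle_user_interrupt(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
		u32 seqno = i915_get_gem_seqno(dev);

		/* While i915_trace_irq_get() holds the user irq, every
		 * completed batch raises this interrupt, so the completion
		 * is traced as it happens rather than being noticed only at
		 * the next retire. */
		trace_i915_gem_request_complete(dev, seqno);

		DRM_WAKEUP(&dev_priv->irq_queue);
	}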
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_trace.h

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 45d507ebd3ff0bf192f1290d859b509779a356c6..92aeb918e0c02481fa7909972add10e59b1a0f27 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1468,6 +1468,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        dev_priv->user_irq_refcount = 0;
+       dev_priv->trace_irq_seqno = 0;
 
        ret = drm_vblank_init(dev, I915_NUM_PIPE);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b24b2d145b75dc4450fdd24095eca0b519a9fa47..6035d3dae851c498147fda561f35fe92d8369c74 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -202,6 +202,7 @@ typedef struct drm_i915_private {
        spinlock_t user_irq_lock;
        /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
        int user_irq_refcount;
+       u32 trace_irq_seqno;
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 irq_mask_reg;
        u32 pipestat[2];
@@ -665,6 +666,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 void i915_user_irq_get(struct drm_device *dev);
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
 void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b5f9df230d09cb0af47d2091fc1e4724c5181d93..abfc27b0c2eaea77bf8f64eee6ec8f2f3908cefb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!dev_priv->hw_status_page)
+       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
                return;
 
        seqno = i915_get_gem_seqno(dev);
@@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
                } else
                        break;
        }
+
+       if (unlikely (dev_priv->trace_irq_seqno &&
+                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+               i915_user_irq_put(dev);
+               dev_priv->trace_irq_seqno = 0;
+       }
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dfeec7cdd42c9a0e0c8b1832cc6c29ae4efa6e2..c3ceffa46ea0e2dfdfc600e7440f1750376eb1f9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       if (dev_priv->trace_irq_seqno == 0)
+               i915_user_irq_get(dev);
+
+       dev_priv->trace_irq_seqno = seqno;
+}
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 908b3c4d8cf7cfa5b9e8d4610c2765a499eaa87a..01840d9bc38fa8b58401841caf795301beb8b795 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -165,6 +165,7 @@ TRACE_EVENT(i915_gem_request_submit,
            TP_fast_assign(
                           __entry->dev = dev->primary->index;
                           __entry->seqno = seqno;
+                          i915_trace_irq_get(dev, seqno);
                           ),
 
            TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
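
For context, the submit tracepoint above fires from the request submission
path in i915_gem.c. A minimal, simplified stand-in for such a caller (not
the real routine) might look like:

	/* Simplified stand-in for the submission path, not part of this
	 * diff. TP_fast_assign() only runs while the event is enabled, so
	 * i915_trace_irq_get() -- and hence the user irq reference -- is
	 * only taken while someone is actually tracing.
	 */
	static void i915_submit_and_trace(struct drm_device *dev, uint32_t seqno)
	{
		/* ... emit the batch and its seqno to the ring here ... */

		trace_i915_gem_request_submit(dev, seqno);
	}

i915_gem_retire_requests() then drops the reference once the traced seqno
has passed, matching the deferred irq disable described in the commit
message.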