Merge remote-tracking branch 'airlied/drm-prime-vmap' into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 288d7b8f49ae48858a30c6ad1f9f7ce1d6d6e600..a20ac438b8ef3945b791e82bebc32b6013dc59eb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1899,34 +1899,82 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
        return ret;
 }
 
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the seqno to wait for completion of
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * an errno, with the remaining time filled into the timeout argument.
+ */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
-                       bool interruptible)
+                       bool interruptible, struct timespec *timeout)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       int ret = 0;
+       struct timespec before, now, wait_time={1,0};
+       unsigned long timeout_jiffies;
+       long end;
+       bool wait_forever = true;
 
        if (i915_seqno_passed(ring->get_seqno(ring), seqno))
                return 0;
 
        trace_i915_gem_request_wait_begin(ring, seqno);
+
+       if (timeout != NULL) {
+               wait_time = *timeout;
+               wait_forever = false;
+       }
+
+       timeout_jiffies = timespec_to_jiffies(&wait_time);
+
        if (WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
+       /* Record current time in case we're interrupted by a signal or wedged */
+       getrawmonotonic(&before);
+
 #define EXIT_COND \
        (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
        atomic_read(&dev_priv->mm.wedged))
+       do {
+               if (interruptible)
+                       end = wait_event_interruptible_timeout(ring->irq_queue,
+                                                              EXIT_COND,
+                                                              timeout_jiffies);
+               else
+                       end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+                                                timeout_jiffies);
 
-       if (interruptible)
-               ret = wait_event_interruptible(ring->irq_queue,
-                                              EXIT_COND);
-       else
-               wait_event(ring->irq_queue, EXIT_COND);
+               if (atomic_read(&dev_priv->mm.wedged))
+                       end = -EAGAIN;
+       } while (end == 0 && wait_forever);
+
+       getrawmonotonic(&now);
 
        ring->irq_put(ring);
        trace_i915_gem_request_wait_end(ring, seqno);
 #undef EXIT_COND
 
-       return ret;
+       if (timeout) {
+               struct timespec sleep_time = timespec_sub(now, before);
+               *timeout = timespec_sub(*timeout, sleep_time);
+       }
+
+       switch (end) {
+       case -EAGAIN: /* Wedged */
+       case -ERESTARTSYS: /* Signal */
+               return (int)end;
+       case 0: /* Timeout */
+               if (timeout)
+                       set_normalized_timespec(timeout, 0, 0);
+               return -ETIME;
+       default: /* Completed */
+               WARN_ON(end < 0); /* We're not aware of other errors */
+               return 0;
+       }
 }
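
The timeout bookkeeping above (sample the clock before sleeping, subtract the
elapsed time from the caller's timespec, zero it on timeout) can be shown with
a small userspace analogue. This is only a sketch of the pattern, not driver
code: condition_met() and wait_with_remaining() are hypothetical names,
clock_gettime(CLOCK_MONOTONIC) stands in for getrawmonotonic(), and a
nanosleep() back-off stands in for wait_event*_timeout().

#include <errno.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical stand-in for the seqno-passed / wedged test in EXIT_COND. */
extern bool condition_met(void);

static long long ts_to_ns(const struct timespec *ts)
{
	return (long long)ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

/*
 * Wait for condition_met(), updating *timeout with the time remaining, the
 * way __wait_seqno() updates its in/out timespec argument.  Returns 0 on
 * success, or -ETIME (with *timeout zeroed) if the allotted time ran out.
 */
static int wait_with_remaining(struct timespec *timeout)
{
	struct timespec before, now;
	long long budget = ts_to_ns(timeout), elapsed;

	clock_gettime(CLOCK_MONOTONIC, &before);

	while (!condition_met()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = ts_to_ns(&now) - ts_to_ns(&before);
		if (elapsed >= budget) {
			timeout->tv_sec = 0;
			timeout->tv_nsec = 0;
			return -ETIME;
		}
		/* The driver sleeps in wait_event*_timeout(); here just back off. */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed = ts_to_ns(&now) - ts_to_ns(&before);
	if (elapsed > budget)
		elapsed = budget;	/* succeeded right at the deadline */
	timeout->tv_sec  = (budget - elapsed) / 1000000000LL;
	timeout->tv_nsec = (budget - elapsed) % 1000000000LL;
	return 0;
}
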
 
 /**
@@ -1934,8 +1982,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_request(struct intel_ring_buffer *ring,
-                 uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        int ret = 0;
@@ -1950,9 +1997,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
-       if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EAGAIN;
+       ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
 
        return ret;
 }
@@ -1975,7 +2020,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
         * it.
         */
        if (obj->active) {
-               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+               ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
                if (ret)
                        return ret;
                i915_gem_retire_requests_ring(obj->ring);
@@ -1984,6 +2029,92 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ *  -ETIME: object is still busy after timeout
+ *  -ERESTARTSYS: signal interrupted the wait
+ *  -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ *  -EAGAIN: GPU wedged
+ *  -ENOMEM: out of memory
+ *  -ENODEV: Internal IRQ fail
+ *  -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter * race condition exists in the busy
+ * ioctl
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+       struct drm_i915_gem_wait *args = data;
+       struct drm_i915_gem_object *obj;
+       struct intel_ring_buffer *ring = NULL;
+       struct timespec timeout;
+       u32 seqno = 0;
+       int ret = 0;
+
+       timeout = ns_to_timespec(args->timeout_ns);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+       if (&obj->base == NULL) {
+               mutex_unlock(&dev->struct_mutex);
+               return -ENOENT;
+       }
+
+       /* Need to make sure the object is flushed first. This non-obvious
+        * flush is required to enforce that (active && !olr) == no wait
+        * necessary.
+        */
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               goto out;
+
+       if (obj->active) {
+               seqno = obj->last_rendering_seqno;
+               ring = obj->ring;
+       }
+
+       if (seqno == 0)
+               goto out;
+
+       ret = i915_gem_check_olr(ring, seqno);
+       if (ret)
+               goto out;
+
+       /* Do this after OLR check to make sure we make forward progress polling
+        * on this IOCTL with a 0 timeout (like busy ioctl)
+        */
+       if (!args->timeout_ns) {
+               ret = -ETIME;
+               goto out;
+       }
+
+       drm_gem_object_unreference(&obj->base);
+       mutex_unlock(&dev->struct_mutex);
+
+       ret = __wait_seqno(ring, seqno, true, &timeout);
+       WARN_ON(!timespec_valid(&timeout));
+       args->timeout_ns = timespec_to_ns(&timeout);
+       return ret;
+
+out:
+       drm_gem_object_unreference(&obj->base);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
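
For context, the new ioctl is meant to be driven from userspace roughly as in
the sketch below. This is a hedged example, assuming libdrm's drmIoctl()
helper and the struct drm_i915_gem_wait / DRM_IOCTL_I915_GEM_WAIT definitions
that accompany this series in i915_drm.h; fd is an open DRM device and
bo_handle a GEM handle the caller already owns. drmIoctl() restarts on EINTR,
so -ERESTARTSYS is normally not observed by the application.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/*
 * Wait up to one second for a buffer object to go idle and report how much
 * of the timeout the kernel left over.  Returns 0 on success, -ETIME if the
 * object was still busy, or another negative errno from the ioctl.
 */
static int wait_for_bo(int fd, unsigned int bo_handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = bo_handle;
	wait.timeout_ns = 1000000000LL;		/* 1 second */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait)) {
		int ret = -errno;

		if (ret == -ETIME)
			fprintf(stderr, "bo %u still busy after timeout\n",
				bo_handle);
		return ret;
	}

	printf("bo %u idle, %lld ns of the timeout remaining\n",
	       bo_handle, (long long)wait.timeout_ns);
	return 0;
}
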
+
 /**
  * i915_gem_object_sync - sync an object to a ring.
  *
@@ -2160,7 +2291,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
                        return ret;
        }
 
-       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+       return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
 int i915_gpu_idle(struct drm_device *dev)
@@ -2364,7 +2495,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
        }
 
        if (obj->last_fenced_seqno) {
-               ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+               ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
                if (ret)
                        return ret;
 
@@ -3030,7 +3161,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (seqno == 0)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, true);
+       ret = __wait_seqno(ring, seqno, true, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3435,6 +3566,38 @@ i915_gem_idle(struct drm_device *dev)
        return 0;
 }
 
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 misccpctl;
+       int i;
+
+       if (!IS_IVYBRIDGE(dev))
+               return;
+
+       if (!dev_priv->mm.l3_remap_info)
+               return;
+
+       misccpctl = I915_READ(GEN7_MISCCPCTL);
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+       POSTING_READ(GEN7_MISCCPCTL);
+
+       for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+               u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+               if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+                       DRM_DEBUG("0x%x was already programmed to %x\n",
+                                 GEN7_L3LOG_BASE + i, remap);
+               if (remap && !dev_priv->mm.l3_remap_info[i/4])
+                       DRM_DEBUG_DRIVER("Clearing remapped register\n");
+               I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+       }
+
+       /* Make sure all the writes land before re-enabling dop clock gating */
+       POSTING_READ(GEN7_L3LOG_BASE);
+
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
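
The loop above advances the register offset in bytes while indexing the remap
table per dword.  A minimal standalone sketch of that addressing follows; the
register values and write_reg() callback are hypothetical stand-ins for the
GEN7_L3LOG_* definitions and I915_WRITE():

#include <stdint.h>

#define L3LOG_BASE	0xB070		/* assumed register base, for illustration */
#define L3LOG_SIZE	0x80		/* assumed log size in bytes */

/* Hypothetical MMIO write, standing in for I915_WRITE(). */
extern void write_reg(uint32_t reg, uint32_t val);

/*
 * Program one 32-bit remap value per register: the register offset advances
 * in bytes (i += 4) while the remap table is indexed per dword (i / 4),
 * mirroring the i vs. i/4 relationship in i915_gem_l3_remap().
 */
static void program_l3_log(const uint32_t *remap_info)
{
	unsigned int i;

	for (i = 0; i < L3LOG_SIZE; i += 4)
		write_reg(L3LOG_BASE + i, remap_info[i / 4]);
}
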
+
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3524,6 +3687,8 @@ i915_gem_init_hw(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       i915_gem_l3_remap(dev);
+
        i915_gem_init_swizzling(dev);
 
        ret = intel_init_render_ring_buffer(dev);