drm/i915: Fall back to single page pwrite/pread if unable to release fence
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 18 Aug 2016 16:16:45 +0000 (17:16 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Thu, 18 Aug 2016 21:36:43 +0000 (22:36 +0100)
If we cannot release the fence (for example, if someone is inexplicably
trying to write into a tiled framebuffer that is currently pinned to the
display! *cough* kms_frontbuffer_tracking *cough*), fall back to using
the page-by-page pwrite/pread interface rather than failing the syscall
entirely.

Since this is triggerable by the user (along the pwrite path), we have
to remove the WARN_ON(fence->pin_count) and simply return -EBUSY.
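
As a minimal standalone sketch of the control flow this relies on (the
names fast_gtt_write, write_page_by_page and struct object are
hypothetical stand-ins for the i915 helpers, not the driver code
itself): the fast path reports -EBUSY when the fence cannot be dropped,
and the caller falls through to a page-by-page copy instead of failing
the write.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for a tiled object whose fence may be pinned. */
struct object {
	int fence_pinned;              /* e.g. pinned for scanout */
	char backing[4 * PAGE_SIZE];
};

/* Fast path: refuses to run while the fence is pinned, mirroring
 * i915_gem_object_put_fence() returning -EBUSY. */
static int fast_gtt_write(struct object *obj, const char *buf, size_t len)
{
	if (obj->fence_pinned)
		return -EBUSY;
	memcpy(obj->backing, buf, len);
	return 0;
}

/* Slow path: copy one page at a time, always possible. */
static int write_page_by_page(struct object *obj, const char *buf, size_t len)
{
	size_t off;

	for (off = 0; off < len; off += PAGE_SIZE) {
		size_t chunk = len - off < PAGE_SIZE ? len - off : PAGE_SIZE;

		memcpy(obj->backing + off, buf + off, chunk);
	}
	return 0;
}

/* Caller: try the fast path, fall back instead of failing the syscall. */
static int pwrite_object(struct object *obj, const char *buf, size_t len)
{
	int ret = fast_gtt_write(obj, buf, len);

	if (ret == -EBUSY || ret == -EFAULT)
		ret = write_page_by_page(obj, buf, len);
	return ret;
}

int main(void)
{
	struct object obj = { .fence_pinned = 1 };
	char data[2 * PAGE_SIZE] = "hello";

	/* Succeeds via the per-page fallback despite the pinned fence. */
	printf("pwrite -> %d\n", pwrite_object(&obj, data, sizeof(data)));
	return 0;
}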

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-6-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_fence.c

index 5adbf8c3b81c97761c37457b96ccb8ac0322daae..a609522221ed272d5ba0b4389b3a77444b4c8580 100644
@@ -754,6 +754,15 @@ i915_gem_gtt_pread(struct drm_device *dev,
        int ret;
 
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (!IS_ERR(vma)) {
+               node.start = i915_ggtt_offset(vma);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret) {
+                       i915_vma_unpin(vma);
+                       vma = ERR_PTR(ret);
+               }
+       }
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
                if (ret)
@@ -766,12 +775,6 @@ i915_gem_gtt_pread(struct drm_device *dev,
                }
 
                i915_gem_object_pin_pages(obj);
-       } else {
-               node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
-               ret = i915_gem_object_put_fence(obj);
-               if (ret)
-                       goto out_unpin;
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -1058,6 +1061,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE | PIN_NONBLOCK);
+       if (!IS_ERR(vma)) {
+               node.start = i915_ggtt_offset(vma);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret) {
+                       i915_vma_unpin(vma);
+                       vma = ERR_PTR(ret);
+               }
+       }
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(i915, &node, PAGE_SIZE);
                if (ret)
@@ -1070,12 +1082,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                }
 
                i915_gem_object_pin_pages(obj);
-       } else {
-               node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
-               ret = i915_gem_object_put_fence(obj);
-               if (ret)
-                       goto out_unpin;
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
index 334c3c4e835707ed908c2346c70a7b0207e95d9e..b0c6c2777725cae82d2cde3637d16c8345477ae0 100644
@@ -298,7 +298,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 
        fence = &dev_priv->fence_regs[obj->fence_reg];
 
-       if (WARN_ON(fence->pin_count))
+       if (fence->pin_count)
                return -EBUSY;
 
        i915_gem_object_fence_lost(obj);