drm/i915: Preserve ring buffers objects across resume
authorChris Wilson <chris@chris-wilson.co.uk>
Wed, 9 Apr 2014 08:19:41 +0000 (09:19 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Mon, 5 May 2014 07:08:37 +0000 (09:08 +0200)
Tearing down the ring buffers across resume is overkill, risks
unnecessary failure and increases fragmentation.

After failure, since the device is still active we may end up trying to
write into the dangling iomapping and trigger an oops.

v2: stop_ringbuffers() was meant to call stop(ring) not
cleanup(ring) during resume!

Reported-by: Jae-hyeon Park <jhyeon@gmail.com>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=72351
References: https://bugs.freedesktop.org/show_bug.cgi?id=76554
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: s/ring->obj == NULL/!intel_ring_initialized(ring)/ as
suggested by Oscar.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 5c8b86196c84b565c426f2edc79d9bb8fbc3b9aa..f766d5f94d934ec0f57875a807d4ced7701f91ae 100644 (file)
@@ -2384,6 +2384,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                i915_gem_free_request(request);
        }
+
+       /* These may not have been flushed before the reset, do so now */
+       kfree(ring->preallocated_lazy_request);
+       ring->preallocated_lazy_request = NULL;
+       ring->outstanding_lazy_seqno = 0;
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2424,8 +2429,6 @@ void i915_gem_reset(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_cleanup(dev_priv, ring);
 
-       i915_gem_cleanup_ringbuffer(dev);
-
        i915_gem_context_reset(dev);
 
        i915_gem_restore_fences(dev);
@@ -4233,6 +4236,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
        kfree(vma);
 }
 
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       for_each_ring(ring, dev_priv, i)
+               intel_stop_ring_buffer(ring);
+}
+
 int
 i915_gem_suspend(struct drm_device *dev)
 {
@@ -4254,7 +4268,7 @@ i915_gem_suspend(struct drm_device *dev)
                i915_gem_evict_everything(dev);
 
        i915_kernel_lost_context(dev);
-       i915_gem_cleanup_ringbuffer(dev);
+       i915_gem_stop_ringbuffers(dev);
 
        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
         * We need to replace this with a semaphore, or something.
index 890e0986edbe9ca8ab469c65b2fd67e169b91951..7d99e84f76a329964c40cea35caccece3c02a539 100644 (file)
@@ -1305,45 +1305,39 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
-       int ret;
 
-       obj = i915_gem_alloc_object(dev, 4096);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate status page\n");
-               ret = -ENOMEM;
-               goto err;
-       }
+       if ((obj = ring->status_page.obj) == NULL) {
+               int ret;
 
-       ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-       if (ret)
-               goto err_unref;
+               obj = i915_gem_alloc_object(ring->dev, 4096);
+               if (obj == NULL) {
+                       DRM_ERROR("Failed to allocate status page\n");
+                       return -ENOMEM;
+               }
 
-       ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
-       if (ret)
-               goto err_unref;
+               ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+               if (ret)
+                       goto err_unref;
+
+               ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+               if (ret) {
+err_unref:
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ring->status_page.obj = obj;
+       }
 
        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-       if (ring->status_page.page_addr == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-       ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);
 
        return 0;
-
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-err:
-       return ret;
 }
 
 static int init_phys_status_page(struct intel_ring_buffer *ring)
@@ -1363,44 +1357,23 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
        return 0;
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_ring_buffer *ring)
 {
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       ring->size = 32 * PAGE_SIZE;
-       memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
-       init_waitqueue_head(&ring->irq_queue);
-
-       if (I915_NEED_GFX_HWS(dev)) {
-               ret = init_status_page(ring);
-               if (ret)
-                       return ret;
-       } else {
-               BUG_ON(ring->id != RCS);
-               ret = init_phys_status_page(ring);
-               if (ret)
-                       return ret;
-       }
+       if (ring->obj)
+               return 0;
 
        obj = NULL;
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ring->size);
        if (obj == NULL)
                obj = i915_gem_alloc_object(dev, ring->size);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate ringbuffer\n");
-               ret = -ENOMEM;
-               goto err_hws;
-       }
-
-       ring->obj = obj;
+       if (obj == NULL)
+               return -ENOMEM;
 
        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
        if (ret)
@@ -1414,55 +1387,72 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
-               DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }
 
-       ret = ring->init(ring);
-       if (ret)
-               goto err_unmap;
+       ring->obj = obj;
+       return 0;
+
+err_unpin:
+       i915_gem_object_ggtt_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(&obj->base);
+       return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring)
+{
+       int ret;
+
+       ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       ring->size = 32 * PAGE_SIZE;
+       memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+       init_waitqueue_head(&ring->irq_queue);
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(ring);
+               if (ret)
+                       return ret;
+       } else {
+               BUG_ON(ring->id != RCS);
+               ret = init_phys_status_page(ring);
+               if (ret)
+                       return ret;
+       }
+
+       ret = allocate_ring_buffer(ring);
+       if (ret) {
+               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+               return ret;
+       }
 
        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
-       if (IS_I830(ring->dev) || IS_845G(ring->dev))
+       if (IS_I830(dev) || IS_845G(dev))
                ring->effective_size -= 2 * CACHELINE_BYTES;
 
        i915_cmd_parser_init_ring(ring);
 
-       return 0;
-
-err_unmap:
-       iounmap(ring->virtual_start);
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-       ring->obj = NULL;
-err_hws:
-       cleanup_status_page(ring);
-       return ret;
+       return ring->init(ring);
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-       struct drm_i915_private *dev_priv;
-       int ret;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
        if (ring->obj == NULL)
                return;
 
-       /* Disable the ring buffer. The ring must be idle at this point */
-       dev_priv = ring->dev->dev_private;
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
-
-       I915_WRITE_CTL(ring, 0);
+       intel_stop_ring_buffer(ring);
+       WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
        iounmap(ring->virtual_start);
 
@@ -2252,3 +2242,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
        ring->gpu_caches_dirty = false;
        return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+{
+       int ret;
+
+       if (!intel_ring_initialized(ring))
+               return;
+
+       ret = intel_ring_idle(ring);
+       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+                         ring->name, ret);
+
+       stop_ring(ring);
+}
index 413cdc74ed535463430209d7c8d12b15ce06e966..54839165eb6d809cbe83413c9bfdcbd7de6a31e3 100644 (file)
@@ -263,6 +263,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX     0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);