drm/i915: check for ERR_PTR from i915_gem_object_pin_map()
author Dave Gordon <david.s.gordon@intel.com>
Tue, 12 Apr 2016 13:46:16 +0000 (14:46 +0100)
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Wed, 20 Apr 2016 15:59:03 +0000 (16:59 +0100)
The newly-introduced function i915_gem_object_pin_map() returns an
ERR_PTR (not NULL) if the pin-and-map operation fails, so that is what
we must check for. It is also cleaner not to assign such a
pointer-or-error to the structure being filled in until after it has
been validated, so we keep it local and avoid exporting a bogus
pointer. Also, for clarity and symmetry, we clear 'virtual_start' along
with 'vma' when unmapping a ringbuffer.
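
A minimal sketch (not part of this patch) of the ERR_PTR idiom being
adopted; the helper name example_map_ring() is hypothetical, while the
types, i915_gem_object_pin_map() and the struct_mutex requirement are
as documented in the hunks below:

	#include <linux/err.h>
	/* struct intel_ringbuffer / drm_i915_gem_object come from the
	 * i915 headers (i915_drv.h, intel_ringbuffer.h). */

	static int example_map_ring(struct intel_ringbuffer *ringbuf,
				    struct drm_i915_gem_object *obj)
	{
		void *addr;

		/* caller must hold struct_mutex; pin_map() reports
		 * failure via ERR_PTR(), never NULL */
		addr = i915_gem_object_pin_map(obj);
		if (IS_ERR(addr))
			return PTR_ERR(addr);	/* propagate e.g. -ENOMEM */

		/* publish the pointer only once it is known valid */
		ringbuf->virtual_start = addr;
		return 0;
	}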

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_ringbuffer.c

index 85102ad759622f5662d1a7c73a279719c273a417..6f1e0f127c0a1478b181bb6070f92e03b421648d 100644
@@ -3018,9 +3018,11 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
  * pages and then returns a contiguous mapping of the backing storage into
  * the kernel address space.
  *
- * The caller must hold the struct_mutex.
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
  *
- * Returns the pointer through which to access the backing storage.
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
  */
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
 
index 978f0b676c68fd43a9e0cff3f7728795dfc42b10..245386e20c52dd2848c484d2a3ceb5fe7c6e1fa0 100644
@@ -2088,6 +2088,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
                i915_gem_object_unpin_map(ringbuf->obj);
        else
                iounmap(ringbuf->virtual_start);
+       ringbuf->virtual_start = NULL;
        ringbuf->vma = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
@@ -2100,6 +2101,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
        struct drm_i915_gem_object *obj = ringbuf->obj;
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
        unsigned flags = PIN_OFFSET_BIAS | 4096;
+       void *addr;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
@@ -2111,9 +2113,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                if (ret)
                        goto err_unpin;
 
-               ringbuf->virtual_start = i915_gem_object_pin_map(obj);
-               if (ringbuf->virtual_start == NULL) {
-                       ret = -ENOMEM;
+               addr = i915_gem_object_pin_map(obj);
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
                        goto err_unpin;
                }
        } else {
@@ -2129,14 +2131,15 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base +
-                                                   i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (ringbuf->virtual_start == NULL) {
+               addr = ioremap_wc(ggtt->mappable_base +
+                                 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+               if (addr == NULL) {
                        ret = -ENOMEM;
                        goto err_unpin;
                }
        }
 
+       ringbuf->virtual_start = addr;
        ringbuf->vma = i915_gem_obj_to_ggtt(obj);
        return 0;