drm/i915: Defer active reference until required
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 28 Oct 2016 12:58:29 +0000 (13:58 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 28 Oct 2016 19:53:43 +0000 (20:53 +0100)
We only need the active reference to keep the object alive after the
handle has been deleted (so as to prevent a synchronous gem_close). Why
then pay the price of a kref on every execbuf when we can insert that
final active ref just in time for the handle deletion?

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-6-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/intel_ringbuffer.c

index cf4b2427aff31288ee692dc2d8336cd1ff1d6590..edc59d08d0171a9170163d80c8b01b7294e566c3 100644 (file)
@@ -2246,6 +2246,12 @@ struct drm_i915_gem_object {
 #define __I915_BO_ACTIVE(bo) \
        ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
+       /**
+        * Have we taken a reference for the object for incomplete GPU
+        * activity?
+        */
+#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
+
        /**
         * This is set if the object has been written to since last bound
         * to the GTT
@@ -2407,6 +2413,28 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
        return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
 }
 
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+       return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
 static inline unsigned int
 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
 {
index 537f502123ea2c3dd7d54999a27a73bdc0f360bf..c0103044deded68faeae3d08ed0fb82d56aab954 100644 (file)
@@ -2661,7 +2661,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
                list_move_tail(&obj->global_list,
                               &request->i915->mm.bound_list);
 
-       i915_gem_object_put(obj);
+       if (i915_gem_object_has_active_reference(obj)) {
+               i915_gem_object_clear_active_reference(obj);
+               i915_gem_object_put(obj);
+       }
 }
 
 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2935,6 +2938,12 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
        list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
                if (vma->vm->file == fpriv)
                        i915_vma_close(vma);
+
+       if (i915_gem_object_is_active(obj) &&
+           !i915_gem_object_has_active_reference(obj)) {
+               i915_gem_object_set_active_reference(obj);
+               i915_gem_object_get(obj);
+       }
        mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -4475,6 +4484,17 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        intel_runtime_pm_put(dev_priv);
 }
 
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+       if (i915_gem_object_is_active(obj))
+               i915_gem_object_set_active_reference(obj);
+       else
+               i915_gem_object_put(obj);
+}
+
 int i915_gem_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
index ed989596d9a38249c87ac223c71285abb5d512f5..cb25cad3318c6d5db1744aa0367bc426974e6cc5 100644 (file)
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
                list_for_each_entry_safe(obj, next,
                                         &pool->cache_list[n],
                                         batch_pool_link)
-                       i915_gem_object_put(obj);
+                       __i915_gem_object_release_unless_active(obj);
 
                INIT_LIST_HEAD(&pool->cache_list[n]);
        }
index 5dca32ac1c67594a32a9e84970913306cb7e19a4..47e888cc721f362fd3e21834b846b149be02fa2e 100644 (file)
@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
                if (ce->ring)
                        intel_ring_free(ce->ring);
 
-               i915_vma_put(ce->state);
+               __i915_gem_object_release_unless_active(ce->state->obj);
        }
 
        put_pid(ctx->pid);
index 61365ae22b530f38bd288d57e2edb960457dc2be..4cafce97998a366ad1bf3723e9418da6d589faf9 100644 (file)
@@ -1290,8 +1290,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
-       if (!i915_gem_object_is_active(obj))
-               i915_gem_object_get(obj);
        i915_gem_object_set_active(obj, idx);
        i915_gem_active_set(&obj->last_read[idx], req);
 
index 947d5ad51fb7b2b22f3c950c9fb77399bc4b8581..a3a364478a89fe64c3048e1c305d97d7303196bb 100644 (file)
@@ -3734,11 +3734,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 {
        struct i915_vma *vma;
+       struct drm_i915_gem_object *obj;
 
        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;
 
+       obj = vma->obj;
+
        i915_vma_unpin(vma);
-       i915_vma_put(vma);
+       i915_vma_close(vma);
+
+       __i915_gem_object_release_unless_active(obj);
 }
index a98c0f42badd33d74fd053c6bf451c10e0f41cf5..e7c3dbcc6c8136c45c48742b50123ee7dafed7a5 100644 (file)
@@ -224,7 +224,8 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
        i915_vma_move_to_active(so.vma, req, 0);
 err_unpin:
        i915_vma_unpin(so.vma);
+       i915_vma_close(so.vma);
 err_obj:
-       i915_gem_object_put(obj);
+       __i915_gem_object_release_unless_active(obj);
        return ret;
 }
index 8ef735faa6034a6c6491e35d8a1fb8097d7d134e..8eee9675d3bfcec570742659ca702b224fbd9849 100644 (file)
@@ -1762,14 +1762,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
        struct i915_vma *vma;
+       struct drm_i915_gem_object *obj;
 
        vma = fetch_and_zero(&engine->status_page.vma);
        if (!vma)
                return;
 
+       obj = vma->obj;
+
        i915_vma_unpin(vma);
-       i915_gem_object_unpin_map(vma->obj);
-       i915_vma_put(vma);
+       i915_vma_close(vma);
+
+       i915_gem_object_unpin_map(obj);
+       __i915_gem_object_release_unless_active(obj);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
@@ -1967,7 +1972,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 void
 intel_ring_free(struct intel_ring *ring)
 {
-       i915_vma_put(ring->vma);
+       struct drm_i915_gem_object *obj = ring->vma->obj;
+
+       i915_vma_close(ring->vma);
+       __i915_gem_object_release_unless_active(obj);
+
        kfree(ring);
 }