From 5b4a60c2768434a8c6d5f803a2410245334b8bf7 Mon Sep 17 00:00:00 2001
From: John Harrison
Date: Fri, 29 May 2015 17:43:34 +0100
Subject: [PATCH] drm/i915: Add flag to i915_add_request() to skip the cache
 flush

In order to explicitly track all GPU work (and completely remove the
outstanding lazy request), it is necessary to add extra i915_add_request()
calls to various places. Some of these do not need the implicit cache flush
done as part of the standard batch buffer submission process.

This patch adds a flag to _add_request() to specify whether the flush is
required or not.

For: VIZ-5115
Signed-off-by: John Harrison
Reviewed-by: Tomas Elf
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_drv.h              |  7 +++++--
 drivers/gpu/drm/i915/i915_gem.c              | 17 ++++++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |  2 +-
 5 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 14154c460762..104893bea2f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2890,9 +2890,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct intel_engine_cs *ring,
 			struct drm_file *file,
-			struct drm_i915_gem_object *batch_obj);
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
 #define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+	__i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+	__i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a0f51478581f..74c319350876 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2470,7 +2470,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  */
 void __i915_add_request(struct intel_engine_cs *ring,
 			struct drm_file *file,
-			struct drm_i915_gem_object *obj)
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
@@ -2502,12 +2503,14 @@ void __i915_add_request(struct intel_engine_cs *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists)
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-	else
-		ret = intel_ring_flush_all_caches(ring);
-	/* Not allowed to fail! */
-	WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+		else
+			ret = intel_ring_flush_all_caches(ring);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	}
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 76bfc68d1a88..a15517249bb9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->ring, params->file, params->batch_obj);
+	__i915_add_request(params->ring, params->file, params->batch_obj, true);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index ce4788ff3df5..4418616301e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	__i915_add_request(ring, NULL, so.obj);
+	__i915_add_request(ring, NULL, so.obj, true);
 	/* __i915_add_request moves object to inactive if it fails */
 out:
 	i915_gem_render_state_fini(&so);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 754aa39eb12b..47443b9aa172 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1599,7 +1599,7 @@ static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	__i915_add_request(ring, file, so.obj);
+	__i915_add_request(ring, file, so.obj, true);
 	/* intel_logical_ring_add_request moves object to inactive if it
 	 * fails */
 out:
-- 
2.20.1
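
For reference (not part of the patch): a minimal sketch of how a caller inside
the driver would now choose between the two macros added in i915_drv.h above.
The helper name and its "needs_flush" parameter are hypothetical; the only
assumption taken from the patch is that both macros expand to
__i915_add_request(ring, NULL, NULL, flush_caches).

	/*
	 * Illustrative sketch only. Both branches emit a request on "ring";
	 * the first also performs the implicit cache flush, the second skips
	 * it, matching the new flush_caches flag.
	 */
	static void emit_request_example(struct intel_engine_cs *ring,
					 bool needs_flush)
	{
		if (needs_flush)
			i915_add_request(ring);		/* flush caches, then add request */
		else
			i915_add_request_no_flush(ring);	/* skip the implicit flush */
	}

The intent, per the commit message, is that the normal batch buffer submission
path keeps passing true (as the updated call sites above do), while the extra
internally generated requests that have nothing to flush can use the no-flush
variant.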