 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct intel_engine_cs *ring,
                         struct drm_file *file,
-                        struct drm_i915_gem_object *batch_obj);
+                        struct drm_i915_gem_object *batch_obj,
+                        bool flush_caches);
 #define i915_add_request(ring) \
-        __i915_add_request(ring, NULL, NULL)
+        __i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+        __i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                         unsigned reset_counter,
                         bool interruptible,
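For context: every existing caller keeps the pre-change behaviour, since i915_add_request() expands to a call with flush_caches == true; the new i915_add_request_no_flush() macro is for paths that have already flushed their writes (or deliberately defer the flush). A minimal caller sketch, purely illustrative and assuming the declarations above are in scope (example_complete_request is a hypothetical name, not part of the patch):

/* Hypothetical caller, not part of this patch: choose between the two
 * macros depending on whether a flush was already emitted. */
static void example_complete_request(struct intel_engine_cs *ring,
                                     bool already_flushed)
{
        if (already_flushed)
                i915_add_request_no_flush(ring);  /* flush_caches == false */
        else
                i915_add_request(ring);           /* flush_caches == true */
}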
  */
 void __i915_add_request(struct intel_engine_cs *ring,
                         struct drm_file *file,
-                        struct drm_i915_gem_object *obj)
+                        struct drm_i915_gem_object *obj,
+                        bool flush_caches)
 {
         struct drm_i915_private *dev_priv = ring->dev->dev_private;
         struct drm_i915_gem_request *request;
          * is that the flush _must_ happen before the next request, no matter
          * what.
          */
-        if (i915.enable_execlists)
-                ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-        else
-                ret = intel_ring_flush_all_caches(ring);
-        /* Not allowed to fail! */
-        WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+        if (flush_caches) {
+                if (i915.enable_execlists)
+                        ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+                else
+                        ret = intel_ring_flush_all_caches(ring);
+                /* Not allowed to fail! */
+                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+        }
         /* Record the position of the start of the request so that
          * should we detect the updated seqno part-way through the
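When flush_caches is true the behaviour is unchanged: execlists submissions flush through the logical ring with the request's context, legacy submissions through the engine's ring, and a failure is only WARNed about because it cannot be unwound at this point. Condensed into a sketch for illustration (example_flush_all_caches is a hypothetical helper; the two flush calls are exactly those used in the hunk above):

/* Illustrative condensation of the guarded flush path above. */
static int example_flush_all_caches(struct intel_engine_cs *ring,
                                    struct intel_ringbuffer *ringbuf,
                                    struct intel_context *ctx)
{
        if (i915.enable_execlists)
                return logical_ring_flush_all_caches(ringbuf, ctx);

        return intel_ring_flush_all_caches(ring);
}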
         params->ring->gpu_caches_dirty = true;
         /* Add a breadcrumb for the completion of the batch buffer */
-        __i915_add_request(params->ring, params->file, params->batch_obj);
+        __i915_add_request(params->ring, params->file, params->batch_obj, true);
 }
 static int
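The execbuffer call site deliberately passes true: the batch that just executed leaves the GPU caches dirty, so the breadcrumb request must carry the flush. A hedged sketch of that tail (the wrapper function and the i915_execbuffer_params type name are assumptions inferred from the field accesses above, not confirmed by this excerpt):

/* Hypothetical wrapper around the call-site change above: mark caches
 * dirty, then emit the breadcrumb request with the flush enabled. */
static void example_execbuf_retire(struct i915_execbuffer_params *params)
{
        params->ring->gpu_caches_dirty = true;

        /* Add a breadcrumb for the completion of the batch buffer */
        __i915_add_request(params->ring, params->file,
                           params->batch_obj, true);
}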
         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
-        __i915_add_request(ring, NULL, so.obj);
+        __i915_add_request(ring, NULL, so.obj, true);
         /* __i915_add_request moves object to inactive if it fails */
 out:
         i915_gem_render_state_fini(&so);
         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
-        __i915_add_request(ring, file, so.obj);
+        __i915_add_request(ring, file, so.obj, true);
         /* intel_logical_ring_add_request moves object to inactive if it
          * fails */
 out: