Now that all callers of i915_add_request() have a request pointer to hand, it
is possible to update the add request function to take a request pointer
rather than pulling it out of the ring's outstanding lazy request (OLR).
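With the new signature, __i915_add_request() derives the ring, ringbuffer and
private data from the request itself instead of the other way around. Call
sites that previously went back through the ring, for example the overlay
flip path below, now simply pass the request they already hold:

	/* before */
	i915_gem_request_assign(&overlay->last_flip_req, req);
	i915_add_request(req->ring);

	/* after */
	i915_gem_request_assign(&overlay->last_flip_req, req);
	i915_add_request(req);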
For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *req,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
bool flush_caches);
-#define i915_add_request(ring) \
- __i915_add_request(ring, NULL, NULL, true)
-#define i915_add_request_no_flush(ring) \
- __i915_add_request(ring, NULL, NULL, false)
+#define i915_add_request(req) \
+ __i915_add_request(req, NULL, NULL, true)
+#define i915_add_request_no_flush(req) \
+ __i915_add_request(req, NULL, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
if (req == req->ring->outstanding_lazy_request)
- i915_add_request(req->ring);
+ i915_add_request(req);
return 0;
}
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_file *file,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_request *request;
+ struct intel_engine_cs *ring;
+ struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
int ret;
- request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
return;
- if (i915.enable_execlists) {
- ringbuf = request->ctx->engine[ring->id].ringbuf;
- } else
- ringbuf = ring->buffer;
+ ring = request->ring;
+ dev_priv = ring->dev->dev_private;
+ ringbuf = request->ringbuf;
+
+ WARN_ON(request != ring->outstanding_lazy_request);
/*
* To ensure that this call will not fail, space for its emissions
return ret;
}
- i915_add_request_no_flush(req->ring);
+ i915_add_request_no_flush(req);
}
WARN_ON(ring->outstanding_lazy_request);
goto out;
}
- i915_add_request_no_flush(ring);
+ i915_add_request_no_flush(req);
}
out:
params->ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- __i915_add_request(params->ring, params->file, params->batch_obj, true);
+ __i915_add_request(params->request, params->file, params->batch_obj, true);
}
static int
}
if (request)
- i915_add_request_no_flush(request->ring);
+ i915_add_request_no_flush(request);
work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
goto error;
}
- i915_add_request_no_flush(req->ring);
+ i915_add_request_no_flush(req);
}
ctx->rcs_initialized = true;
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
- i915_add_request(req->ring);
+ i915_add_request(req);
overlay->flip_tail = tail;
ret = i915_wait_request(overlay->last_flip_req);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
- i915_add_request(req->ring);
+ i915_add_request(req);
return 0;
}
struct drm_i915_gem_request *req;
/* We need to add any requests required to flush the objects and ring */
+ WARN_ON(ring->outstanding_lazy_request);
if (ring->outstanding_lazy_request)
- i915_add_request(ring);
+ i915_add_request(ring->outstanding_lazy_request);
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))