void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *batch_obj);
+void __i915_add_request(struct intel_engine_cs *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *batch_obj);
#define i915_add_request(ring) \
__i915_add_request(ring, NULL, NULL)
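A minimal usage sketch of the two entry points declared above; ring, file and batch_obj are assumed to come from the surrounding submission path and are not defined here:

	/* Full form: tie the new request to the submitting file and batch object. */
	__i915_add_request(ring, file, batch_obj);

	/* Macro shorthand when there is nothing to associate, e.g. when flushing
	 * an outstanding lazy request before idling the ring. */
	i915_add_request(ring);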
int __i915_wait_request(struct drm_i915_gem_request *req,
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
- int ret;
-
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
- ret = 0;
if (req == req->ring->outstanding_lazy_request)
- ret = i915_add_request(req->ring);
+ i915_add_request(req->ring);
- return ret;
+ return 0;
}
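For illustration, a minimal sketch of the caller pattern this preserves; example_check_before_wait() is hypothetical and not part of the patch:

	/* Hypothetical caller: because i915_gem_check_olr() keeps its int
	 * return (now always 0), call sites of this shape keep compiling
	 * unchanged; the error branch simply becomes dead code. */
	static int example_check_before_wait(struct drm_i915_gem_request *req)
	{
		int ret;

		ret = i915_gem_check_olr(req);	/* flushes the lazy request if needed */
		if (ret)
			return ret;		/* never taken after this patch */

		return 0;
	}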
static void fake_irq(unsigned long data)
return 0;
}
-int __i915_add_request(struct intel_engine_cs *ring,
- struct drm_file *file,
- struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct intel_engine_cs *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
request = ring->outstanding_lazy_request;
if (WARN_ON(request == NULL))
- return -ENOMEM;
+ return;
if (i915.enable_execlists) {
ringbuf = request->ctx->engine[ring->id].ringbuf;
* is that the flush _must_ happen before the next request, no matter
* what.
*/
- if (i915.enable_execlists) {
+ if (i915.enable_execlists)
ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
- if (ret)
- return ret;
- } else {
+ else
ret = intel_ring_flush_all_caches(ring);
- if (ret)
- return ret;
- }
+ /* Not allowed to fail! */
+ WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
*/
request->postfix = intel_ring_get_tail(ringbuf);
- if (i915.enable_execlists) {
+ if (i915.enable_execlists)
ret = ring->emit_request(ringbuf, request);
- if (ret)
- return ret;
- } else {
+ else {
ret = ring->add_request(ring);
- if (ret)
- return ret;
request->tail = intel_ring_get_tail(ringbuf);
}
+ /* Not allowed to fail! */
+ WARN(ret, "emit|add_request failed: %d!\n", ret);
request->head = request_start;
/* Sanity check that the reserved size was large enough. */
intel_ring_reserved_space_end(ringbuf);
-
- return 0;
}
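A minimal sketch of the caller-side contract this establishes; submit_and_track() is hypothetical and only mirrors the shape of the call sites updated below:

	/* Hypothetical caller: __i915_add_request() now returns void, so the
	 * breadcrumb is emitted unconditionally and nothing propagates back;
	 * flush/emit failures are reported via WARN() inside the function
	 * rather than through a return code. */
	static void submit_and_track(struct intel_engine_cs *ring,
				     struct drm_file *file,
				     struct drm_i915_gem_object *batch_obj)
	{
		__i915_add_request(ring, file, batch_obj);
		/* no 'if (ret)' unwinding any more: the request is always tracked */
	}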
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
- (void)__i915_add_request(ring, file, obj);
+ __i915_add_request(ring, file, obj);
}
static int
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so.obj);
+ __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, file, so.obj);
+ __i915_add_request(ring, file, so.obj);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req,
ring->outstanding_lazy_request);
- ret = i915_add_request(ring);
- if (ret)
- return ret;
+ i915_add_request(ring);
overlay->flip_tail = tail;
ret = i915_wait_request(overlay->last_flip_req);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req,
ring->outstanding_lazy_request);
- return i915_add_request(ring);
+ i915_add_request(ring);
+
+ return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
int intel_ring_idle(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *req;
- int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_request) {
- ret = i915_add_request(ring);
- if (ret)
- return ret;
- }
+ if (ring->outstanding_lazy_request)
+ i915_add_request(ring);
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))