i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
- i915_gem_request_get_seqno(work->flip_queued_req)));
+ i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
*pdst = src;
}
+/*
+ * XXX: i915_gem_request_completed() should live here, but it currently
+ * needs the definition of i915_seqno_passed(), which appears further
+ * below. It will be moved here in a later patch, once the call to
+ * i915_seqno_passed() has been eliminated...
+ */
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno;
+
+ BUG_ON(req == NULL);
+
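+ /*
+ * Ask the ring for its last completed seqno; lazy_coherency is passed
+ * straight through to get_seqno() and selects how much effort is spent
+ * obtaining an up-to-date value.
+ */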
+ seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+ return i915_seqno_passed(seqno, req->seqno);
+}
+
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
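
The call-site conversions in the hunks that follow are mechanical; distilled
from them, the pattern is:

	/* Before: open-coded comparison against the ring's breadcrumb. */
	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      i915_gem_request_get_seqno(req)))
		return 0;

	/* After: the request already knows its ring and its seqno. */
	if (i915_gem_request_completed(req, true))
		return 0;
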
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- i915_gem_request_get_seqno(req)))
+ if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false),
- i915_gem_request_get_seqno(req))) {
+ if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
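
Note that the polling loop above uses lazy_coherency=false (a fully coherent
seqno read on each pass), whereas the opportunistic early-out check before
the wait used true; both flags are simply forwarded to ring->get_seqno().
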
if (ring == NULL)
return;
- if (i915_seqno_passed(ring->get_seqno(ring, true),
- i915_gem_request_get_seqno(obj->last_read_req)))
+ if (i915_gem_request_completed(obj->last_read_req, true))
i915_gem_object_move_to_inactive(obj);
}
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
- u32 completed_seqno;
-
- completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
- if (i915_seqno_passed(completed_seqno, request->seqno))
+ if (i915_gem_request_completed(request, false))
continue;
return request;
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- uint32_t seqno;
-
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev));
- seqno = ring->get_seqno(ring, true);
-
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
struct drm_i915_gem_object,
ring_list);
- if (!i915_seqno_passed(seqno,
- i915_gem_request_get_seqno(obj->last_read_req)))
+ if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
struct drm_i915_gem_request,
list);
- if (!i915_seqno_passed(seqno, request->seqno))
+ if (!i915_gem_request_completed(request, true))
break;
trace_i915_gem_request_retire(request);
}
if (unlikely(ring->trace_irq_seqno &&
- i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+ i915_seqno_passed(ring->get_seqno(ring, true),
+ ring->trace_irq_seqno))) {
ring->irq_put(ring);
ring->trace_irq_seqno = 0;
}
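
Note that the trace_irq check above deliberately keeps i915_seqno_passed():
ring->trace_irq_seqno is a bare u32 with no request attached, so it cannot
go through i915_gem_request_completed(). With the cached 'seqno' local gone,
the current breadcrumb is instead fetched inline via ring->get_seqno().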