int __must_check i915_add_request(struct intel_ring_buffer *ring,
				  struct drm_file *file,
				  struct drm_i915_gem_request *request);
-int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
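The rename is purely mechanical: the argument list and the __must_check return contract are unchanged, so every caller in the hunks below only swaps the name. A minimal caller sketch under that assumption (example_wait is a made-up name, not part of this patch):

/* Hypothetical caller: wait for a previously recorded seqno on a ring.
 * i915_wait_seqno() returns 0 on success or a negative errno (e.g.
 * -ERESTARTSYS if an interruptible wait is signalled), and it is marked
 * __must_check, so the result cannot be silently dropped. */
static int example_wait(struct intel_ring_buffer *ring, uint32_t seqno)
{
	int ret;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;
	return 0;
}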
 * request and object lists appropriately for that event.
 */
int
-i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret = 0;
	 * it.
	 */
	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
		i915_gem_retire_requests_ring(obj->ring);
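The hunk above shows the idiom this patch touches repeatedly: waiting only blocks until the seqno passes, while retiring is what moves completed requests and objects off the ring's active lists (clearing obj->active among other state). Condensed into one hypothetical helper for clarity:

/* Sketch of the wait-then-retire idiom from the hunk above; the helper
 * name is invented. Without the retire step the object would still be
 * marked active even though its last rendering has completed. */
static int example_wait_rendering(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
		i915_gem_retire_requests_ring(obj->ring);
	}
	return 0;
}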
			return ret;
	}
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev)
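Idling one ring thus reduces to waiting on the seqno the next request would get, i.e. a value ordered after everything already submitted. i915_gpu_idle(), whose body lies outside this hunk, presumably just applies that per-ring idle across all rings; a sketch under that assumption:

/* Sketch only: the loop over dev_priv->ring[] is an assumption about
 * i915_gpu_idle()'s body, which this diff does not show. */
static int example_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ret = i915_ring_idle(&dev_priv->ring[i]);
		if (ret)
			return ret;
	}
	return 0;
}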
	}
	if (obj->last_fenced_seqno) {
-		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;
	}
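Same rename in the fence path. last_fenced_seqno is tracked separately from last_rendering_seqno, since an object can be idle for rendering yet still have an outstanding fenced access. A sketch of the check, with the clearing of the field an assumption about the code just past this hunk's context window:

/* Hypothetical condensation: once the fenced access has been waited on,
 * the tracking seqno can be cleared. The zeroing line is assumed, not
 * visible in this hunk. */
static int example_flush_fence(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->last_fenced_seqno) {
		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;
		obj->last_fenced_seqno = 0;	/* assumed follow-up */
	}
	return 0;
}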
	overlay->last_flip_req = request->seqno;
	overlay->flip_tail = tail;
-	ret = i915_wait_request(ring, overlay->last_flip_req);
+	ret = i915_wait_seqno(ring, overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);
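In the overlay path the wait is followed by the device-wide i915_gem_retire_requests() rather than the per-ring variant, and overlay->last_flip_req doubles as the "flip outstanding" flag. A sketch of the surrounding helper's shape (the helper name and the final reset to 0 are assumptions, the reset not being visible in this hunk):

/* Sketch: record the new request's seqno, wait for it, retire completed
 * requests, then mark the overlay as having no outstanding flip. */
static int example_overlay_wait(struct intel_overlay *overlay,
				struct drm_i915_gem_request *request,
				struct intel_ring_buffer *ring)
{
	int ret;

	overlay->last_flip_req = request->seqno;
	ret = i915_wait_seqno(ring, overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(overlay->dev);

	overlay->last_flip_req = 0;	/* assumed: flip fully completed */
	return 0;
}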
	if (overlay->last_flip_req == 0)
		return 0;
-	ret = i915_wait_request(ring, overlay->last_flip_req);
+	ret = i915_wait_seqno(ring, overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);
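The recovery path is the same wait-and-retire sequence, guarded by last_flip_req == 0 meaning no flip is pending. Its assumed tail: after the pending flip completes, the deferred flip_tail callback recorded earlier (overlay->flip_tail = tail above) still has to run before the outstanding-flip marker is cleared. Both lines below are assumptions about code outside this hunk:

/* Assumed continuation of the recovery path, not shown in this hunk:
 * invoke the deferred cleanup callback, then record that no flip is
 * outstanding any more. */
if (overlay->flip_tail)
	overlay->flip_tail(overlay);
overlay->last_flip_req = 0;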
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
-	ret = i915_wait_request(ring, seqno);
+	ret = i915_wait_seqno(ring, seqno);
	dev_priv->mm.interruptible = was_interruptible;
	if (!ret)
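This caller cannot tolerate an interrupted wait, so it saves dev_priv->mm.interruptible, forces the wait to be uninterruptible, and restores the saved value afterwards; the truncated `if (!ret)` context suggests requests are then retired only when the wait actually succeeded. The idiom in isolation:

/* Sketch of the save/override/restore idiom around an uninterruptible
 * wait. The retire-on-success tail is an assumption based on the
 * truncated `if (!ret)` context line above. */
static int example_wait_uninterruptible(struct intel_ring_buffer *ring,
					uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	bool was_interruptible;
	int ret;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_wait_seqno(ring, seqno);

	dev_priv->mm.interruptible = was_interruptible;
	if (!ret)
		i915_gem_retire_requests_ring(ring);	/* assumed */
	return ret;
}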