u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                                           struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
        if (!obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(obj, obj->cache_level);
-       ret = i915_gem_object_get_fence(obj, NULL);
+       ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto unlock;
/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * For an untiled surface, this removes any existing fence.
 */
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                          struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *pipelined;
        struct drm_i915_fence_reg *reg;
        int ret;
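
For context, here is a minimal caller sketch under the new single-argument API, assuming the usual i915 driver headers and that dev->struct_mutex is held as at the call sites in this patch. The wrapper name example_fenced_gtt_access() is hypothetical; only i915_gem_object_get_fence(), i915_gem_object_put_fence(), i915_gem_gtt_bind_object() and the obj fields it touches come from the code shown above.

/* Illustrative sketch only (not part of the patch): a caller that needs
 * fenced GTT access no longer passes a ring to get_fence().
 */
static int example_fenced_gtt_access(struct drm_i915_gem_object *obj)
{
        int ret;

        /* Bind into the global GTT first, mirroring the fault-path hunk. */
        if (!obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(obj, obj->cache_level);

        /* The ring argument is gone; how the fence update is ordered is
         * now internal to i915_gem_object_get_fence().
         */
        ret = i915_gem_object_get_fence(obj);
        if (ret)
                return ret;

        /* ... fenced GTT access happens here ... */

        /* Shown only to pair the calls; real callers normally leave the
         * fence to be reclaimed through the fence-register LRU.
         */
        return i915_gem_object_put_fence(obj);
}
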
                if (has_fenced_gpu_access) {
                        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                               ret = i915_gem_object_get_fence(obj, ring);
+                               ret = i915_gem_object_get_fence(obj);
                                if (ret)
                                        goto err_unpin;
         * framebuffer compression. For simplicity, we always install
         * a fence as the cost is not that onerous.
         */
-       ret = i915_gem_object_get_fence(obj, pipelined);
+       ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto err_unpin;
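
Note the difference in policy between the last two call sites: the display path always requests a fence for scan-out (per the comment above), while the execbuffer path only does so when userspace sets EXEC_OBJECT_NEEDS_FENCE and the hardware has fenced GPU access. A sketch of that conditional pattern with the new signature, using a hypothetical wrapper, could look like:

/* Illustrative sketch only: the execbuffer-style policy under the new
 * signature.  'entry' is the userspace exec object as in the hunk above;
 * the wrapper itself is hypothetical.
 */
static int example_fence_for_execbuf(struct drm_i915_gem_object *obj,
                                     struct drm_i915_gem_exec_object2 *entry,
                                     bool has_fenced_gpu_access)
{
        int ret;

        if (has_fenced_gpu_access &&
            (entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
                /* No ring is passed any more; serialisation of the fence
                 * update is handled inside i915_gem_object_get_fence().
                 */
                ret = i915_gem_object_get_fence(obj);
                if (ret)
                        return ret;
        }

        return 0;
}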