i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
-
- if (obj->fenced_gpu_access) {
- obj->last_fenced_seqno = seqno;
-
- /* Bump MRU to take account of the delayed flush */
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
- }
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ ... @@ i915_gem_object_move_to_inactive
obj->base.write_domain = 0;
obj->last_fenced_seqno = 0;
- obj->fenced_gpu_access = false;
obj->active = 0;
drm_gem_object_unreference(&obj->base);
@@ ... @@ i915_gem_object_wait_fence
obj->last_fenced_seqno = 0;
}
- obj->fenced_gpu_access = false;
return 0;
}
@@ ... @@ i915_gem_execbuffer_reserve_vma
{
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
uint64_t flags;
int ret;
entry->flags |= __EXEC_OBJECT_HAS_PIN;
- if (has_fenced_gpu_access) {
- if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- ret = i915_gem_object_get_fence(obj);
- if (ret)
- return ret;
-
- if (i915_gem_object_pin_fence(obj))
- entry->flags |= __EXEC_OBJECT_HAS_FENCE;
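+ /* NEEDS_FENCE is now cleared for gen4+ in execbuffer_reserve, so no generation check is needed here */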
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ return ret;
- obj->pending_fenced_gpu_access = true;
- }
+ if (i915_gem_object_pin_fence(obj))
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
if (entry->offset != vma->node.start) {
@@ ... @@ i915_gem_execbuffer_reserve
obj = vma->obj;
entry = vma->exec_entry;
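+ /* Only pre-gen4 hardware uses fence registers for GPU access; drop the fence request up front on newer gens */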
+ if (!has_fenced_gpu_access)
+ entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
- has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
- obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_vmas, vmas);
@@ ... @@
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
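+ /* Sample the seqno once for the batch; it is reused below for the write and fence bookkeeping */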
+ u32 seqno = intel_ring_get_seqno(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = intel_ring_get_seqno(ring);
+ obj->last_write_seqno = seqno;
intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
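+ /* Track fenced GPU activity via the execbuffer flag instead of obj->fenced_gpu_access */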
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ obj->last_fenced_seqno = seqno;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
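+ /* Bump MRU to take account of the delayed flush */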
+ list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}