int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
-	int ret = 0;
+	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->cache_level == cache_level)
-		goto out;
+		return 0;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
@@ ... @@ i915_gem_object_set_cache_level
		}
	}

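+	/* If the object was last written through the CPU domain while its
+	 * cache level was coherent, those writes were never clflushed.
+	 * Record that the cachelines are dirty so the flush still happens
+	 * before anyone relies on the new (possibly non-coherent) level.
+	 */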
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+	    cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+		obj->cache_dirty = true;
+
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

-out:
-	/* Flush the dirty CPU caches to the backing storage so that the
-	 * object is now coherent at its new cache level (with respect
-	 * to the access domain).
-	 */
-	if (obj->cache_dirty && cpu_write_needs_clflush(obj))
-		i915_gem_clflush_object(obj, true);
-
	return 0;
}
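With the out: label removed, i915_gem_object_set_cache_level() no longer clflushes eagerly on every transition; dirtiness is tracked in obj->cache_dirty, and the flush is deferred to the two display paths below, which need the pages coherent in memory for scanout.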
@@ ... @@ i915_gem_object_pin_to_display_plane
	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

-	i915_gem_object_flush_cpu_write_domain(obj);
+	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+	if (obj->cache_dirty) {
+		i915_gem_clflush_object(obj, true);
+		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+	}

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
@@ ... @@ intel_user_framebuffer_dirty
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
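+	/* The display engine reads straight from memory and does not snoop
+	 * the CPU cache, so any dirty cachelines belonging to a pinned
+	 * scanout object must be flushed before the frontbuffer flush is
+	 * signalled.
+	 */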
+	if (obj->pin_display && obj->cache_dirty)
+		i915_gem_clflush_object(obj, true);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);
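
For reference, the coherency test used in the first hunk is a one-line helper in i915_drv.h. A minimal sketch of it as it read around this time (reconstructed, so treat the exact body as approximate): the CPU cache counts as coherent on LLC platforms, or whenever the object is snooped, i.e. at any cache level other than I915_CACHE_NONE.

static inline bool cpu_cache_is_coherent(struct drm_device *dev,
					 enum i915_cache_level level)
{
	/* LLC machines are always coherent; otherwise only snooped
	 * (non-NONE) cache levels are.
	 */
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

Read together with the first hunk: a CPU write to an LLC or snooped object skips the clflush, so moving such an object towards I915_CACHE_NONE is exactly the case the new cache_dirty marking has to catch.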