unsigned int cache_dirty:1;
atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once: ORIGIN_GTT until demoted to ORIGIN_CPU */
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
- unsigned int has_wc_mmap;
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int pin_display;
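For context, the new field stores a value from the driver's fb_op_origin enumeration (declared in intel_drv.h in this era). A sketch of its shape for reference; only the first two values are ever stored in frontbuffer_ggtt_origin:

/* From intel_drv.h (reference only, not part of this patch):
 * the write origins used by frontbuffer tracking. */
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};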
static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
- return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
- ORIGIN_GTT : ORIGIN_CPU;
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
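The result feeds straight into frontbuffer invalidation. A hedged sketch of a typical call site, paraphrasing the set-domain ioctl rather than quoting a hunk from this patch:

/* In i915_gem_set_domain_ioctl(): report a pending write with its
 * true origin, so a GTT-domain write through a WC or untracked
 * mapping is handled as a CPU write by frontbuffer tracking. */
if (write_domain != 0)
	intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));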
up_write(&mm->mmap_sem);
/* This may race, but that's ok: the field only ever gets set, never cleared */
- WRITE_ONCE(obj->has_wc_mmap, true);
+ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put_unlocked(obj);
if (IS_ERR((void *)addr))
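The hunk above is reached when userspace requests a write-combined CPU mapping. A sketch of the trigger from the userspace side (error handling elided; fd and handle are assumed to be a valid DRM fd and GEM handle):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask for a WC CPU mmap of the object; writes through it bypass
 * the GTT, so the kernel demotes the frontbuffer origin above. */
struct drm_i915_gem_mmap arg = {
	.handle = handle,
	.size = size,
	.flags = I915_MMAP_WC,
};
ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
void *ptr = (void *)(uintptr_t)arg.addr_ptr;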
if (chunk_size >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
+ /* Userspace is now writing through an untracked VMA, abandon
+ * all hope that the hardware is able to track future writes.
+ */
+ obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
}
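Userspace lands in this fallback via a GTT mmap of an object too large to bind fully in the mappable aperture, so the fault handler can only pin a partial view. A hedged sketch of that path from userspace (error handling elided; fd, handle and size are assumptions):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Map the object through the GTT aperture. On large objects the
 * fault handler binds partial GGTT views, which is exactly when
 * the driver abandons GTT write tracking (ORIGIN_CPU above). */
struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, arg.offset);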
if (IS_ERR(vma)) {
obj->ops = ops;
+ obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
obj->madv = I915_MADV_WILLNEED;
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
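Taken together, the hunks give the field a one-way lifecycle, summarised below (commentary, not part of the patch):

/* obj->frontbuffer_ggtt_origin lifecycle:
 *
 *   i915_gem_object_init()     ORIGIN_GTT  (GTT writes are tracked)
 *   WC mmap (I915_MMAP_WC)     ORIGIN_CPU  (writes bypass the GTT)
 *   partial-view GGTT fault    ORIGIN_CPU  (VMA is untracked)
 *
 * The value never returns to ORIGIN_GTT, so the racy WRITE_ONCE()
 * in the mmap ioctl is safe: every late writer stores the same
 * demoted value. */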