*/
unsigned int fence_dirty:1;
- /**
- * Is the object at the current location in the gtt mappable and
- * fenceable? Used to avoid costly recalculations.
- */
- unsigned int map_and_fenceable:1;
-
/**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separately for a more
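The per-object bit goes away because mappability and fenceability are properties of one particular binding, not of the object: a single object can be bound through several GGTT views at once (normal, rotated, partial), and only the placement of the specific VMA decides the answer. A minimal userspace model of that ownership move, all names hypothetical:

#include <stdbool.h>
#include <stdint.h>

#define TOY_VMA_CAN_FENCE (1u << 9)     /* mirrors I915_VMA_CAN_FENCE */

struct toy_vma {
        uint64_t node_start, node_size; /* placement in the aperture */
        unsigned int flags;             /* per-binding state */
};

struct toy_obj {
        /* no map_and_fenceable here: each binding (view) of the
         * object answers the question for its own placement
         */
        struct toy_vma *views[3];
};

static bool toy_vma_is_map_and_fenceable(const struct toy_vma *vma)
{
        return vma->flags & TOY_VMA_CAN_FENCE;
}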
GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!obj->pages);
- if (i915_vma_is_ggtt(vma) &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ if (i915_vma_is_map_and_fenceable(vma)) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
return ret;
__i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
}
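Two details in this hunk: the guard tightens from "any normal GGTT view" to "map-and-fenceable", since only such a binding can hold a fence register or a GTT iomap in the first place, and the flag is dropped with a masked clear once the mapping is gone, because fenceability was derived purely from the node's now-released placement. A trivial runnable check of the masked clear, bit values copied from the header hunk below:

#include <assert.h>

#define I915_VMA_GGTT      (1u << 8)
#define I915_VMA_CAN_FENCE (1u << 9)

int main(void)
{
        unsigned int flags = I915_VMA_GGTT | I915_VMA_CAN_FENCE;

        flags &= ~I915_VMA_CAN_FENCE;           /* what unbind does */
        assert(flags & I915_VMA_GGTT);          /* unrelated bits survive */
        assert(!(flags & I915_VMA_CAN_FENCE));
        return 0;
}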
if (likely(!vma->vm->closed)) {
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- if (i915_vma_is_ggtt(vma)) {
- if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
- obj->map_and_fenceable = false;
- } else if (vma->pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
+ if (vma->pages != obj->pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
}
vma->pages = NULL;
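The view-type test disappears from the pages teardown as well: rather than asking "is this a normal view?", the code asks directly whether the vma owns its scatterlist. A normal GGTT view aliases obj->pages; rotated and partial views build a private sg_table that must die with the binding. A kernel-style sketch of the two cases; the helpers are hypothetical, but sg_alloc_table()/sg_free_table() are the real scatterlist API:

/* normal view: borrow the object's backing store, so at unbind
 * vma->pages == obj->pages and nothing is freed
 */
static struct sg_table *toy_get_pages_normal(struct drm_i915_gem_object *obj)
{
        return obj->pages;
}

/* partial/rotated view: allocate a private table; the pointer
 * comparison at unbind (vma->pages != obj->pages) then routes it
 * to sg_free_table() + kfree() instead of leaking it
 */
static struct sg_table *toy_get_pages_view(unsigned int nents)
{
        struct sg_table *st = kmalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return ERR_PTR(-ENOMEM);
        if (sg_alloc_table(st, nents, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }
        return st;
}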
static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
-
if (!drm_mm_node_allocated(&vma->node))
return false;
if (alignment && vma->node.start & (alignment - 1))
return true;
- if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
u32 fence_size, fence_alignment;
fence_size = i915_gem_get_ggtt_size(dev_priv,
- obj->base.size,
+ vma->size,
i915_gem_object_get_tiling(obj));
fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- obj->base.size,
+ vma->size,
i915_gem_object_get_tiling(obj),
true);
mappable = (vma->node.start + fence_size <=
dev_priv->ggtt.mappable_end);
- obj->map_and_fenceable = mappable && fenceable;
+ if (mappable && fenceable)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
}
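The computation itself is untouched; only where the result lands changes, from a bool on the object to a flag bit on the vma. The fenceable half (elided from this hunk) checks that the node is exactly fence-sized and fence-aligned; the mappable half checks that it ends inside the CPU-visible part of the aperture. A runnable sketch with hypothetical numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I915_VMA_CAN_FENCE (1u << 9)

int main(void)
{
        /* hypothetical placement: 1 MiB node at 4 MiB, in an
         * aperture whose first 64 MiB are CPU-mappable
         */
        uint64_t node_start = 4ull << 20, node_size = 1ull << 20;
        uint64_t fence_size = 1ull << 20, fence_alignment = 1ull << 20;
        uint64_t mappable_end = 64ull << 20;
        unsigned int flags = 0;

        bool fenceable = node_size == fence_size &&
                         (node_start & (fence_alignment - 1)) == 0;
        bool mappable = node_start + fence_size <= mappable_end;

        if (mappable && fenceable)
                flags |= I915_VMA_CAN_FENCE;
        else
                flags &= ~I915_VMA_CAN_FENCE;

        printf("can fence: %s\n",
               flags & I915_VMA_CAN_FENCE ? "yes" : "no");
        return 0;
}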
int __i915_vma_do_pin(struct i915_vma *vma,
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
- " offset=%08x, req.alignment=%llx, req.map_and_fenceable=%d,"
- " obj->map_and_fenceable=%d\n",
- i915_ggtt_offset(vma),
- alignment,
+ " offset=%08x, req.alignment=%llx,"
+ " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
+ i915_ggtt_offset(vma), alignment,
!!(flags & PIN_MAPPABLE),
- obj->map_and_fenceable);
+ i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
eb_vma_misplaced(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- struct drm_i915_gem_object *obj = vma->obj;
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
return true;
/* avoid costly ping-pong once a batch bo ended up non-mappable */
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_map_and_fenceable(vma))
return !only_mappable_for_reloc(entry->flags);
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
!is_power_of_2(vma->node.size) ||
(vma->node.start & (vma->node.size - 1)),
"object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
- vma->node.start, obj->map_and_fenceable, vma->node.size);
+ vma->node.start,
+ i915_vma_is_map_and_fenceable(vma),
+ vma->node.size);
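The WARN enforces the old-style fence constraint: a fenced region must be power-of-two sized (the "1M" in the message is the gen3 minimum) and placed on a multiple of its own size. Both halves are the standard power-of-two idioms, restated standalone:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_power_of_2(uint64_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* size must be a pot, and start must sit on a multiple of size */
static bool fence_placement_ok(uint64_t start, uint64_t size)
{
        return is_power_of_2(size) && (start & (size - 1)) == 0;
}

int main(void)
{
        assert(fence_placement_ok(2ull << 20, 1ull << 20));
        assert(!fence_placement_ok(3ull << 20, 2ull << 20)); /* misaligned */
        assert(!fence_placement_ok(0, 3ull << 20));          /* not a pot */
        return 0;
}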
if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
return 0;
}
} else if (enable) {
- if (WARN_ON(!obj->map_and_fenceable))
- return -EINVAL;
-
reg = i915_find_fence_reg(dev);
if (IS_ERR(reg))
return PTR_ERR(reg);
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!vma->obj->map_and_fenceable))
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
return IO_ERR_PTR(-ENODEV);
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
#define I915_VMA_LOCAL_BIND BIT(7)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CLOSED BIT(9)
+#define I915_VMA_GGTT           BIT(8)
+#define I915_VMA_CAN_FENCE      BIT(9)
+#define I915_VMA_CLOSED         BIT(10)
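Since I915_VMA_CAN_FENCE slots in at BIT(9), I915_VMA_CLOSED moves up to BIT(10), and the whole flag set has to stay disjoint from the pin count and bind bits below it. A compile-time sanity check in that spirit; I915_VMA_GLOBAL_BIND at BIT(6) is assumed from the surrounding header:

#include <assert.h>

#define BIT(n) (1u << (n))

#define I915_VMA_GLOBAL_BIND    BIT(6)  /* assumed from the header */
#define I915_VMA_LOCAL_BIND     BIT(7)
#define I915_VMA_GGTT           BIT(8)
#define I915_VMA_CAN_FENCE      BIT(9)
#define I915_VMA_CLOSED         BIT(10)

/* inserting CAN_FENCE must leave every flag a distinct single bit */
static_assert((I915_VMA_CAN_FENCE &
               (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND |
                I915_VMA_GGTT | I915_VMA_CLOSED)) == 0, "bit collision");
static_assert((I915_VMA_CLOSED &
               (I915_VMA_GGTT | I915_VMA_GLOBAL_BIND |
                I915_VMA_LOCAL_BIND)) == 0, "bit collision");

int main(void) { return 0; }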
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
return vma->flags & I915_VMA_GGTT;
}
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
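With the helper in place, callers test one cached predicate instead of re-deriving "GGTT, normal view, placed right" at every site. A hypothetical caller shape, modelled on the i915_vma_pin_iomap() guard visible earlier in this patch:

/* hypothetical: anything that needs the CPU-visible GTT mapping
 * or a fence consults the cached per-VMA answer first
 */
static void __iomem *toy_map_for_scanout(struct i915_vma *vma)
{
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
                return NULL;
        return i915_vma_pin_iomap(vma);
}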
+
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_CLOSED;
if (!vma)
return 0;
- if (!obj->map_and_fenceable)
+ if (!i915_vma_is_map_and_fenceable(vma))
return 0;
if (IS_GEN3(dev_priv)) {
goto bad;
}
- size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
+ size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
if (vma->node.size < size)
goto bad;
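Passing vma->size instead of obj->base.size matters once partial views exist: the fence has to cover the binding actually sitting in the aperture, which for a partial view can be much smaller than the whole object. A runnable toy version of the gen3 rounding rule (power-of-two, 1 MiB minimum), sizes hypothetical:

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for the gen3 branch of i915_gem_get_ggtt_size():
 * round a tiled size up to the next power of two, 1 MiB minimum
 */
static uint64_t toy_fence_size(uint64_t size)
{
        uint64_t ggtt_size = 1ull << 20;

        while (ggtt_size < size)
                ggtt_size <<= 1;
        return ggtt_size;
}

int main(void)
{
        uint64_t obj_size = 6ull << 20; /* whole object: 6 MiB */
        uint64_t vma_size = 2ull << 20; /* partial view: 2 MiB */

        /* fencing the view needs 2 MiB, not the 8 MiB the whole
         * object would round up to
         */
        printf("obj rounds to %llu MiB, vma to %llu MiB\n",
               (unsigned long long)(toy_fence_size(obj_size) >> 20),
               (unsigned long long)(toy_fence_size(vma_size) >> 20));
        return 0;
}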
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (view.type == I915_GGTT_VIEW_NORMAL) {
+ if (i915_vma_is_map_and_fenceable(vma)) {
ret = i915_gem_object_get_fence(obj);
if (ret == -EDEADLK) {
/*
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
intel_fill_fb_ggtt_view(&view, fb, rotation);
+ vma = i915_gem_object_to_ggtt(obj, &view);
- if (view.type == I915_GGTT_VIEW_NORMAL)
+ if (i915_vma_is_map_and_fenceable(vma))
i915_gem_object_unpin_fence(obj);
- vma = i915_gem_object_to_ggtt(obj, &view);
i915_gem_object_unpin_from_display_plane(vma);
}
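The unpin path now has to resolve the VMA before deciding about the fence, because the predicate lives on the VMA; that is why i915_gem_object_to_ggtt() moves above the check. Read together with the pin-side hunk above, the two sides stay symmetric: a fence is only taken, and only released, for a map-and-fenceable binding. Condensed kernel-style sketch, error handling elided:

        /* pin: take a fence only for mappable + fenceable bindings */
        if (i915_vma_is_map_and_fenceable(vma))
                ret = i915_gem_object_get_fence(obj);

        /* unpin: resolve the vma first, then mirror the same test */
        vma = i915_gem_object_to_ggtt(obj, &view);
        if (i915_vma_is_map_and_fenceable(vma))
                i915_gem_object_unpin_fence(obj);
        i915_gem_object_unpin_from_display_plane(vma);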