        u64 size = 0;
        struct i915_vma *vma;

-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (i915_is_ggtt(vma->vm) &&
-                   drm_mm_node_allocated(&vma->node))
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (i915_is_ggtt(vma->vm) && drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }
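
/*
 * The walk above totals drm_mm node sizes for every GGTT-bound view of the
 * object; iteration over obj->vma_list now names the obj_link member.
 */
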
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (vma->pin_count > 0)
                        pin_count++;
        }
                seq_printf(m, " (display)");
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
                           i915_is_ggtt(vma->vm) ? "g" : "pp",
                           vma->node.start, vma->node.size);
        }
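
/*
 * Debugfs describe output: each VMA prints its placement, with "g" vs "pp"
 * distinguishing the global GTT from a per-process GTT address space.
 */
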
        total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(vma, head, mm_list) {
+       list_for_each_entry(vma, head, vm_link) {
                seq_printf(m, " ");
                describe_obj(m, vma->obj);
                seq_printf(m, "\n");
                stats->shared += obj->base.size;

        if (USES_FULL_PPGTT(obj->base.dev)) {
-               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        struct i915_hw_ppgtt *ppgtt;

                        if (!drm_mm_node_allocated(&vma->node))
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
-       count_vmas(&vm->active_list, mm_list);
+       count_vmas(&vm->active_list, vm_link);
        seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);

        size = count = mappable_size = mappable_count = 0;
-       count_vmas(&vm->inactive_list, mm_list);
+       count_vmas(&vm->inactive_list, vm_link);
        seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);
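
/*
 * count_vmas() is passed the bare member name (vm_link), so it is evidently
 * a macro that forwards the token into list_for_each_entry(); that is why
 * the rename reaches these call sites as well.
 */
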
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+       list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
-       list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+       list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
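
/*
 * Aperture accounting: a bound VMA sits on at most one of the GGTT's
 * active/inactive lists via vm_link, so both lists are walked to sum the
 * pinned size.
 */
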
        int ret;

        drm_gem_object_reference(&obj->base);
-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
                if (i915_vma_unbind(vma))
                        break;
        list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
        i915_gem_request_assign(&obj->last_read_req[ring->id], req);

-       list_move_tail(&vma->mm_list, &vma->vm->active_list);
+       list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
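
/*
 * Moving to the tail of vm->active_list keeps that list in
 * least-recently-used order, which the eviction scans further down rely on.
 */
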
static void
        list_move_tail(&obj->global_list,
                       &to_i915(obj->base.dev)->mm.bound_list);
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (!list_empty(&vma->mm_list))
-                       list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!list_empty(&vma->vm_link))
+                       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        }

        i915_gem_request_assign(&obj->last_fenced_req, NULL);
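
/*
 * On retire, every bound VMA of the object drops back to its address
 * space's inactive list; the list_empty(&vma->vm_link) check skips VMAs
 * that are not currently bound (vm_link is initialised empty at creation
 * and re-emptied by list_del_init() on unbind).
 */
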
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;

-       if (list_empty(&vma->vma_link))
+       if (list_empty(&vma->obj_link))
                return 0;

        if (!drm_mm_node_allocated(&vma->node)) {
        vma->vm->unbind_vma(vma);
        vma->bound = 0;

-       list_del_init(&vma->mm_list);
+       list_del_init(&vma->vm_link);
        if (i915_is_ggtt(vma->vm)) {
                if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
                        obj->map_and_fenceable = false;
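
/*
 * Unbind uses list_del_init() rather than list_del() so the node is left
 * re-initialised and the list_empty() tests on vm_link and obj_link
 * elsewhere in this patch remain valid after the VMA has left the LRU.
 */
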
                goto err_remove_node;
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&vma->mm_list, &vm->inactive_list);
+       list_add_tail(&vma->vm_link, &vm->inactive_list);

        return vma;
        /* And bump the LRU for this access */
        vma = i915_gem_obj_to_ggtt(obj);
        if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-               list_move_tail(&vma->mm_list,
+               list_move_tail(&vma->vm_link,
                               &to_i915(obj->base.dev)->gtt.base.inactive_list);

        return 0;
         * catch the issue of the CS prefetch crossing page boundaries and
         * reading an invalid PTE on older architectures.
         */
-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
         */
        }

-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
                }
        }

-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                vma->node.color = cache_level;
        obj->cache_level = cache_level;
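
/*
 * A cache-level change propagates to vma->node.color on every VMA;
 * presumably the drm_mm color is what enforces placement constraints
 * between differently-cached neighbours.
 */
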
        trace_i915_gem_object_destroy(obj);

-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
                int ret;

                vma->pin_count = 0;
                                 struct i915_address_space *vm)
{
        struct i915_vma *vma;

-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
                    vma->vm == vm)
                        return vma;
        if (WARN_ONCE(!view, "no view specified"))
                return ERR_PTR(-EINVAL);

-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        if (!i915_is_ggtt(vm))
                i915_ppgtt_put(i915_vm_to_ppgtt(vm));

-       list_del(&vma->vma_link);
+       list_del(&vma->obj_link);

        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
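
/*
 * VMA destruction: list_del(&vma->obj_link) undoes the
 * list_add_tail(&vma->obj_link, &obj->vma_list) performed at creation
 * (see the INIT_LIST_HEAD hunk further down) before the slab free.
 */
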
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

-       list_for_each_entry(vma, &o->vma_list, vma_link) {
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (i915_is_ggtt(vma->vm) &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;

-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
{
        struct i915_vma *vma;

-       list_for_each_entry(vma, &o->vma_list, vma_link) {
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (i915_is_ggtt(vma->vm) &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;

-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
{
        struct i915_vma *vma;

-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (drm_mm_node_allocated(&vma->node))
                        return true;
        BUG_ON(list_empty(&o->vma_list));

-       list_for_each_entry(vma, &o->vma_list, vma_link) {
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (i915_is_ggtt(vma->vm) &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (vma->pin_count > 0)
                        return true;
                return;

        list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-                                mm_list) {
+                                vm_link) {
                if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
                        break;
        }
search_again:
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+       list_for_each_entry(vma, &vm->inactive_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

                goto none;

        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(vma, &vm->active_list, mm_list) {
+       list_for_each_entry(vma, &vm->active_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
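
/*
 * Eviction order: the inactive list is scanned first and the active list
 * only afterwards, so idle VMAs are sacrificed before busy ones. Both
 * scans are plain vm_link walks over the per-VM LRU lists.
 */
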
                WARN_ON(!list_empty(&vm->active_list));
        }

-       list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+       list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));
}
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
        }
        /* Clear any non-preallocated blocks */
        vm = &dev_priv->gtt.base;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                flush = false;
-               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (vma->vm != vm)
                                continue;
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

-       INIT_LIST_HEAD(&vma->vma_link);
-       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->vm_link);
+       INIT_LIST_HEAD(&vma->obj_link);
        INIT_LIST_HEAD(&vma->exec_list);
        vma->vm = vm;
        vma->obj = obj;

        if (i915_is_ggtt(vm))
                vma->ggtt_view = *ggtt_view;

-       list_add_tail(&vma->vma_link, &obj->vma_list);
+       list_add_tail(&vma->obj_link, &obj->vma_list);
        if (!i915_is_ggtt(vm))
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
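
/*
 * At creation both link nodes are initialised empty and only obj_link is
 * immediately chained into obj->vma_list; vm_link stays empty until the
 * VMA is actually bound, which is what makes the list_empty(&vma->vm_link)
 * checks elsewhere meaningful.
 */
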
        struct i915_ggtt_view ggtt_view;

        /** This object's place on the active/inactive lists */
-       struct list_head mm_list;
+       struct list_head vm_link;

-       struct list_head vma_link; /* Link in the object's VMA list */
+       struct list_head obj_link; /* Link in the object's VMA list */

        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
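
/*
 * The renamed members make the topology self-describing: a "_list" is a
 * list head owned by this structure, while a "_link" is this structure's
 * node on a list owned by someone else, with the prefix naming that owner.
 * vm_link chains the VMA into vma->vm's active/inactive lists; obj_link
 * chains it into vma->obj's vma_list.
 */
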
        struct i915_vma *vma;
        int count = 0;

-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (drm_mm_node_allocated(&vma->node))
                        count++;
                if (vma->pin_count)
                        /* For the unbound phase, this should be a no-op! */
                        list_for_each_entry_safe(vma, v,
-                                                &obj->vma_list, vma_link)
+                                                &obj->vma_list, obj_link)
                                if (i915_vma_unbind(vma))
                                        break;
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt->inactive_list);
        }

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;

-       list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+       list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
                int ret = i915_vma_unbind(vma);
                WARN_ON(ret && ret != -EIO);
        }
        struct i915_vma *vma;
        int i = 0;

-       list_for_each_entry(vma, head, mm_list) {
+       list_for_each_entry(vma, head, vm_link) {
                capture_bo(err++, vma);
                if (++i == count)
                        break;

                if (err == last)
                        break;

-               list_for_each_entry(vma, &obj->vma_list, vma_link)
+               list_for_each_entry(vma, &obj->vma_list, obj_link)
                        if (vma->vm == vm && vma->pin_count > 0)
                                capture_bo(err++, vma);
        }
        int i;

        i = 0;
-       list_for_each_entry(vma, &vm->active_list, mm_list)
+       list_for_each_entry(vma, &vm->active_list, vm_link)
                i++;
        error->active_bo_count[ndx] = i;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               list_for_each_entry(vma, &obj->vma_list, vma_link)
+               list_for_each_entry(vma, &obj->vma_list, obj_link)
                        if (vma->vm == vm && vma->pin_count > 0)
                                i++;
        }
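
/*
 * A minimal, self-contained sketch (toy types, not i915 code) of why the
 * member name appears at every iteration site: list_for_each_entry() takes
 * the name of the struct member embedding the list_head, so renaming
 * mm_list/vma_link to vm_link/obj_link has to touch every walker shown
 * above.
 */
#include <linux/list.h>

struct toy_vma {
        struct list_head obj_link;      /* node on an object's VMA list */
        struct list_head vm_link;       /* node on an address space's LRU list */
};

static unsigned long toy_count_vmas(struct list_head *vma_list)
{
        struct toy_vma *vma;
        unsigned long count = 0;

        /* third argument is the member name, not a value */
        list_for_each_entry(vma, vma_list, obj_link)
                count++;

        return count;
}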