return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
- struct list_head global_list;
+ struct list_head global_link;
union {
struct rcu_head rcu;
struct llist_node freed;
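
Aside: `global_link` is an embedded list node. The `list_for_each_entry()` and `list_move_tail()` calls throughout this patch locate the containing object from that node purely by member name, via `container_of()`/`offsetof()`, which is why the field rename forces the mechanical update of every iteration site in the hunks below. A minimal sketch of the idiom, using a hypothetical `struct demo_obj` rather than the real `drm_i915_gem_object`:

#include <linux/list.h>

struct demo_obj {
	unsigned long size;
	struct list_head global_link;	/* embedded node, as in the hunk above */
};

/* Sum the sizes of all objects on @head. list_for_each_entry() recovers
 * each demo_obj from its global_link node with container_of().
 */
static unsigned long demo_total(struct list_head *head)
{
	struct demo_obj *obj;
	unsigned long total = 0;

	list_for_each_entry(obj, head, global_link)
		total += obj->size;

	return total;
}
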
i915 = to_i915(obj->base.dev);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->global_list, list);
+ list_move_tail(&obj->global_link, list);
}
/**
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (--obj->bind_count == 0)
- list_move_tail(&obj->global_list,
+ list_move_tail(&obj->global_link,
&to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
}
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
obj->bind_count++;
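
For readers following the bound/unbound bookkeeping: `bind_count` tracks how many VMAs of the object are bound into an address space; the object sits on `mm.bound_list` while any binding exists and returns to `mm.unbound_list` only when the last one goes away. A simplified paraphrase of the two hunks above (hypothetical helper names, struct_mutex assumed held, error paths omitted):

static void demo_mark_bound(struct drm_i915_gem_object *obj,
			    struct drm_i915_private *i915)
{
	list_move_tail(&obj->global_link, &i915->mm.bound_list);
	obj->bind_count++;
}

static void demo_mark_unbound(struct drm_i915_gem_object *obj,
			      struct drm_i915_private *i915)
{
	/* Only the last unbind moves the object off the bound list. */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link, &i915->mm.unbound_list);
}
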
{
mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_list);
+ INIT_LIST_HEAD(&obj->global_link);
INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
- list_del(&obj->global_list);
+ list_del(&obj->global_link);
}
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_list) {
+ list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
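
The `phases` idiom above walks both object lists with a single loop: a NULL-terminated array of list heads stands in for the pair of lists. Reconstructed for illustration (the surrounding declarations are assumptions, not shown in the hunk):

struct list_head *phases[] = {
	&dev_priv->mm.unbound_list,
	&dev_priv->mm.bound_list,
	NULL
}, **p;

for (p = phases; *p; p++)
	list_for_each_entry(obj, *p, global_link) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}
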
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_list) {
+ &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
* (unless we are forced to ofc!)
*/
if (obj->bind_count)
- list_move_tail(&obj->global_list, &rq->i915->mm.bound_list);
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
obj->mm.dirty = true; /* be paranoid */
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_list))) {
- list_move_tail(&obj->global_list, &still_in_list);
+ global_link))) {
+ list_move_tail(&obj->global_link, &still_in_list);
if (!obj->mm.pages) {
- list_del_init(&obj->global_list);
+ list_del_init(&obj->global_link);
continue;
}
I915_MM_SHRINKER);
if (!obj->mm.pages) {
__i915_gem_object_invalidate(obj);
- list_del_init(&obj->global_list);
+ list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
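
The shrinker loop above uses a park-and-splice walk: each candidate is first moved onto a private `still_in_list`, so the source list can be edited (or the object dropped from it entirely) while its pages are released, and whatever survives is spliced back afterwards. A simplified sketch of the shape, with the target check, locking, shrink flags and the actual page-dropping elided:

LIST_HEAD(still_in_list);
struct drm_i915_gem_object *obj;

while ((obj = list_first_entry_or_null(phase->list,
				       typeof(*obj), global_link))) {
	/* Park the object so phase->list stays safe to modify. */
	list_move_tail(&obj->global_link, &still_in_list);

	/* ...attempt to unbind the object and release its pages... */
}
list_splice(&still_in_list, phase->list);
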
i915_gem_retire_requests(dev_priv);
count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (!obj->mm.pages)
continue;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!obj->mm.pages)
continue;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
return obj;