From 1c7f4bca5a6f53c8aa5ecf52fc9f68194e44aede Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 26 Feb 2016 11:03:19 +0000
Subject: [PATCH] drm/i915: Rename vma->*_list to *_link for consistency

Elsewhere we have adopted the convention of using '_link' to denote
elements in the list (and '_list' for the actual list_head itself),
and that the name should indicate which list the link belongs to (and
preferably not just where the link is being stored).

s/vma_link/obj_link/ (we iterate over obj->vma_list)
s/mm_list/vm_link/ (we iterate over vm->[in]active_list)

Signed-off-by: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Signed-off-by: Tvrtko Ursulin
---
 drivers/gpu/drm/i915/i915_debugfs.c      | 17 ++++----
 drivers/gpu/drm/i915/i915_gem.c          | 50 ++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_context.c  |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c    |  6 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c      | 10 ++---
 drivers/gpu/drm/i915/i915_gem_gtt.h      |  4 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  4 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c    |  8 ++--
 10 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9e19cf0e7075..d7f03bceea57 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	u64 size = 0;
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    drm_mm_node_allocated(&vma->node))
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (i915_is_ggtt(vma->vm) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -164,7 +163,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 			   i915_is_ggtt(vma->vm) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
@@ -230,7 +229,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		seq_printf(m, "   ");
 		describe_obj(m, vma->obj);
 		seq_printf(m, "\n");
@@ -342,7 +341,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		stats->shared += obj->base.size;
 
 	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
 			if (!drm_mm_node_allocated(&vma->node))
@@ -454,12 +453,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, mm_list);
+	count_vmas(&vm->active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, mm_list);
+	count_vmas(&vm->inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f34606f2f..bcd2e481d014 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
@@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj)
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
 		if (i915_vma_unbind(vma))
 			break;
 
@@ -2416,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
 	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
-	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
@@ -2454,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	list_move_tail(&obj->global_list,
 		       &to_i915(obj->base.dev)->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!list_empty(&vma->vm_link))
+			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
 	i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -3317,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (list_empty(&vma->vma_link))
+	if (list_empty(&vma->obj_link))
 		return 0;
 
 	if (!drm_mm_node_allocated(&vma->node)) {
@@ -3351,7 +3351,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	vma->vm->unbind_vma(vma);
 	vma->bound = 0;
 
-	list_del_init(&vma->mm_list);
+	list_del_init(&vma->vm_link);
 	if (i915_is_ggtt(vma->vm)) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
@@ -3609,7 +3609,7 @@ search_free:
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &vm->inactive_list);
+	list_add_tail(&vma->vm_link, &vm->inactive_list);
 
 	return vma;
 
@@ -3774,7 +3774,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	/* And bump the LRU for this access */
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->mm_list,
+		list_move_tail(&vma->vm_link,
 			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
 
 	return 0;
@@ -3809,7 +3809,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3872,7 +3872,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			 */
 		}
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -3882,7 +3882,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
@@ -4556,7 +4556,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		int ret;
 
 		vma->pin_count = 0;
@@ -4613,7 +4613,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
 		    vma->vm == vm)
 			return vma;
@@ -4630,7 +4630,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	if (WARN_ONCE(!view, "no view specified"))
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
@@ -4651,7 +4651,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 	if (!i915_is_ggtt(vm))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
 
-	list_del(&vma->vma_link);
+	list_del(&vma->obj_link);
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
@@ -5201,7 +5201,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5220,7 +5220,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
@@ -5234,7 +5234,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5251,7 +5251,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
@@ -5264,7 +5264,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
@@ -5281,7 +5281,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5294,7 +5294,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count > 0)
 			return true;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4be2ce917f54..5dd84e148bba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 		return;
 
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 mm_list) {
+				 vm_link) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 
 search_again:
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -125,7 +125,7 @@ search_again:
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, mm_list) {
+	list_for_each_entry(vma, &vm->active_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
 		if (vma->pin_count == 0)
 			WARN_ON(i915_vma_unbind(vma));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f3561c..68d3af6854d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2758,7 +2758,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
 	}
 
 	/* Clear any non-preallocated blocks */
@@ -3258,7 +3258,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != vm)
 				continue;
 
@@ -3314,8 +3314,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vma_link);
-	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->vm_link);
+	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
@@ -3323,7 +3323,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (i915_is_ggtt(vm))
 		vma->ggtt_view = *ggtt_view;
 
-	list_add_tail(&vma->vma_link, &obj->vma_list);
+	list_add_tail(&vma->obj_link, &obj->vma_list);
 	if (!i915_is_ggtt(vm))
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 368d111aa9c5..31eb0b261fbf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -194,9 +194,9 @@ struct i915_vma {
 	struct i915_ggtt_view ggtt_view;
 
 	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
+	struct list_head vm_link;
 
-	struct list_head vma_link; /* Link in the object's VMA list */
+	struct list_head obj_link; /* Link in the object's VMA list */
 
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 58c1e592bbdb..d3c473ffb90a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (drm_mm_node_allocated(&vma->node))
 			count++;
 		if (vma->pin_count)
@@ -176,7 +176,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			/* For the unbound phase, this should be a no-op! */
 			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
+						 &obj->vma_list, obj_link)
 				if (i915_vma_unbind(vma))
 					break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index feec0f80d8ef..2e6e9fb6f80d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -697,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
 	}
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 7107f2fd38f5..4b09c840d493 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -78,7 +78,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
 			int ret = i915_vma_unbind(vma);
 			WARN_ON(ret && ret != -EIO);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 978c026963b8..831895b8cb75 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -736,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 	struct i915_vma *vma;
 	int i = 0;
 
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -759,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 		if (err == last)
 			break;
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
 	}
@@ -1127,12 +1127,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	int i;
 
 	i = 0;
-	list_for_each_entry(vma, &vm->active_list, mm_list)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		i++;
 	error->active_bo_count[ndx] = i;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
 	}
-- 
2.20.1
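
Note (illustrative sketch only, not part of the patch): the convention the
commit message describes is that a container owns a '_list' head while each
element embeds a '_link' named after the list it sits on. The struct and
function names below are simplified stand-ins for the i915 types, assuming
only <linux/list.h>:

	#include <linux/list.h>

	struct example_obj {
		struct list_head vma_list;	/* list head: every vma created for this object */
	};

	struct example_vm {
		struct list_head inactive_list;	/* list head: every idle vma bound in this vm */
	};

	struct example_vma {
		struct list_head obj_link;	/* link on example_obj.vma_list */
		struct list_head vm_link;	/* link on example_vm.inactive_list */
	};

	/* Iteration then names both the list being walked and the link used. */
	static int count_object_vmas(struct example_obj *obj)
	{
		struct example_vma *vma;
		int count = 0;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;

		return count;
	}

Reading list_for_each_entry(vma, &obj->vma_list, obj_link) states which list
is walked (obj->vma_list) and via which link (obj_link), which is the
consistency the rename is after.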